author     Mark Rustad <mark.d.rustad@intel.com>        2014-07-22 02:51:08 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2014-07-25 22:58:36 -0400
commit     e90dd264566405e2f1bbb8595a4b5612281f6315 (patch)
tree       0f9056b115f64524d689b6afe8e43728e26937b6 /drivers/net/ethernet/intel
parent     9f1fb8acd30c9ace0145e66942481bdb90beca15 (diff)
ixgbe: Make return values more direct
Make return values more direct, eliminating some gotos and otherwise
unneeded conditionals. This also eliminates some local variables, along
with a few minor cleanups in the affected code so checkpatch won't
complain.

Signed-off-by: Mark Rustad <mark.d.rustad@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
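The transformation is the same in every hunk below: instead of setting a
local status variable and jumping to a shared out: label, each error path
now returns its value directly, and locals that only ever held zero are
dropped. A minimal before/after sketch of that shape, using hypothetical
example_* names rather than real ixgbe code:

/* Illustration only: the example_* names and the error code below are
 * hypothetical and are not part of the ixgbe driver.
 */
typedef int s32;

#define EXAMPLE_ERR_NOT_SUPPORTED	(-20)

struct example_hw { int id; };

static s32 example_stop(struct example_hw *hw)     { (void)hw; return 0; }
static s32 example_identify(struct example_hw *hw) { (void)hw; return 0; }

/* Before: status is carried through every branch to a single exit label. */
static s32 example_init_old(struct example_hw *hw)
{
	s32 status = 0;

	status = example_stop(hw);
	if (status != 0)
		goto out;

	if (example_identify(hw) != 0) {
		status = EXAMPLE_ERR_NOT_SUPPORTED;
		goto out;
	}

out:
	return status;
}

/* After: each failure returns immediately, the initializer and the exit
 * label disappear, and the success path ends in a plain return 0.
 */
static s32 example_init_new(struct example_hw *hw)
{
	s32 status;

	status = example_stop(hw);
	if (status)
		return status;

	if (example_identify(hw))
		return EXAMPLE_ERR_NOT_SUPPORTED;

	return 0;
}

The hunks that follow apply this reshaping across the 82598, 82599 and
common EEPROM/flow-control paths; where a trailing call was the last thing
a function did, its result is now returned directly rather than being
stored in a status variable first.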
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c    111
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c    184
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c   574
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c       39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c    39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c      24
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      32
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c      106
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c      582
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c     31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c      30
11 files changed, 742 insertions(+), 1010 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 681e3205038d..c5c97b483d7c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -122,7 +122,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
122{ 122{
123 struct ixgbe_mac_info *mac = &hw->mac; 123 struct ixgbe_mac_info *mac = &hw->mac;
124 struct ixgbe_phy_info *phy = &hw->phy; 124 struct ixgbe_phy_info *phy = &hw->phy;
125 s32 ret_val = 0; 125 s32 ret_val;
126 u16 list_offset, data_offset; 126 u16 list_offset, data_offset;
127 127
128 /* Identify the PHY */ 128 /* Identify the PHY */
@@ -147,28 +147,23 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
147 147
148 /* Call SFP+ identify routine to get the SFP+ module type */ 148 /* Call SFP+ identify routine to get the SFP+ module type */
149 ret_val = phy->ops.identify_sfp(hw); 149 ret_val = phy->ops.identify_sfp(hw);
150 if (ret_val != 0) 150 if (ret_val)
151 goto out; 151 return ret_val;
152 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { 152 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
153 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; 153 return IXGBE_ERR_SFP_NOT_SUPPORTED;
154 goto out;
155 }
156 154
157 /* Check to see if SFP+ module is supported */ 155 /* Check to see if SFP+ module is supported */
158 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, 156 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
159 &list_offset, 157 &list_offset,
160 &data_offset); 158 &data_offset);
161 if (ret_val != 0) { 159 if (ret_val)
162 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; 160 return IXGBE_ERR_SFP_NOT_SUPPORTED;
163 goto out;
164 }
165 break; 161 break;
166 default: 162 default:
167 break; 163 break;
168 } 164 }
169 165
170out: 166 return 0;
171 return ret_val;
172} 167}
173 168
174/** 169/**
@@ -183,7 +178,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
183{ 178{
184 u32 regval; 179 u32 regval;
185 u32 i; 180 u32 i;
186 s32 ret_val = 0; 181 s32 ret_val;
187 182
188 ret_val = ixgbe_start_hw_generic(hw); 183 ret_val = ixgbe_start_hw_generic(hw);
189 184
@@ -203,11 +198,13 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
203 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 198 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
204 } 199 }
205 200
201 if (ret_val)
202 return ret_val;
203
206 /* set the completion timeout for interface */ 204 /* set the completion timeout for interface */
207 if (ret_val == 0) 205 ixgbe_set_pcie_completion_timeout(hw);
208 ixgbe_set_pcie_completion_timeout(hw);
209 206
210 return ret_val; 207 return 0;
211} 208}
212 209
213/** 210/**
@@ -222,7 +219,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
222 ixgbe_link_speed *speed, 219 ixgbe_link_speed *speed,
223 bool *autoneg) 220 bool *autoneg)
224{ 221{
225 s32 status = 0;
226 u32 autoc = 0; 222 u32 autoc = 0;
227 223
228 /* 224 /*
@@ -262,11 +258,10 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
262 break; 258 break;
263 259
264 default: 260 default:
265 status = IXGBE_ERR_LINK_SETUP; 261 return IXGBE_ERR_LINK_SETUP;
266 break;
267 } 262 }
268 263
269 return status; 264 return 0;
270} 265}
271 266
272/** 267/**
@@ -277,14 +272,12 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
277 **/ 272 **/
278static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) 273static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
279{ 274{
280 enum ixgbe_media_type media_type;
281
282 /* Detect if there is a copper PHY attached. */ 275 /* Detect if there is a copper PHY attached. */
283 switch (hw->phy.type) { 276 switch (hw->phy.type) {
284 case ixgbe_phy_cu_unknown: 277 case ixgbe_phy_cu_unknown:
285 case ixgbe_phy_tn: 278 case ixgbe_phy_tn:
286 media_type = ixgbe_media_type_copper; 279 return ixgbe_media_type_copper;
287 goto out; 280
288 default: 281 default:
289 break; 282 break;
290 } 283 }
@@ -294,30 +287,27 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
294 case IXGBE_DEV_ID_82598: 287 case IXGBE_DEV_ID_82598:
295 case IXGBE_DEV_ID_82598_BX: 288 case IXGBE_DEV_ID_82598_BX:
296 /* Default device ID is mezzanine card KX/KX4 */ 289 /* Default device ID is mezzanine card KX/KX4 */
297 media_type = ixgbe_media_type_backplane; 290 return ixgbe_media_type_backplane;
298 break; 291
299 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 292 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
300 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 293 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
301 case IXGBE_DEV_ID_82598_DA_DUAL_PORT: 294 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
302 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: 295 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
303 case IXGBE_DEV_ID_82598EB_XF_LR: 296 case IXGBE_DEV_ID_82598EB_XF_LR:
304 case IXGBE_DEV_ID_82598EB_SFP_LOM: 297 case IXGBE_DEV_ID_82598EB_SFP_LOM:
305 media_type = ixgbe_media_type_fiber; 298 return ixgbe_media_type_fiber;
306 break; 299
307 case IXGBE_DEV_ID_82598EB_CX4: 300 case IXGBE_DEV_ID_82598EB_CX4:
308 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 301 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
309 media_type = ixgbe_media_type_cx4; 302 return ixgbe_media_type_cx4;
310 break; 303
311 case IXGBE_DEV_ID_82598AT: 304 case IXGBE_DEV_ID_82598AT:
312 case IXGBE_DEV_ID_82598AT2: 305 case IXGBE_DEV_ID_82598AT2:
313 media_type = ixgbe_media_type_copper; 306 return ixgbe_media_type_copper;
314 break; 307
315 default: 308 default:
316 media_type = ixgbe_media_type_unknown; 309 return ixgbe_media_type_unknown;
317 break;
318 } 310 }
319out:
320 return media_type;
321} 311}
322 312
323/** 313/**
@@ -328,7 +318,6 @@ out:
328 **/ 318 **/
329static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) 319static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
330{ 320{
331 s32 ret_val = 0;
332 u32 fctrl_reg; 321 u32 fctrl_reg;
333 u32 rmcs_reg; 322 u32 rmcs_reg;
334 u32 reg; 323 u32 reg;
@@ -338,10 +327,8 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
338 bool link_up; 327 bool link_up;
339 328
340 /* Validate the water mark configuration */ 329 /* Validate the water mark configuration */
341 if (!hw->fc.pause_time) { 330 if (!hw->fc.pause_time)
342 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 331 return IXGBE_ERR_INVALID_LINK_SETTINGS;
343 goto out;
344 }
345 332
346 /* Low water mark of zero causes XOFF floods */ 333 /* Low water mark of zero causes XOFF floods */
347 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 334 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -350,8 +337,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
350 if (!hw->fc.low_water[i] || 337 if (!hw->fc.low_water[i] ||
351 hw->fc.low_water[i] >= hw->fc.high_water[i]) { 338 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
352 hw_dbg(hw, "Invalid water mark configuration\n"); 339 hw_dbg(hw, "Invalid water mark configuration\n");
353 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 340 return IXGBE_ERR_INVALID_LINK_SETTINGS;
354 goto out;
355 } 341 }
356 } 342 }
357 } 343 }
@@ -428,8 +414,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
428 break; 414 break;
429 default: 415 default:
430 hw_dbg(hw, "Flow control param set incorrectly\n"); 416 hw_dbg(hw, "Flow control param set incorrectly\n");
431 ret_val = IXGBE_ERR_CONFIG; 417 return IXGBE_ERR_CONFIG;
432 goto out;
433 } 418 }
434 419
435 /* Set 802.3x based flow control settings. */ 420 /* Set 802.3x based flow control settings. */
@@ -460,8 +445,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
460 /* Configure flow control refresh threshold value */ 445 /* Configure flow control refresh threshold value */
461 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 446 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
462 447
463out: 448 return 0;
464 return ret_val;
465} 449}
466 450
467/** 451/**
@@ -597,7 +581,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
597 } 581 }
598 582
599 if (!*link_up) 583 if (!*link_up)
600 goto out; 584 return 0;
601 } 585 }
602 586
603 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 587 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
@@ -628,7 +612,6 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
628 (ixgbe_validate_link_ready(hw) != 0)) 612 (ixgbe_validate_link_ready(hw) != 0))
629 *link_up = false; 613 *link_up = false;
630 614
631out:
632 return 0; 615 return 0;
633} 616}
634 617
@@ -645,7 +628,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
645 bool autoneg_wait_to_complete) 628 bool autoneg_wait_to_complete)
646{ 629{
647 bool autoneg = false; 630 bool autoneg = false;
648 s32 status = 0;
649 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 631 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
650 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 632 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
651 u32 autoc = curr_autoc; 633 u32 autoc = curr_autoc;
@@ -656,7 +638,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
656 speed &= link_capabilities; 638 speed &= link_capabilities;
657 639
658 if (speed == IXGBE_LINK_SPEED_UNKNOWN) 640 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
659 status = IXGBE_ERR_LINK_SETUP; 641 return IXGBE_ERR_LINK_SETUP;
660 642
661 /* Set KX4/KX support according to speed requested */ 643 /* Set KX4/KX support according to speed requested */
662 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || 644 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
@@ -670,17 +652,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
670 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 652 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
671 } 653 }
672 654
673 if (status == 0) { 655 /* Setup and restart the link based on the new values in
674 /* 656 * ixgbe_hw This will write the AUTOC register based on the new
675 * Setup and restart the link based on the new values in 657 * stored values
676 * ixgbe_hw This will write the AUTOC register based on the new 658 */
677 * stored values 659 return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
678 */
679 status = ixgbe_start_mac_link_82598(hw,
680 autoneg_wait_to_complete);
681 }
682
683 return status;
684} 660}
685 661
686 662
@@ -717,7 +693,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
717 **/ 693 **/
718static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) 694static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
719{ 695{
720 s32 status = 0; 696 s32 status;
721 s32 phy_status = 0; 697 s32 phy_status = 0;
722 u32 ctrl; 698 u32 ctrl;
723 u32 gheccr; 699 u32 gheccr;
@@ -727,8 +703,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
727 703
728 /* Call adapter stop to disable tx/rx and clear interrupts */ 704 /* Call adapter stop to disable tx/rx and clear interrupts */
729 status = hw->mac.ops.stop_adapter(hw); 705 status = hw->mac.ops.stop_adapter(hw);
730 if (status != 0) 706 if (status)
731 goto reset_hw_out; 707 return status;
732 708
733 /* 709 /*
734 * Power up the Atlas Tx lanes if they are currently powered down. 710 * Power up the Atlas Tx lanes if they are currently powered down.
@@ -770,7 +746,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
770 /* Init PHY and function pointers, perform SFP setup */ 746 /* Init PHY and function pointers, perform SFP setup */
771 phy_status = hw->phy.ops.init(hw); 747 phy_status = hw->phy.ops.init(hw);
772 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) 748 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
773 goto reset_hw_out; 749 return phy_status;
774 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) 750 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
775 goto mac_reset_top; 751 goto mac_reset_top;
776 752
@@ -836,7 +812,6 @@ mac_reset_top:
836 */ 812 */
837 hw->mac.ops.init_rx_addrs(hw); 813 hw->mac.ops.init_rx_addrs(hw);
838 814
839reset_hw_out:
840 if (phy_status) 815 if (phy_status)
841 status = phy_status; 816 status = phy_status;
842 817
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 6fc633a2726a..cf55a0df877b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -123,7 +123,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
123 123
124static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 124static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
125{ 125{
126 s32 ret_val = 0; 126 s32 ret_val;
127 u16 list_offset, data_offset, data_value; 127 u16 list_offset, data_offset, data_value;
128 128
129 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 129 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -133,16 +133,14 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
133 133
134 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 134 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
135 &data_offset); 135 &data_offset);
136 if (ret_val != 0) 136 if (ret_val)
137 goto setup_sfp_out; 137 return ret_val;
138 138
139 /* PHY config will finish before releasing the semaphore */ 139 /* PHY config will finish before releasing the semaphore */
140 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 140 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
141 IXGBE_GSSR_MAC_CSR_SM); 141 IXGBE_GSSR_MAC_CSR_SM);
142 if (ret_val != 0) { 142 if (ret_val)
143 ret_val = IXGBE_ERR_SWFW_SYNC; 143 return IXGBE_ERR_SWFW_SYNC;
144 goto setup_sfp_out;
145 }
146 144
147 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) 145 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
148 goto setup_sfp_err; 146 goto setup_sfp_err;
@@ -169,13 +167,11 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
169 167
170 if (ret_val) { 168 if (ret_val) {
171 hw_dbg(hw, " sfp module setup not complete\n"); 169 hw_dbg(hw, " sfp module setup not complete\n");
172 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 170 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
173 goto setup_sfp_out;
174 } 171 }
175 } 172 }
176 173
177setup_sfp_out: 174 return 0;
178 return ret_val;
179 175
180setup_sfp_err: 176setup_sfp_err:
181 /* Release the semaphore */ 177 /* Release the semaphore */
@@ -294,7 +290,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
294{ 290{
295 struct ixgbe_mac_info *mac = &hw->mac; 291 struct ixgbe_mac_info *mac = &hw->mac;
296 struct ixgbe_phy_info *phy = &hw->phy; 292 struct ixgbe_phy_info *phy = &hw->phy;
297 s32 ret_val = 0; 293 s32 ret_val;
298 u32 esdp; 294 u32 esdp;
299 295
300 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { 296 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -355,7 +351,6 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
355 ixgbe_link_speed *speed, 351 ixgbe_link_speed *speed,
356 bool *autoneg) 352 bool *autoneg)
357{ 353{
358 s32 status = 0;
359 u32 autoc = 0; 354 u32 autoc = 0;
360 355
361 /* Determine 1G link capabilities off of SFP+ type */ 356 /* Determine 1G link capabilities off of SFP+ type */
@@ -367,7 +362,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
367 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { 362 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
368 *speed = IXGBE_LINK_SPEED_1GB_FULL; 363 *speed = IXGBE_LINK_SPEED_1GB_FULL;
369 *autoneg = true; 364 *autoneg = true;
370 goto out; 365 return 0;
371 } 366 }
372 367
373 /* 368 /*
@@ -430,8 +425,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
430 break; 425 break;
431 426
432 default: 427 default:
433 status = IXGBE_ERR_LINK_SETUP; 428 return IXGBE_ERR_LINK_SETUP;
434 goto out;
435 } 429 }
436 430
437 if (hw->phy.multispeed_fiber) { 431 if (hw->phy.multispeed_fiber) {
@@ -445,8 +439,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
445 *autoneg = true; 439 *autoneg = true;
446 } 440 }
447 441
448out: 442 return 0;
449 return status;
450} 443}
451 444
452/** 445/**
@@ -457,14 +450,12 @@ out:
457 **/ 450 **/
458static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 451static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
459{ 452{
460 enum ixgbe_media_type media_type;
461
462 /* Detect if there is a copper PHY attached. */ 453 /* Detect if there is a copper PHY attached. */
463 switch (hw->phy.type) { 454 switch (hw->phy.type) {
464 case ixgbe_phy_cu_unknown: 455 case ixgbe_phy_cu_unknown:
465 case ixgbe_phy_tn: 456 case ixgbe_phy_tn:
466 media_type = ixgbe_media_type_copper; 457 return ixgbe_media_type_copper;
467 goto out; 458
468 default: 459 default:
469 break; 460 break;
470 } 461 }
@@ -477,34 +468,31 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
477 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: 468 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
478 case IXGBE_DEV_ID_82599_XAUI_LOM: 469 case IXGBE_DEV_ID_82599_XAUI_LOM:
479 /* Default device ID is mezzanine card KX/KX4 */ 470 /* Default device ID is mezzanine card KX/KX4 */
480 media_type = ixgbe_media_type_backplane; 471 return ixgbe_media_type_backplane;
481 break; 472
482 case IXGBE_DEV_ID_82599_SFP: 473 case IXGBE_DEV_ID_82599_SFP:
483 case IXGBE_DEV_ID_82599_SFP_FCOE: 474 case IXGBE_DEV_ID_82599_SFP_FCOE:
484 case IXGBE_DEV_ID_82599_SFP_EM: 475 case IXGBE_DEV_ID_82599_SFP_EM:
485 case IXGBE_DEV_ID_82599_SFP_SF2: 476 case IXGBE_DEV_ID_82599_SFP_SF2:
486 case IXGBE_DEV_ID_82599_SFP_SF_QP: 477 case IXGBE_DEV_ID_82599_SFP_SF_QP:
487 case IXGBE_DEV_ID_82599EN_SFP: 478 case IXGBE_DEV_ID_82599EN_SFP:
488 media_type = ixgbe_media_type_fiber; 479 return ixgbe_media_type_fiber;
489 break; 480
490 case IXGBE_DEV_ID_82599_CX4: 481 case IXGBE_DEV_ID_82599_CX4:
491 media_type = ixgbe_media_type_cx4; 482 return ixgbe_media_type_cx4;
492 break; 483
493 case IXGBE_DEV_ID_82599_T3_LOM: 484 case IXGBE_DEV_ID_82599_T3_LOM:
494 media_type = ixgbe_media_type_copper; 485 return ixgbe_media_type_copper;
495 break; 486
496 case IXGBE_DEV_ID_82599_LS: 487 case IXGBE_DEV_ID_82599_LS:
497 media_type = ixgbe_media_type_fiber_lco; 488 return ixgbe_media_type_fiber_lco;
498 break; 489
499 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 490 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
500 media_type = ixgbe_media_type_fiber_qsfp; 491 return ixgbe_media_type_fiber_qsfp;
501 break; 492
502 default: 493 default:
503 media_type = ixgbe_media_type_unknown; 494 return ixgbe_media_type_unknown;
504 break;
505 } 495 }
506out:
507 return media_type;
508} 496}
509 497
510/** 498/**
@@ -554,7 +542,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
554 status = hw->mac.ops.acquire_swfw_sync(hw, 542 status = hw->mac.ops.acquire_swfw_sync(hw,
555 IXGBE_GSSR_MAC_CSR_SM); 543 IXGBE_GSSR_MAC_CSR_SM);
556 if (status) 544 if (status)
557 goto out; 545 return status;
558 546
559 got_lock = true; 547 got_lock = true;
560 } 548 }
@@ -591,7 +579,6 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
591 /* Add delay to filter out noises during initial link setup */ 579 /* Add delay to filter out noises during initial link setup */
592 msleep(50); 580 msleep(50);
593 581
594out:
595 return status; 582 return status;
596} 583}
597 584
@@ -958,7 +945,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
958 bool autoneg_wait_to_complete) 945 bool autoneg_wait_to_complete)
959{ 946{
960 bool autoneg = false; 947 bool autoneg = false;
961 s32 status = 0; 948 s32 status;
962 u32 pma_pmd_1g, link_mode, links_reg, i; 949 u32 pma_pmd_1g, link_mode, links_reg, i;
963 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 950 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
964 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 951 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
@@ -974,15 +961,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
974 /* Check to see if speed passed in is supported. */ 961 /* Check to see if speed passed in is supported. */
975 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, 962 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
976 &autoneg); 963 &autoneg);
977 if (status != 0) 964 if (status)
978 goto out; 965 return status;
979 966
980 speed &= link_capabilities; 967 speed &= link_capabilities;
981 968
982 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 969 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
983 status = IXGBE_ERR_LINK_SETUP; 970 return IXGBE_ERR_LINK_SETUP;
984 goto out;
985 }
986 971
987 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 972 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
988 if (hw->mac.orig_link_settings_stored) 973 if (hw->mac.orig_link_settings_stored)
@@ -1033,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1033 /* Restart link */ 1018 /* Restart link */
1034 status = hw->mac.ops.prot_autoc_write(hw, autoc, false); 1019 status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
1035 if (status) 1020 if (status)
1036 goto out; 1021 return status;
1037 1022
1038 /* Only poll for autoneg to complete if specified to do so */ 1023 /* Only poll for autoneg to complete if specified to do so */
1039 if (autoneg_wait_to_complete) { 1024 if (autoneg_wait_to_complete) {
@@ -1060,7 +1045,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1060 msleep(50); 1045 msleep(50);
1061 } 1046 }
1062 1047
1063out:
1064 return status; 1048 return status;
1065} 1049}
1066 1050
@@ -1105,8 +1089,8 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1105 1089
1106 /* Call adapter stop to disable tx/rx and clear interrupts */ 1090 /* Call adapter stop to disable tx/rx and clear interrupts */
1107 status = hw->mac.ops.stop_adapter(hw); 1091 status = hw->mac.ops.stop_adapter(hw);
1108 if (status != 0) 1092 if (status)
1109 goto reset_hw_out; 1093 return status;
1110 1094
1111 /* flush pending Tx transactions */ 1095 /* flush pending Tx transactions */
1112 ixgbe_clear_tx_pending(hw); 1096 ixgbe_clear_tx_pending(hw);
@@ -1117,7 +1101,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1117 status = hw->phy.ops.init(hw); 1101 status = hw->phy.ops.init(hw);
1118 1102
1119 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 1103 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1120 goto reset_hw_out; 1104 return status;
1121 1105
1122 /* Setup SFP module if there is one present. */ 1106 /* Setup SFP module if there is one present. */
1123 if (hw->phy.sfp_setup_needed) { 1107 if (hw->phy.sfp_setup_needed) {
@@ -1126,7 +1110,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1126 } 1110 }
1127 1111
1128 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 1112 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1129 goto reset_hw_out; 1113 return status;
1130 1114
1131 /* Reset PHY */ 1115 /* Reset PHY */
1132 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 1116 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
@@ -1216,7 +1200,7 @@ mac_reset_top:
1216 hw->mac.orig_autoc, 1200 hw->mac.orig_autoc,
1217 false); 1201 false);
1218 if (status) 1202 if (status)
1219 goto reset_hw_out; 1203 return status;
1220 } 1204 }
1221 1205
1222 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1206 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1258,7 +1242,6 @@ mac_reset_top:
1258 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 1242 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1259 &hw->mac.wwpn_prefix); 1243 &hw->mac.wwpn_prefix);
1260 1244
1261reset_hw_out:
1262 return status; 1245 return status;
1263} 1246}
1264 1247
@@ -1927,20 +1910,20 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1927 s32 ret_val = 0; 1910 s32 ret_val = 0;
1928 1911
1929 ret_val = ixgbe_start_hw_generic(hw); 1912 ret_val = ixgbe_start_hw_generic(hw);
1930 if (ret_val != 0) 1913 if (ret_val)
1931 goto out; 1914 return ret_val;
1932 1915
1933 ret_val = ixgbe_start_hw_gen2(hw); 1916 ret_val = ixgbe_start_hw_gen2(hw);
1934 if (ret_val != 0) 1917 if (ret_val)
1935 goto out; 1918 return ret_val;
1936 1919
1937 /* We need to run link autotry after the driver loads */ 1920 /* We need to run link autotry after the driver loads */
1938 hw->mac.autotry_restart = true; 1921 hw->mac.autotry_restart = true;
1939 1922
1940 if (ret_val == 0) 1923 if (ret_val)
1941 ret_val = ixgbe_verify_fw_version_82599(hw); 1924 return ret_val;
1942out: 1925
1943 return ret_val; 1926 return ixgbe_verify_fw_version_82599(hw);
1944} 1927}
1945 1928
1946/** 1929/**
@@ -1953,16 +1936,15 @@ out:
1953 **/ 1936 **/
1954static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1937static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1955{ 1938{
1956 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1939 s32 status;
1957 1940
1958 /* Detect PHY if not unknown - returns success if already detected. */ 1941 /* Detect PHY if not unknown - returns success if already detected. */
1959 status = ixgbe_identify_phy_generic(hw); 1942 status = ixgbe_identify_phy_generic(hw);
1960 if (status != 0) { 1943 if (status) {
1961 /* 82599 10GBASE-T requires an external PHY */ 1944 /* 82599 10GBASE-T requires an external PHY */
1962 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) 1945 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1963 goto out; 1946 return status;
1964 else 1947 status = ixgbe_identify_module_generic(hw);
1965 status = ixgbe_identify_module_generic(hw);
1966 } 1948 }
1967 1949
1968 /* Set PHY type none if no PHY detected */ 1950 /* Set PHY type none if no PHY detected */
@@ -1973,9 +1955,8 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1973 1955
1974 /* Return error if SFP module has been detected but is not supported */ 1956 /* Return error if SFP module has been detected but is not supported */
1975 if (hw->phy.type == ixgbe_phy_sfp_unsupported) 1957 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1976 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1958 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1977 1959
1978out:
1979 return status; 1960 return status;
1980} 1961}
1981 1962
@@ -2021,26 +2002,24 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2021 u16 fw_version = 0; 2002 u16 fw_version = 0;
2022 2003
2023 /* firmware check is only necessary for SFI devices */ 2004 /* firmware check is only necessary for SFI devices */
2024 if (hw->phy.media_type != ixgbe_media_type_fiber) { 2005 if (hw->phy.media_type != ixgbe_media_type_fiber)
2025 status = 0; 2006 return 0;
2026 goto fw_version_out;
2027 }
2028 2007
2029 /* get the offset to the Firmware Module block */ 2008 /* get the offset to the Firmware Module block */
2030 offset = IXGBE_FW_PTR; 2009 offset = IXGBE_FW_PTR;
2031 if (hw->eeprom.ops.read(hw, offset, &fw_offset)) 2010 if (hw->eeprom.ops.read(hw, offset, &fw_offset))
2032 goto fw_version_err; 2011 goto fw_version_err;
2033 2012
2034 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2013 if (fw_offset == 0 || fw_offset == 0xFFFF)
2035 goto fw_version_out; 2014 return IXGBE_ERR_EEPROM_VERSION;
2036 2015
2037 /* get the offset to the Pass Through Patch Configuration block */ 2016 /* get the offset to the Pass Through Patch Configuration block */
2038 offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; 2017 offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
2039 if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) 2018 if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
2040 goto fw_version_err; 2019 goto fw_version_err;
2041 2020
2042 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2021 if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
2043 goto fw_version_out; 2022 return IXGBE_ERR_EEPROM_VERSION;
2044 2023
2045 /* get the firmware version */ 2024 /* get the firmware version */
2046 offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; 2025 offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
@@ -2050,7 +2029,6 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2050 if (fw_version > 0x5) 2029 if (fw_version > 0x5)
2051 status = 0; 2030 status = 0;
2052 2031
2053fw_version_out:
2054 return status; 2032 return status;
2055 2033
2056fw_version_err: 2034fw_version_err:
@@ -2067,37 +2045,33 @@ fw_version_err:
2067 **/ 2045 **/
2068static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2046static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2069{ 2047{
2070 bool lesm_enabled = false;
2071 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2048 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2072 s32 status; 2049 s32 status;
2073 2050
2074 /* get the offset to the Firmware Module block */ 2051 /* get the offset to the Firmware Module block */
2075 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2052 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2076 2053
2077 if ((status != 0) || 2054 if (status || fw_offset == 0 || fw_offset == 0xFFFF)
2078 (fw_offset == 0) || (fw_offset == 0xFFFF)) 2055 return false;
2079 goto out;
2080 2056
2081 /* get the offset to the LESM Parameters block */ 2057 /* get the offset to the LESM Parameters block */
2082 status = hw->eeprom.ops.read(hw, (fw_offset + 2058 status = hw->eeprom.ops.read(hw, (fw_offset +
2083 IXGBE_FW_LESM_PARAMETERS_PTR), 2059 IXGBE_FW_LESM_PARAMETERS_PTR),
2084 &fw_lesm_param_offset); 2060 &fw_lesm_param_offset);
2085 2061
2086 if ((status != 0) || 2062 if (status ||
2087 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) 2063 fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF)
2088 goto out; 2064 return false;
2089 2065
2090 /* get the lesm state word */ 2066 /* get the lesm state word */
2091 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + 2067 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2092 IXGBE_FW_LESM_STATE_1), 2068 IXGBE_FW_LESM_STATE_1),
2093 &fw_lesm_state); 2069 &fw_lesm_state);
2094 2070
2095 if ((status == 0) && 2071 if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2096 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) 2072 return true;
2097 lesm_enabled = true;
2098 2073
2099out: 2074 return false;
2100 return lesm_enabled;
2101} 2075}
2102 2076
2103/** 2077/**
@@ -2115,22 +2089,16 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2115 u16 words, u16 *data) 2089 u16 words, u16 *data)
2116{ 2090{
2117 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2091 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2118 s32 ret_val = IXGBE_ERR_CONFIG;
2119 2092
2120 /* 2093 /* If EEPROM is detected and can be addressed using 14 bits,
2121 * If EEPROM is detected and can be addressed using 14 bits,
2122 * use EERD otherwise use bit bang 2094 * use EERD otherwise use bit bang
2123 */ 2095 */
2124 if ((eeprom->type == ixgbe_eeprom_spi) && 2096 if (eeprom->type == ixgbe_eeprom_spi &&
2125 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) 2097 offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
2126 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, 2098 return ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
2127 data);
2128 else
2129 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2130 words,
2131 data);
2132 2099
2133 return ret_val; 2100 return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
2101 data);
2134} 2102}
2135 2103
2136/** 2104/**
@@ -2147,19 +2115,15 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2147 u16 offset, u16 *data) 2115 u16 offset, u16 *data)
2148{ 2116{
2149 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2117 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2150 s32 ret_val = IXGBE_ERR_CONFIG;
2151 2118
2152 /* 2119 /*
2153 * If EEPROM is detected and can be addressed using 14 bits, 2120 * If EEPROM is detected and can be addressed using 14 bits,
2154 * use EERD otherwise use bit bang 2121 * use EERD otherwise use bit bang
2155 */ 2122 */
2156 if ((eeprom->type == ixgbe_eeprom_spi) && 2123 if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR)
2157 (offset <= IXGBE_EERD_MAX_ADDR)) 2124 return ixgbe_read_eerd_generic(hw, offset, data);
2158 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2159 else
2160 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2161 2125
2162 return ret_val; 2126 return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2163} 2127}
2164 2128
2165/** 2129/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index db759f98f9f0..b5f484bf3fda 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -122,8 +122,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
122 */ 122 */
123 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 123 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
124 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); 124 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
125 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 125 return IXGBE_ERR_INVALID_LINK_SETTINGS;
126 goto out;
127 } 126 }
128 127
129 /* 128 /*
@@ -143,7 +142,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
143 /* some MAC's need RMW protection on AUTOC */ 142 /* some MAC's need RMW protection on AUTOC */
144 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp); 143 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
145 if (ret_val) 144 if (ret_val)
146 goto out; 145 return ret_val;
147 146
148 /* only backplane uses autoc so fall though */ 147 /* only backplane uses autoc so fall though */
149 case ixgbe_media_type_fiber: 148 case ixgbe_media_type_fiber:
@@ -214,8 +213,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
214 break; 213 break;
215 default: 214 default:
216 hw_dbg(hw, "Flow control param set incorrectly\n"); 215 hw_dbg(hw, "Flow control param set incorrectly\n");
217 ret_val = IXGBE_ERR_CONFIG; 216 return IXGBE_ERR_CONFIG;
218 goto out;
219 } 217 }
220 218
221 if (hw->mac.type != ixgbe_mac_X540) { 219 if (hw->mac.type != ixgbe_mac_X540) {
@@ -246,7 +244,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
246 */ 244 */
247 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); 245 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
248 if (ret_val) 246 if (ret_val)
249 goto out; 247 return ret_val;
250 248
251 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 249 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
252 ixgbe_device_supports_autoneg_fc(hw)) { 250 ixgbe_device_supports_autoneg_fc(hw)) {
@@ -255,7 +253,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
255 } 253 }
256 254
257 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); 255 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
258out:
259 return ret_val; 256 return ret_val;
260} 257}
261 258
@@ -294,12 +291,11 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
294 /* Setup flow control */ 291 /* Setup flow control */
295 ret_val = ixgbe_setup_fc(hw); 292 ret_val = ixgbe_setup_fc(hw);
296 if (!ret_val) 293 if (!ret_val)
297 goto out; 294 return 0;
298 295
299 /* Clear adapter stopped flag */ 296 /* Clear adapter stopped flag */
300 hw->adapter_stopped = false; 297 hw->adapter_stopped = false;
301 298
302out:
303 return ret_val; 299 return ret_val;
304} 300}
305 301
@@ -836,20 +832,16 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
836s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 832s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
837 u16 words, u16 *data) 833 u16 words, u16 *data)
838{ 834{
839 s32 status = 0; 835 s32 status;
840 u16 i, count; 836 u16 i, count;
841 837
842 hw->eeprom.ops.init_params(hw); 838 hw->eeprom.ops.init_params(hw);
843 839
844 if (words == 0) { 840 if (words == 0)
845 status = IXGBE_ERR_INVALID_ARGUMENT; 841 return IXGBE_ERR_INVALID_ARGUMENT;
846 goto out;
847 }
848 842
849 if (offset + words > hw->eeprom.word_size) { 843 if (offset + words > hw->eeprom.word_size)
850 status = IXGBE_ERR_EEPROM; 844 return IXGBE_ERR_EEPROM;
851 goto out;
852 }
853 845
854 /* 846 /*
855 * The EEPROM page size cannot be queried from the chip. We do lazy 847 * The EEPROM page size cannot be queried from the chip. We do lazy
@@ -874,7 +866,6 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
874 break; 866 break;
875 } 867 }
876 868
877out:
878 return status; 869 return status;
879} 870}
880 871
@@ -899,64 +890,61 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
899 890
900 /* Prepare the EEPROM for writing */ 891 /* Prepare the EEPROM for writing */
901 status = ixgbe_acquire_eeprom(hw); 892 status = ixgbe_acquire_eeprom(hw);
893 if (status)
894 return status;
902 895
903 if (status == 0) { 896 if (ixgbe_ready_eeprom(hw) != 0) {
904 if (ixgbe_ready_eeprom(hw) != 0) { 897 ixgbe_release_eeprom(hw);
905 ixgbe_release_eeprom(hw); 898 return IXGBE_ERR_EEPROM;
906 status = IXGBE_ERR_EEPROM;
907 }
908 } 899 }
909 900
910 if (status == 0) { 901 for (i = 0; i < words; i++) {
911 for (i = 0; i < words; i++) { 902 ixgbe_standby_eeprom(hw);
912 ixgbe_standby_eeprom(hw); 903
904 /* Send the WRITE ENABLE command (8 bit opcode) */
905 ixgbe_shift_out_eeprom_bits(hw,
906 IXGBE_EEPROM_WREN_OPCODE_SPI,
907 IXGBE_EEPROM_OPCODE_BITS);
913 908
914 /* Send the WRITE ENABLE command (8 bit opcode ) */ 909 ixgbe_standby_eeprom(hw);
915 ixgbe_shift_out_eeprom_bits(hw,
916 IXGBE_EEPROM_WREN_OPCODE_SPI,
917 IXGBE_EEPROM_OPCODE_BITS);
918 910
919 ixgbe_standby_eeprom(hw); 911 /* Some SPI eeproms use the 8th address bit embedded
912 * in the opcode
913 */
914 if ((hw->eeprom.address_bits == 8) &&
915 ((offset + i) >= 128))
916 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
920 917
921 /* 918 /* Send the Write command (8-bit opcode + addr) */
922 * Some SPI eeproms use the 8th address bit embedded 919 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
923 * in the opcode 920 IXGBE_EEPROM_OPCODE_BITS);
924 */ 921 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
925 if ((hw->eeprom.address_bits == 8) && 922 hw->eeprom.address_bits);
926 ((offset + i) >= 128)) 923
927 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 924 page_size = hw->eeprom.word_page_size;
928 925
929 /* Send the Write command (8-bit opcode + addr) */ 926 /* Send the data in burst via SPI */
930 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 927 do {
931 IXGBE_EEPROM_OPCODE_BITS); 928 word = data[i];
932 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 929 word = (word >> 8) | (word << 8);
933 hw->eeprom.address_bits); 930 ixgbe_shift_out_eeprom_bits(hw, word, 16);
934 931
935 page_size = hw->eeprom.word_page_size; 932 if (page_size == 0)
936 933 break;
937 /* Send the data in burst via SPI*/ 934
938 do { 935 /* do not wrap around page */
939 word = data[i]; 936 if (((offset + i) & (page_size - 1)) ==
940 word = (word >> 8) | (word << 8); 937 (page_size - 1))
941 ixgbe_shift_out_eeprom_bits(hw, word, 16); 938 break;
942 939 } while (++i < words);
943 if (page_size == 0) 940
944 break; 941 ixgbe_standby_eeprom(hw);
945 942 usleep_range(10000, 20000);
946 /* do not wrap around page */
947 if (((offset + i) & (page_size - 1)) ==
948 (page_size - 1))
949 break;
950 } while (++i < words);
951
952 ixgbe_standby_eeprom(hw);
953 usleep_range(10000, 20000);
954 }
955 /* Done with writing - release the EEPROM */
956 ixgbe_release_eeprom(hw);
957 } 943 }
944 /* Done with writing - release the EEPROM */
945 ixgbe_release_eeprom(hw);
958 946
959 return status; 947 return 0;
960} 948}
961 949
962/** 950/**
@@ -970,19 +958,12 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
970 **/ 958 **/
971s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 959s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
972{ 960{
973 s32 status;
974
975 hw->eeprom.ops.init_params(hw); 961 hw->eeprom.ops.init_params(hw);
976 962
977 if (offset >= hw->eeprom.word_size) { 963 if (offset >= hw->eeprom.word_size)
978 status = IXGBE_ERR_EEPROM; 964 return IXGBE_ERR_EEPROM;
979 goto out;
980 }
981 965
982 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); 966 return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
983
984out:
985 return status;
986} 967}
987 968
988/** 969/**
@@ -997,20 +978,16 @@ out:
997s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 978s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
998 u16 words, u16 *data) 979 u16 words, u16 *data)
999{ 980{
1000 s32 status = 0; 981 s32 status;
1001 u16 i, count; 982 u16 i, count;
1002 983
1003 hw->eeprom.ops.init_params(hw); 984 hw->eeprom.ops.init_params(hw);
1004 985
1005 if (words == 0) { 986 if (words == 0)
1006 status = IXGBE_ERR_INVALID_ARGUMENT; 987 return IXGBE_ERR_INVALID_ARGUMENT;
1007 goto out;
1008 }
1009 988
1010 if (offset + words > hw->eeprom.word_size) { 989 if (offset + words > hw->eeprom.word_size)
1011 status = IXGBE_ERR_EEPROM; 990 return IXGBE_ERR_EEPROM;
1012 goto out;
1013 }
1014 991
1015 /* 992 /*
1016 * We cannot hold synchronization semaphores for too long 993 * We cannot hold synchronization semaphores for too long
@@ -1024,12 +1001,11 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1024 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, 1001 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1025 count, &data[i]); 1002 count, &data[i]);
1026 1003
1027 if (status != 0) 1004 if (status)
1028 break; 1005 return status;
1029 } 1006 }
1030 1007
1031out: 1008 return 0;
1032 return status;
1033} 1009}
1034 1010
1035/** 1011/**
@@ -1051,41 +1027,38 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1051 1027
1052 /* Prepare the EEPROM for reading */ 1028 /* Prepare the EEPROM for reading */
1053 status = ixgbe_acquire_eeprom(hw); 1029 status = ixgbe_acquire_eeprom(hw);
1030 if (status)
1031 return status;
1054 1032
1055 if (status == 0) { 1033 if (ixgbe_ready_eeprom(hw) != 0) {
1056 if (ixgbe_ready_eeprom(hw) != 0) { 1034 ixgbe_release_eeprom(hw);
1057 ixgbe_release_eeprom(hw); 1035 return IXGBE_ERR_EEPROM;
1058 status = IXGBE_ERR_EEPROM;
1059 }
1060 } 1036 }
1061 1037
1062 if (status == 0) { 1038 for (i = 0; i < words; i++) {
1063 for (i = 0; i < words; i++) { 1039 ixgbe_standby_eeprom(hw);
1064 ixgbe_standby_eeprom(hw); 1040 /* Some SPI eeproms use the 8th address bit embedded
1065 /* 1041 * in the opcode
1066 * Some SPI eeproms use the 8th address bit embedded 1042 */
1067 * in the opcode 1043 if ((hw->eeprom.address_bits == 8) &&
1068 */ 1044 ((offset + i) >= 128))
1069 if ((hw->eeprom.address_bits == 8) && 1045 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1070 ((offset + i) >= 128))
1071 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1072
1073 /* Send the READ command (opcode + addr) */
1074 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1075 IXGBE_EEPROM_OPCODE_BITS);
1076 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1077 hw->eeprom.address_bits);
1078
1079 /* Read the data. */
1080 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1081 data[i] = (word_in >> 8) | (word_in << 8);
1082 }
1083 1046
1084 /* End this read operation */ 1047 /* Send the READ command (opcode + addr) */
1085 ixgbe_release_eeprom(hw); 1048 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1049 IXGBE_EEPROM_OPCODE_BITS);
1050 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1051 hw->eeprom.address_bits);
1052
1053 /* Read the data. */
1054 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1055 data[i] = (word_in >> 8) | (word_in << 8);
1086 } 1056 }
1087 1057
1088 return status; 1058 /* End this read operation */
1059 ixgbe_release_eeprom(hw);
1060
1061 return 0;
1089} 1062}
1090 1063
1091/** 1064/**
@@ -1099,19 +1072,12 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1099s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1072s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1100 u16 *data) 1073 u16 *data)
1101{ 1074{
1102 s32 status;
1103
1104 hw->eeprom.ops.init_params(hw); 1075 hw->eeprom.ops.init_params(hw);
1105 1076
1106 if (offset >= hw->eeprom.word_size) { 1077 if (offset >= hw->eeprom.word_size)
1107 status = IXGBE_ERR_EEPROM; 1078 return IXGBE_ERR_EEPROM;
1108 goto out;
1109 }
1110
1111 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1112 1079
1113out: 1080 return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1114 return status;
1115} 1081}
1116 1082
1117/** 1083/**
@@ -1127,20 +1093,16 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1127 u16 words, u16 *data) 1093 u16 words, u16 *data)
1128{ 1094{
1129 u32 eerd; 1095 u32 eerd;
1130 s32 status = 0; 1096 s32 status;
1131 u32 i; 1097 u32 i;
1132 1098
1133 hw->eeprom.ops.init_params(hw); 1099 hw->eeprom.ops.init_params(hw);
1134 1100
1135 if (words == 0) { 1101 if (words == 0)
1136 status = IXGBE_ERR_INVALID_ARGUMENT; 1102 return IXGBE_ERR_INVALID_ARGUMENT;
1137 goto out;
1138 }
1139 1103
1140 if (offset >= hw->eeprom.word_size) { 1104 if (offset >= hw->eeprom.word_size)
1141 status = IXGBE_ERR_EEPROM; 1105 return IXGBE_ERR_EEPROM;
1142 goto out;
1143 }
1144 1106
1145 for (i = 0; i < words; i++) { 1107 for (i = 0; i < words; i++) {
1146 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1108 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1154,11 +1116,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1154 IXGBE_EEPROM_RW_REG_DATA); 1116 IXGBE_EEPROM_RW_REG_DATA);
1155 } else { 1117 } else {
1156 hw_dbg(hw, "Eeprom read timed out\n"); 1118 hw_dbg(hw, "Eeprom read timed out\n");
1157 goto out; 1119 return status;
1158 } 1120 }
1159 } 1121 }
1160out: 1122
1161 return status; 1123 return 0;
1162} 1124}
1163 1125
1164/** 1126/**
@@ -1174,7 +1136,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1174 u16 offset) 1136 u16 offset)
1175{ 1137{
1176 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; 1138 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1177 s32 status = 0; 1139 s32 status;
1178 u16 i; 1140 u16 i;
1179 1141
1180 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) 1142 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1184,12 +1146,12 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1184 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1146 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1185 IXGBE_EEPROM_PAGE_SIZE_MAX, data); 1147 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1186 hw->eeprom.word_page_size = 0; 1148 hw->eeprom.word_page_size = 0;
1187 if (status != 0) 1149 if (status)
1188 goto out; 1150 return status;
1189 1151
1190 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); 1152 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1191 if (status != 0) 1153 if (status)
1192 goto out; 1154 return status;
1193 1155
1194 /* 1156 /*
1195 * When writing in burst more than the actual page size 1157 * When writing in burst more than the actual page size
@@ -1199,8 +1161,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1199 1161
1200 hw_dbg(hw, "Detected EEPROM page size = %d words.\n", 1162 hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
1201 hw->eeprom.word_page_size); 1163 hw->eeprom.word_page_size);
1202out: 1164 return 0;
1203 return status;
1204} 1165}
1205 1166
1206/** 1167/**
@@ -1229,20 +1190,16 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1229 u16 words, u16 *data) 1190 u16 words, u16 *data)
1230{ 1191{
1231 u32 eewr; 1192 u32 eewr;
1232 s32 status = 0; 1193 s32 status;
1233 u16 i; 1194 u16 i;
1234 1195
1235 hw->eeprom.ops.init_params(hw); 1196 hw->eeprom.ops.init_params(hw);
1236 1197
1237 if (words == 0) { 1198 if (words == 0)
1238 status = IXGBE_ERR_INVALID_ARGUMENT; 1199 return IXGBE_ERR_INVALID_ARGUMENT;
1239 goto out;
1240 }
1241 1200
1242 if (offset >= hw->eeprom.word_size) { 1201 if (offset >= hw->eeprom.word_size)
1243 status = IXGBE_ERR_EEPROM; 1202 return IXGBE_ERR_EEPROM;
1244 goto out;
1245 }
1246 1203
1247 for (i = 0; i < words; i++) { 1204 for (i = 0; i < words; i++) {
1248 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1205 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1250,22 +1207,21 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1250 IXGBE_EEPROM_RW_REG_START; 1207 IXGBE_EEPROM_RW_REG_START;
1251 1208
1252 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1209 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1253 if (status != 0) { 1210 if (status) {
1254 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 1211 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1255 goto out; 1212 return status;
1256 } 1213 }
1257 1214
1258 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); 1215 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1259 1216
1260 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1217 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1261 if (status != 0) { 1218 if (status) {
1262 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 1219 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1263 goto out; 1220 return status;
1264 } 1221 }
1265 } 1222 }
1266 1223
1267out: 1224 return 0;
1268 return status;
1269} 1225}
1270 1226
1271/** 1227/**
@@ -1293,7 +1249,6 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1293{ 1249{
1294 u32 i; 1250 u32 i;
1295 u32 reg; 1251 u32 reg;
1296 s32 status = IXGBE_ERR_EEPROM;
1297 1252
1298 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { 1253 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1299 if (ee_reg == IXGBE_NVM_POLL_READ) 1254 if (ee_reg == IXGBE_NVM_POLL_READ)
@@ -1302,12 +1257,11 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1302 reg = IXGBE_READ_REG(hw, IXGBE_EEWR); 1257 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1303 1258
1304 if (reg & IXGBE_EEPROM_RW_REG_DONE) { 1259 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1305 status = 0; 1260 return 0;
1306 break;
1307 } 1261 }
1308 udelay(5); 1262 udelay(5);
1309 } 1263 }
1310 return status; 1264 return IXGBE_ERR_EEPROM;
1311} 1265}
1312 1266
1313/** 1267/**
@@ -1319,47 +1273,42 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1319 **/ 1273 **/
1320static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 1274static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1321{ 1275{
1322 s32 status = 0;
1323 u32 eec; 1276 u32 eec;
1324 u32 i; 1277 u32 i;
1325 1278
1326 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 1279 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
1327 status = IXGBE_ERR_SWFW_SYNC; 1280 return IXGBE_ERR_SWFW_SYNC;
1328 1281
1329 if (status == 0) { 1282 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1330 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1331
1332 /* Request EEPROM Access */
1333 eec |= IXGBE_EEC_REQ;
1334 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1335 1283
1336 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { 1284 /* Request EEPROM Access */
1337 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1285 eec |= IXGBE_EEC_REQ;
1338 if (eec & IXGBE_EEC_GNT) 1286 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1339 break;
1340 udelay(5);
1341 }
1342 1287
1343 /* Release if grant not acquired */ 1288 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1344 if (!(eec & IXGBE_EEC_GNT)) { 1289 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1345 eec &= ~IXGBE_EEC_REQ; 1290 if (eec & IXGBE_EEC_GNT)
1346 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1291 break;
1347 hw_dbg(hw, "Could not acquire EEPROM grant\n"); 1292 udelay(5);
1293 }
1348 1294
1349 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1295 /* Release if grant not acquired */
1350 status = IXGBE_ERR_EEPROM; 1296 if (!(eec & IXGBE_EEC_GNT)) {
1351 } 1297 eec &= ~IXGBE_EEC_REQ;
1298 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1299 hw_dbg(hw, "Could not acquire EEPROM grant\n");
1352 1300
1353 /* Setup EEPROM for Read/Write */ 1301 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1354 if (status == 0) { 1302 return IXGBE_ERR_EEPROM;
1355 /* Clear CS and SK */
1356 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1357 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1358 IXGBE_WRITE_FLUSH(hw);
1359 udelay(1);
1360 }
1361 } 1303 }
1362 return status; 1304
1305 /* Setup EEPROM for Read/Write */
1306 /* Clear CS and SK */
1307 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1308 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1309 IXGBE_WRITE_FLUSH(hw);
1310 udelay(1);
1311 return 0;
1363} 1312}
1364 1313
1365/** 1314/**
@@ -1370,7 +1319,6 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1370 **/ 1319 **/
1371static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 1320static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1372{ 1321{
1373 s32 status = IXGBE_ERR_EEPROM;
1374 u32 timeout = 2000; 1322 u32 timeout = 2000;
1375 u32 i; 1323 u32 i;
1376 u32 swsm; 1324 u32 swsm;
@@ -1382,17 +1330,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1382 * set and we have the semaphore 1330 * set and we have the semaphore
1383 */ 1331 */
1384 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1332 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1385 if (!(swsm & IXGBE_SWSM_SMBI)) { 1333 if (!(swsm & IXGBE_SWSM_SMBI))
1386 status = 0;
1387 break; 1334 break;
1388 }
1389 usleep_range(50, 100); 1335 usleep_range(50, 100);
1390 } 1336 }
1391 1337
1392 if (i == timeout) { 1338 if (i == timeout) {
1393 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); 1339 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
1394 /* 1340 /* this release is particularly important because our attempts
1395 * this release is particularly important because our attempts
1396 * above to get the semaphore may have succeeded, and if there 1341 * above to get the semaphore may have succeeded, and if there
1397 * was a timeout, we should unconditionally clear the semaphore 1342 * was a timeout, we should unconditionally clear the semaphore
1398 * bits to free the driver to make progress 1343 * bits to free the driver to make progress
@@ -1400,50 +1345,45 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1400 ixgbe_release_eeprom_semaphore(hw); 1345 ixgbe_release_eeprom_semaphore(hw);
1401 1346
1402 usleep_range(50, 100); 1347 usleep_range(50, 100);
1403 /* 1348 /* one last try
1404 * one last try
1405 * If the SMBI bit is 0 when we read it, then the bit will be 1349 * If the SMBI bit is 0 when we read it, then the bit will be
1406 * set and we have the semaphore 1350 * set and we have the semaphore
1407 */ 1351 */
1408 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1352 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1409 if (!(swsm & IXGBE_SWSM_SMBI)) 1353 if (swsm & IXGBE_SWSM_SMBI) {
1410 status = 0; 1354 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
1355 return IXGBE_ERR_EEPROM;
1356 }
1411 } 1357 }
1412 1358
1413 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 1359 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1414 if (status == 0) { 1360 for (i = 0; i < timeout; i++) {
1415 for (i = 0; i < timeout; i++) { 1361 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1416 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1417 1362
1418 /* Set the SW EEPROM semaphore bit to request access */ 1363 /* Set the SW EEPROM semaphore bit to request access */
1419 swsm |= IXGBE_SWSM_SWESMBI; 1364 swsm |= IXGBE_SWSM_SWESMBI;
1420 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); 1365 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1421 1366
1422 /* 1367 /* If we set the bit successfully then we got the
1423 * If we set the bit successfully then we got the 1368 * semaphore.
1424 * semaphore. 1369 */
1425 */ 1370 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1426 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1371 if (swsm & IXGBE_SWSM_SWESMBI)
1427 if (swsm & IXGBE_SWSM_SWESMBI) 1372 break;
1428 break;
1429 1373
1430 usleep_range(50, 100); 1374 usleep_range(50, 100);
1431 } 1375 }
1432 1376
1433 /* 1377 /* Release semaphores and return error if SW EEPROM semaphore
1434 * Release semaphores and return error if SW EEPROM semaphore 1378 * was not granted because we don't have access to the EEPROM
1435 * was not granted because we don't have access to the EEPROM 1379 */
1436 */ 1380 if (i >= timeout) {
1437 if (i >= timeout) { 1381 hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
1438 hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); 1382 ixgbe_release_eeprom_semaphore(hw);
1439 ixgbe_release_eeprom_semaphore(hw); 1383 return IXGBE_ERR_EEPROM;
1440 status = IXGBE_ERR_EEPROM;
1441 }
1442 } else {
1443 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
1444 } 1384 }
1445 1385
1446 return status; 1386 return 0;
1447} 1387}
1448 1388
1449/** 1389/**
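Reassembled from the new-side lines of the hunk above (indentation approximated; the SMBI half of the function is omitted), the SWESMBI portion of ixgbe_get_eeprom_semaphore() ends up as:

        /* Now get the semaphore between SW/FW through the SWESMBI bit */
        for (i = 0; i < timeout; i++) {
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

                /* Set the SW EEPROM semaphore bit to request access */
                swsm |= IXGBE_SWSM_SWESMBI;
                IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

                /* If we set the bit successfully then we got the
                 * semaphore.
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
                if (swsm & IXGBE_SWSM_SWESMBI)
                        break;

                usleep_range(50, 100);
        }

        /* Release semaphores and return error if SW EEPROM semaphore
         * was not granted because we don't have access to the EEPROM
         */
        if (i >= timeout) {
                hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
                ixgbe_release_eeprom_semaphore(hw);
                return IXGBE_ERR_EEPROM;
        }

        return 0;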
@@ -1470,7 +1410,6 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1470 **/ 1410 **/
1471static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) 1411static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1472{ 1412{
1473 s32 status = 0;
1474 u16 i; 1413 u16 i;
1475 u8 spi_stat_reg; 1414 u8 spi_stat_reg;
1476 1415
@@ -1497,10 +1436,10 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1497 */ 1436 */
1498 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 1437 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1499 hw_dbg(hw, "SPI EEPROM Status error\n"); 1438 hw_dbg(hw, "SPI EEPROM Status error\n");
1500 status = IXGBE_ERR_EEPROM; 1439 return IXGBE_ERR_EEPROM;
1501 } 1440 }
1502 1441
1503 return status; 1442 return 0;
1504} 1443}
1505 1444
1506/** 1445/**
@@ -2099,17 +2038,14 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2099 **/ 2038 **/
2100s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) 2039s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2101{ 2040{
2102 s32 ret_val = 0;
2103 u32 mflcn_reg, fccfg_reg; 2041 u32 mflcn_reg, fccfg_reg;
2104 u32 reg; 2042 u32 reg;
2105 u32 fcrtl, fcrth; 2043 u32 fcrtl, fcrth;
2106 int i; 2044 int i;
2107 2045
2108 /* Validate the water mark configuration. */ 2046 /* Validate the water mark configuration. */
2109 if (!hw->fc.pause_time) { 2047 if (!hw->fc.pause_time)
2110 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2048 return IXGBE_ERR_INVALID_LINK_SETTINGS;
2111 goto out;
2112 }
2113 2049
2114 /* Low water mark of zero causes XOFF floods */ 2050 /* Low water mark of zero causes XOFF floods */
2115 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2051 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -2118,8 +2054,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2118 if (!hw->fc.low_water[i] || 2054 if (!hw->fc.low_water[i] ||
2119 hw->fc.low_water[i] >= hw->fc.high_water[i]) { 2055 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2120 hw_dbg(hw, "Invalid water mark configuration\n"); 2056 hw_dbg(hw, "Invalid water mark configuration\n");
2121 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2057 return IXGBE_ERR_INVALID_LINK_SETTINGS;
2122 goto out;
2123 } 2058 }
2124 } 2059 }
2125 } 2060 }
@@ -2176,8 +2111,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2176 break; 2111 break;
2177 default: 2112 default:
2178 hw_dbg(hw, "Flow control param set incorrectly\n"); 2113 hw_dbg(hw, "Flow control param set incorrectly\n");
2179 ret_val = IXGBE_ERR_CONFIG; 2114 return IXGBE_ERR_CONFIG;
2180 goto out;
2181 } 2115 }
2182 2116
2183 /* Set 802.3x based flow control settings. */ 2117 /* Set 802.3x based flow control settings. */
@@ -2213,8 +2147,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2213 2147
2214 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 2148 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2215 2149
2216out: 2150 return 0;
2217 return ret_val;
2218} 2151}
2219 2152
2220/** 2153/**
@@ -2275,7 +2208,7 @@ static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2275static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2208static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2276{ 2209{
2277 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2210 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2278 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2211 s32 ret_val;
2279 2212
2280 /* 2213 /*
2281 * On multispeed fiber at 1g, bail out if 2214 * On multispeed fiber at 1g, bail out if
@@ -2286,7 +2219,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2286 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2219 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2287 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2220 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2288 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) 2221 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2289 goto out; 2222 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2290 2223
2291 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2224 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2292 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2225 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2297,7 +2230,6 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2297 IXGBE_PCS1GANA_SYM_PAUSE, 2230 IXGBE_PCS1GANA_SYM_PAUSE,
2298 IXGBE_PCS1GANA_ASM_PAUSE); 2231 IXGBE_PCS1GANA_ASM_PAUSE);
2299 2232
2300out:
2301 return ret_val; 2233 return ret_val;
2302} 2234}
2303 2235
@@ -2310,7 +2242,7 @@ out:
2310static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2242static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2311{ 2243{
2312 u32 links2, anlp1_reg, autoc_reg, links; 2244 u32 links2, anlp1_reg, autoc_reg, links;
2313 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2245 s32 ret_val;
2314 2246
2315 /* 2247 /*
2316 * On backplane, bail out if 2248 * On backplane, bail out if
@@ -2319,12 +2251,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2319 */ 2251 */
2320 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2252 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2321 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) 2253 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2322 goto out; 2254 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2323 2255
2324 if (hw->mac.type == ixgbe_mac_82599EB) { 2256 if (hw->mac.type == ixgbe_mac_82599EB) {
2325 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2257 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2326 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) 2258 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2327 goto out; 2259 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2328 } 2260 }
2329 /* 2261 /*
2330 * Read the 10g AN autoc and LP ability registers and resolve 2262 * Read the 10g AN autoc and LP ability registers and resolve
@@ -2337,7 +2269,6 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2337 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2269 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2338 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2270 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2339 2271
2340out:
2341 return ret_val; 2272 return ret_val;
2342} 2273}
2343 2274
@@ -2483,7 +2414,6 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2483 **/ 2414 **/
2484static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2415static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2485{ 2416{
2486 s32 status = 0;
2487 u32 i, poll; 2417 u32 i, poll;
2488 u16 value; 2418 u16 value;
2489 2419
@@ -2493,13 +2423,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2493 /* Exit if master requests are blocked */ 2423 /* Exit if master requests are blocked */
2494 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || 2424 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
2495 ixgbe_removed(hw->hw_addr)) 2425 ixgbe_removed(hw->hw_addr))
2496 goto out; 2426 return 0;
2497 2427
2498 /* Poll for master request bit to clear */ 2428 /* Poll for master request bit to clear */
2499 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2429 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2500 udelay(100); 2430 udelay(100);
2501 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2431 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2502 goto out; 2432 return 0;
2503 } 2433 }
2504 2434
2505 /* 2435 /*
@@ -2522,16 +2452,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2522 udelay(100); 2452 udelay(100);
2523 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); 2453 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
2524 if (ixgbe_removed(hw->hw_addr)) 2454 if (ixgbe_removed(hw->hw_addr))
2525 goto out; 2455 return 0;
2526 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2456 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2527 goto out; 2457 return 0;
2528 } 2458 }
2529 2459
2530 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); 2460 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2531 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2461 return IXGBE_ERR_MASTER_REQUESTS_PENDING;
2532
2533out:
2534 return status;
2535} 2462}
2536 2463
2537/** 2464/**
@@ -2706,8 +2633,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2706 bool link_up = false; 2633 bool link_up = false;
2707 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2634 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2708 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2635 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2709 s32 ret_val = 0;
2710 bool locked = false; 2636 bool locked = false;
2637 s32 ret_val;
2711 2638
2712 /* 2639 /*
2713 * Link must be up to auto-blink the LEDs; 2640 * Link must be up to auto-blink the LEDs;
@@ -2718,14 +2645,14 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2718 if (!link_up) { 2645 if (!link_up) {
2719 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2646 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2720 if (ret_val) 2647 if (ret_val)
2721 goto out; 2648 return ret_val;
2722 2649
2723 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2650 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2724 autoc_reg |= IXGBE_AUTOC_FLU; 2651 autoc_reg |= IXGBE_AUTOC_FLU;
2725 2652
2726 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2653 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2727 if (ret_val) 2654 if (ret_val)
2728 goto out; 2655 return ret_val;
2729 2656
2730 IXGBE_WRITE_FLUSH(hw); 2657 IXGBE_WRITE_FLUSH(hw);
2731 2658
@@ -2737,8 +2664,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2737 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2664 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2738 IXGBE_WRITE_FLUSH(hw); 2665 IXGBE_WRITE_FLUSH(hw);
2739 2666
2740out: 2667 return 0;
2741 return ret_val;
2742} 2668}
2743 2669
2744/** 2670/**
@@ -2750,19 +2676,19 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2750{ 2676{
2751 u32 autoc_reg = 0; 2677 u32 autoc_reg = 0;
2752 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2678 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2753 s32 ret_val = 0;
2754 bool locked = false; 2679 bool locked = false;
2680 s32 ret_val;
2755 2681
2756 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2682 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2757 if (ret_val) 2683 if (ret_val)
2758 goto out; 2684 return ret_val;
2759 2685
2760 autoc_reg &= ~IXGBE_AUTOC_FLU; 2686 autoc_reg &= ~IXGBE_AUTOC_FLU;
2761 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2687 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2762 2688
2763 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2689 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2764 if (ret_val) 2690 if (ret_val)
2765 goto out; 2691 return ret_val;
2766 2692
2767 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2693 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2768 led_reg &= ~IXGBE_LED_BLINK(index); 2694 led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2770,8 +2696,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2770 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2696 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2771 IXGBE_WRITE_FLUSH(hw); 2697 IXGBE_WRITE_FLUSH(hw);
2772 2698
2773out: 2699 return 0;
2774 return ret_val;
2775} 2700}
2776 2701
2777/** 2702/**
@@ -2863,7 +2788,7 @@ san_mac_addr_clr:
2863 **/ 2788 **/
2864u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2789u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2865{ 2790{
2866 u16 msix_count = 1; 2791 u16 msix_count;
2867 u16 max_msix_count; 2792 u16 max_msix_count;
2868 u16 pcie_offset; 2793 u16 pcie_offset;
2869 2794
@@ -2878,7 +2803,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2878 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2803 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2879 break; 2804 break;
2880 default: 2805 default:
2881 return msix_count; 2806 return 1;
2882 } 2807 }
2883 2808
2884 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); 2809 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
@@ -2916,10 +2841,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2916 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2841 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2917 2842
2918 if (ixgbe_removed(hw->hw_addr)) 2843 if (ixgbe_removed(hw->hw_addr))
2919 goto done; 2844 return 0;
2920 2845
2921 if (!mpsar_lo && !mpsar_hi) 2846 if (!mpsar_lo && !mpsar_hi)
2922 goto done; 2847 return 0;
2923 2848
2924 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2849 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2925 if (mpsar_lo) { 2850 if (mpsar_lo) {
@@ -2941,7 +2866,6 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2941 /* was that the last pool using this rar? */ 2866 /* was that the last pool using this rar? */
2942 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2867 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2943 hw->mac.ops.clear_rar(hw, rar); 2868 hw->mac.ops.clear_rar(hw, rar);
2944done:
2945 return 0; 2869 return 0;
2946} 2870}
2947 2871
@@ -3310,14 +3234,14 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3310 3234
3311 if ((alt_san_mac_blk_offset == 0) || 3235 if ((alt_san_mac_blk_offset == 0) ||
3312 (alt_san_mac_blk_offset == 0xFFFF)) 3236 (alt_san_mac_blk_offset == 0xFFFF))
3313 goto wwn_prefix_out; 3237 return 0;
3314 3238
3315 /* check capability in alternative san mac address block */ 3239 /* check capability in alternative san mac address block */
3316 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3240 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3317 if (hw->eeprom.ops.read(hw, offset, &caps)) 3241 if (hw->eeprom.ops.read(hw, offset, &caps))
3318 goto wwn_prefix_err; 3242 goto wwn_prefix_err;
3319 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3243 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3320 goto wwn_prefix_out; 3244 return 0;
3321 3245
3322 /* get the corresponding prefix for WWNN/WWPN */ 3246 /* get the corresponding prefix for WWNN/WWPN */
3323 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3247 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
@@ -3328,7 +3252,6 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3328 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) 3252 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3329 goto wwn_prefix_err; 3253 goto wwn_prefix_err;
3330 3254
3331wwn_prefix_out:
3332 return 0; 3255 return 0;
3333 3256
3334wwn_prefix_err: 3257wwn_prefix_err:
@@ -3522,21 +3445,17 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3522 u32 hdr_size = sizeof(struct ixgbe_hic_hdr); 3445 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3523 u8 buf_len, dword_len; 3446 u8 buf_len, dword_len;
3524 3447
3525 s32 ret_val = 0;
3526
3527 if (length == 0 || length & 0x3 || 3448 if (length == 0 || length & 0x3 ||
3528 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3449 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3529 hw_dbg(hw, "Buffer length failure.\n"); 3450 hw_dbg(hw, "Buffer length failure.\n");
3530 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3451 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3531 goto out;
3532 } 3452 }
3533 3453
3534 /* Check that the host interface is enabled. */ 3454 /* Check that the host interface is enabled. */
3535 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3455 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3536 if ((hicr & IXGBE_HICR_EN) == 0) { 3456 if ((hicr & IXGBE_HICR_EN) == 0) {
3537 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); 3457 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
3538 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3458 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3539 goto out;
3540 } 3459 }
3541 3460
3542 /* Calculate length in DWORDs */ 3461 /* Calculate length in DWORDs */
@@ -3564,8 +3483,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3564 if (i == IXGBE_HI_COMMAND_TIMEOUT || 3483 if (i == IXGBE_HI_COMMAND_TIMEOUT ||
3565 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { 3484 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
3566 hw_dbg(hw, "Command has failed with no status valid.\n"); 3485 hw_dbg(hw, "Command has failed with no status valid.\n");
3567 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3486 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3568 goto out;
3569 } 3487 }
3570 3488
3571 /* Calculate length in DWORDs */ 3489 /* Calculate length in DWORDs */
@@ -3580,12 +3498,11 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3580 /* If there is any thing in data position pull it in */ 3498 /* If there is any thing in data position pull it in */
3581 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; 3499 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3582 if (buf_len == 0) 3500 if (buf_len == 0)
3583 goto out; 3501 return 0;
3584 3502
3585 if (length < (buf_len + hdr_size)) { 3503 if (length < (buf_len + hdr_size)) {
3586 hw_dbg(hw, "Buffer not large enough for reply message.\n"); 3504 hw_dbg(hw, "Buffer not large enough for reply message.\n");
3587 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 3505 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3588 goto out;
3589 } 3506 }
3590 3507
3591 /* Calculate length in DWORDs, add 3 for odd lengths */ 3508 /* Calculate length in DWORDs, add 3 for odd lengths */
@@ -3597,8 +3514,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3597 le32_to_cpus(&buffer[bi]); 3514 le32_to_cpus(&buffer[bi]);
3598 } 3515 }
3599 3516
3600out: 3517 return 0;
3601 return ret_val;
3602} 3518}
3603 3519
3604/** 3520/**
@@ -3619,12 +3535,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3619{ 3535{
3620 struct ixgbe_hic_drv_info fw_cmd; 3536 struct ixgbe_hic_drv_info fw_cmd;
3621 int i; 3537 int i;
3622 s32 ret_val = 0; 3538 s32 ret_val;
3623 3539
3624 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) { 3540 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
3625 ret_val = IXGBE_ERR_SWFW_SYNC; 3541 return IXGBE_ERR_SWFW_SYNC;
3626 goto out;
3627 }
3628 3542
3629 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 3543 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3630 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 3544 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3656,7 +3570,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3656 } 3570 }
3657 3571
3658 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 3572 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3659out:
3660 return ret_val; 3573 return ret_val;
3661} 3574}
3662 3575
@@ -3725,28 +3638,23 @@ static const u8 ixgbe_emc_therm_limit[4] = {
3725static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, 3638static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3726 u16 *ets_offset) 3639 u16 *ets_offset)
3727{ 3640{
3728 s32 status = 0; 3641 s32 status;
3729 3642
3730 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); 3643 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3731 if (status) 3644 if (status)
3732 goto out; 3645 return status;
3733 3646
3734 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) { 3647 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
3735 status = IXGBE_NOT_IMPLEMENTED; 3648 return IXGBE_NOT_IMPLEMENTED;
3736 goto out;
3737 }
3738 3649
3739 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); 3650 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3740 if (status) 3651 if (status)
3741 goto out; 3652 return status;
3742 3653
3743 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) { 3654 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
3744 status = IXGBE_NOT_IMPLEMENTED; 3655 return IXGBE_NOT_IMPLEMENTED;
3745 goto out;
3746 }
3747 3656
3748out: 3657 return 0;
3749 return status;
3750} 3658}
3751 3659
3752/** 3660/**
@@ -3757,7 +3665,7 @@ out:
3757 **/ 3665 **/
3758s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) 3666s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3759{ 3667{
3760 s32 status = 0; 3668 s32 status;
3761 u16 ets_offset; 3669 u16 ets_offset;
3762 u16 ets_cfg; 3670 u16 ets_cfg;
3763 u16 ets_sensor; 3671 u16 ets_sensor;
@@ -3766,14 +3674,12 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3766 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 3674 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3767 3675
3768 /* Only support thermal sensors attached to physical port 0 */ 3676 /* Only support thermal sensors attached to physical port 0 */
3769 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { 3677 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
3770 status = IXGBE_NOT_IMPLEMENTED; 3678 return IXGBE_NOT_IMPLEMENTED;
3771 goto out;
3772 }
3773 3679
3774 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 3680 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3775 if (status) 3681 if (status)
3776 goto out; 3682 return status;
3777 3683
3778 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); 3684 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3779 if (num_sensors > IXGBE_MAX_SENSORS) 3685 if (num_sensors > IXGBE_MAX_SENSORS)
@@ -3786,7 +3692,7 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3786 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), 3692 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
3787 &ets_sensor); 3693 &ets_sensor);
3788 if (status) 3694 if (status)
3789 goto out; 3695 return status;
3790 3696
3791 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3697 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3792 IXGBE_ETS_DATA_INDEX_SHIFT); 3698 IXGBE_ETS_DATA_INDEX_SHIFT);
@@ -3799,11 +3705,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3799 IXGBE_I2C_THERMAL_SENSOR_ADDR, 3705 IXGBE_I2C_THERMAL_SENSOR_ADDR,
3800 &data->sensor[i].temp); 3706 &data->sensor[i].temp);
3801 if (status) 3707 if (status)
3802 goto out; 3708 return status;
3803 } 3709 }
3804 } 3710 }
3805out: 3711
3806 return status; 3712 return 0;
3807} 3713}
3808 3714
3809/** 3715/**
@@ -3815,7 +3721,7 @@ out:
3815 **/ 3721 **/
3816s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) 3722s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3817{ 3723{
3818 s32 status = 0; 3724 s32 status;
3819 u16 ets_offset; 3725 u16 ets_offset;
3820 u16 ets_cfg; 3726 u16 ets_cfg;
3821 u16 ets_sensor; 3727 u16 ets_sensor;
@@ -3828,14 +3734,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3828 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); 3734 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
3829 3735
3830 /* Only support thermal sensors attached to physical port 0 */ 3736 /* Only support thermal sensors attached to physical port 0 */
3831 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { 3737 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
3832 status = IXGBE_NOT_IMPLEMENTED; 3738 return IXGBE_NOT_IMPLEMENTED;
3833 goto out;
3834 }
3835 3739
3836 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); 3740 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3837 if (status) 3741 if (status)
3838 goto out; 3742 return status;
3839 3743
3840 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> 3744 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
3841 IXGBE_ETS_LTHRES_DELTA_SHIFT); 3745 IXGBE_ETS_LTHRES_DELTA_SHIFT);
@@ -3869,7 +3773,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3869 data->sensor[i].caution_thresh = therm_limit; 3773 data->sensor[i].caution_thresh = therm_limit;
3870 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; 3774 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
3871 } 3775 }
3872out: 3776
3873 return status; 3777 return 0;
3874} 3778}
3875 3779
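Reassembled from the new-side lines of the hunk above (indentation approximated), ixgbe_get_ets_data() now exits at each failed EEPROM read or sanity check and reaches the final return 0 only on success:

static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
                              u16 *ets_offset)
{
        s32 status;

        /* locate the ETS configuration block in the EEPROM */
        status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
        if (status)
                return status;

        if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
                return IXGBE_NOT_IMPLEMENTED;

        /* read the configuration word itself */
        status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
        if (status)
                return status;

        if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
                return IXGBE_NOT_IMPLEMENTED;

        return 0;
}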
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index a689ee0d4bed..48f35fc963f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -87,7 +87,6 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
87 int min_credit; 87 int min_credit;
88 int min_multiplier; 88 int min_multiplier;
89 int min_percent = 100; 89 int min_percent = 100;
90 s32 ret_val = 0;
91 /* Initialization values default for Tx settings */ 90 /* Initialization values default for Tx settings */
92 u32 credit_refill = 0; 91 u32 credit_refill = 0;
93 u32 credit_max = 0; 92 u32 credit_max = 0;
@@ -95,10 +94,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
95 u8 bw_percent = 0; 94 u8 bw_percent = 0;
96 u8 i; 95 u8 i;
97 96
98 if (dcb_config == NULL) { 97 if (!dcb_config)
99 ret_val = DCB_ERR_CONFIG; 98 return DCB_ERR_CONFIG;
100 goto out;
101 }
102 99
103 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / 100 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
104 DCB_CREDIT_QUANTUM; 101 DCB_CREDIT_QUANTUM;
@@ -174,8 +171,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
174 p->data_credits_max = (u16)credit_max; 171 p->data_credits_max = (u16)credit_max;
175 } 172 }
176 173
177out: 174 return 0;
178 return ret_val;
179} 175}
180 176
181void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) 177void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
@@ -236,7 +232,7 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
236 232
237 /* If tc is 0 then DCB is likely not enabled or supported */ 233 /* If tc is 0 then DCB is likely not enabled or supported */
238 if (!tc) 234 if (!tc)
239 goto out; 235 return 0;
240 236
241 /* 237 /*
242 * Test from maximum TC to 1 and report the first match we find. If 238 * Test from maximum TC to 1 and report the first match we find. If
@@ -247,7 +243,7 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
247 if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) 243 if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
248 break; 244 break;
249 } 245 }
250out: 246
251 return tc; 247 return tc;
252} 248}
253 249
@@ -269,7 +265,6 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
269s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, 265s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
270 struct ixgbe_dcb_config *dcb_config) 266 struct ixgbe_dcb_config *dcb_config)
271{ 267{
272 s32 ret = 0;
273 u8 pfc_en; 268 u8 pfc_en;
274 u8 ptype[MAX_TRAFFIC_CLASS]; 269 u8 ptype[MAX_TRAFFIC_CLASS];
275 u8 bwgid[MAX_TRAFFIC_CLASS]; 270 u8 bwgid[MAX_TRAFFIC_CLASS];
@@ -287,37 +282,31 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
287 282
288 switch (hw->mac.type) { 283 switch (hw->mac.type) {
289 case ixgbe_mac_82598EB: 284 case ixgbe_mac_82598EB:
290 ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, 285 return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
291 bwgid, ptype); 286 bwgid, ptype);
292 break;
293 case ixgbe_mac_82599EB: 287 case ixgbe_mac_82599EB:
294 case ixgbe_mac_X540: 288 case ixgbe_mac_X540:
295 ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, 289 return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
296 bwgid, ptype, prio_tc); 290 bwgid, ptype, prio_tc);
297 break;
298 default: 291 default:
299 break; 292 break;
300 } 293 }
301 return ret; 294 return 0;
302} 295}
303 296
304/* Helper routines to abstract HW specifics from DCB netlink ops */ 297/* Helper routines to abstract HW specifics from DCB netlink ops */
305s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) 298s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
306{ 299{
307 int ret = -EINVAL;
308
309 switch (hw->mac.type) { 300 switch (hw->mac.type) {
310 case ixgbe_mac_82598EB: 301 case ixgbe_mac_82598EB:
311 ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); 302 return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
312 break;
313 case ixgbe_mac_82599EB: 303 case ixgbe_mac_82599EB:
314 case ixgbe_mac_X540: 304 case ixgbe_mac_X540:
315 ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); 305 return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
316 break;
317 default: 306 default:
318 break; 307 break;
319 } 308 }
320 return ret; 309 return -EINVAL;
321} 310}
322 311
323s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) 312s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
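Reassembled from the new-side lines above, ixgbe_dcb_hw_pfc_config() hands the MAC-specific helper's result straight back and keeps -EINVAL only for unknown MAC types:

s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
        default:
                break;
        }
        return -EINVAL;
}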
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 75bcb2e08491..58a7f5312a96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -153,7 +153,6 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
153static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) 153static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
154{ 154{
155 struct ixgbe_adapter *adapter = netdev_priv(netdev); 155 struct ixgbe_adapter *adapter = netdev_priv(netdev);
156 int err = 0;
157 156
158 /* Fail command if not in CEE mode */ 157 /* Fail command if not in CEE mode */
159 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 158 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -161,12 +160,10 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
161 160
162 /* verify there is something to do, if not then exit */ 161 /* verify there is something to do, if not then exit */
163 if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 162 if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
164 goto out; 163 return 0;
165 164
166 err = ixgbe_setup_tc(netdev, 165 return !!ixgbe_setup_tc(netdev,
167 state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); 166 state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
168out:
169 return !!err;
170} 167}
171 168
172static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, 169static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -331,12 +328,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
331 328
332 /* Fail command if not in CEE mode */ 329 /* Fail command if not in CEE mode */
333 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 330 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
334 return ret; 331 return DCB_NO_HW_CHG;
335 332
336 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, 333 adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
337 MAX_TRAFFIC_CLASS); 334 MAX_TRAFFIC_CLASS);
338 if (!adapter->dcb_set_bitmap) 335 if (!adapter->dcb_set_bitmap)
339 return ret; 336 return DCB_NO_HW_CHG;
340 337
341 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { 338 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
342 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; 339 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
@@ -536,7 +533,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
536{ 533{
537 struct ixgbe_adapter *adapter = netdev_priv(dev); 534 struct ixgbe_adapter *adapter = netdev_priv(dev);
538 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; 535 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
539 int i, err = 0; 536 int i, err;
540 __u8 max_tc = 0; 537 __u8 max_tc = 0;
541 __u8 map_chg = 0; 538 __u8 map_chg = 0;
542 539
@@ -573,17 +570,15 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
573 if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) 570 if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
574 return -EINVAL; 571 return -EINVAL;
575 572
576 if (max_tc != netdev_get_num_tc(dev)) 573 if (max_tc != netdev_get_num_tc(dev)) {
577 err = ixgbe_setup_tc(dev, max_tc); 574 err = ixgbe_setup_tc(dev, max_tc);
578 else if (map_chg) 575 if (err)
576 return err;
577 } else if (map_chg) {
579 ixgbe_dcbnl_devreset(dev); 578 ixgbe_dcbnl_devreset(dev);
579 }
580 580
581 if (err) 581 return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
582 goto err_out;
583
584 err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
585err_out:
586 return err;
587} 582}
588 583
589static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, 584static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
@@ -647,10 +642,10 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
647 struct dcb_app *app) 642 struct dcb_app *app)
648{ 643{
649 struct ixgbe_adapter *adapter = netdev_priv(dev); 644 struct ixgbe_adapter *adapter = netdev_priv(dev);
650 int err = -EINVAL; 645 int err;
651 646
652 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) 647 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
653 return err; 648 return -EINVAL;
654 649
655 err = dcb_ieee_setapp(dev, app); 650 err = dcb_ieee_setapp(dev, app);
656 if (err) 651 if (err)
@@ -662,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
662 u8 app_mask = dcb_ieee_getapp_mask(dev, app); 657 u8 app_mask = dcb_ieee_getapp_mask(dev, app);
663 658
664 if (app_mask & (1 << adapter->fcoe.up)) 659 if (app_mask & (1 << adapter->fcoe.up))
665 return err; 660 return 0;
666 661
667 adapter->fcoe.up = app->priority; 662 adapter->fcoe.up = app->priority;
668 ixgbe_dcbnl_devreset(dev); 663 ixgbe_dcbnl_devreset(dev);
@@ -705,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
705 u8 app_mask = dcb_ieee_getapp_mask(dev, app); 700 u8 app_mask = dcb_ieee_getapp_mask(dev, app);
706 701
707 if (app_mask & (1 << adapter->fcoe.up)) 702 if (app_mask & (1 << adapter->fcoe.up))
708 return err; 703 return 0;
709 704
710 adapter->fcoe.up = app_mask ? 705 adapter->fcoe.up = app_mask ?
711 ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; 706 ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 25a3dfef33e8..2ad91cb04dab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -67,23 +67,23 @@ static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
67 */ 67 */
68int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) 68int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
69{ 69{
70 int len = 0; 70 int len;
71 struct ixgbe_fcoe *fcoe; 71 struct ixgbe_fcoe *fcoe;
72 struct ixgbe_adapter *adapter; 72 struct ixgbe_adapter *adapter;
73 struct ixgbe_fcoe_ddp *ddp; 73 struct ixgbe_fcoe_ddp *ddp;
74 u32 fcbuff; 74 u32 fcbuff;
75 75
76 if (!netdev) 76 if (!netdev)
77 goto out_ddp_put; 77 return 0;
78 78
79 if (xid >= IXGBE_FCOE_DDP_MAX) 79 if (xid >= IXGBE_FCOE_DDP_MAX)
80 goto out_ddp_put; 80 return 0;
81 81
82 adapter = netdev_priv(netdev); 82 adapter = netdev_priv(netdev);
83 fcoe = &adapter->fcoe; 83 fcoe = &adapter->fcoe;
84 ddp = &fcoe->ddp[xid]; 84 ddp = &fcoe->ddp[xid];
85 if (!ddp->udl) 85 if (!ddp->udl)
86 goto out_ddp_put; 86 return 0;
87 87
88 len = ddp->len; 88 len = ddp->len;
89 /* if there an error, force to invalidate ddp context */ 89 /* if there an error, force to invalidate ddp context */
@@ -114,7 +114,6 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
114 114
115 ixgbe_fcoe_clear_ddp(ddp); 115 ixgbe_fcoe_clear_ddp(ddp);
116 116
117out_ddp_put:
118 return len; 117 return len;
119} 118}
120 119
@@ -394,17 +393,17 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
394 xid = be16_to_cpu(fh->fh_rx_id); 393 xid = be16_to_cpu(fh->fh_rx_id);
395 394
396 if (xid >= IXGBE_FCOE_DDP_MAX) 395 if (xid >= IXGBE_FCOE_DDP_MAX)
397 goto ddp_out; 396 return -EINVAL;
398 397
399 fcoe = &adapter->fcoe; 398 fcoe = &adapter->fcoe;
400 ddp = &fcoe->ddp[xid]; 399 ddp = &fcoe->ddp[xid];
401 if (!ddp->udl) 400 if (!ddp->udl)
402 goto ddp_out; 401 return -EINVAL;
403 402
404 ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | 403 ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
405 IXGBE_RXDADV_ERR_FCERR); 404 IXGBE_RXDADV_ERR_FCERR);
406 if (ddp_err) 405 if (ddp_err)
407 goto ddp_out; 406 return -EINVAL;
408 407
409 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { 408 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
410 /* return 0 to bypass going to ULD for DDPed data */ 409 /* return 0 to bypass going to ULD for DDPed data */
@@ -447,7 +446,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
447 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); 446 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
448 crc->fcoe_eof = FC_EOF_T; 447 crc->fcoe_eof = FC_EOF_T;
449 } 448 }
450ddp_out: 449
451 return rc; 450 return rc;
452} 451}
453 452
@@ -878,7 +877,6 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
878 */ 877 */
879int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) 878int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
880{ 879{
881 int rc = -EINVAL;
882 u16 prefix = 0xffff; 880 u16 prefix = 0xffff;
883 struct ixgbe_adapter *adapter = netdev_priv(netdev); 881 struct ixgbe_adapter *adapter = netdev_priv(netdev);
884 struct ixgbe_mac_info *mac = &adapter->hw.mac; 882 struct ixgbe_mac_info *mac = &adapter->hw.mac;
@@ -903,9 +901,9 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
903 ((u64) mac->san_addr[3] << 16) | 901 ((u64) mac->san_addr[3] << 16) |
904 ((u64) mac->san_addr[4] << 8) | 902 ((u64) mac->san_addr[4] << 8) |
905 ((u64) mac->san_addr[5]); 903 ((u64) mac->san_addr[5]);
906 rc = 0; 904 return 0;
907 } 905 }
908 return rc; 906 return -EINVAL;
909} 907}
910 908
911/** 909/**
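Reassembled from the new-side lines of the first FCoE hunk above (the DDP teardown in the middle of the function is not touched by this commit and is elided here), the validation head of ixgbe_fcoe_ddp_put() still reports a length of 0 on every early exit:

int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
        int len;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_adapter *adapter;
        struct ixgbe_fcoe_ddp *ddp;
        u32 fcbuff;

        if (!netdev)
                return 0;

        if (xid >= IXGBE_FCOE_DDP_MAX)
                return 0;

        adapter = netdev_priv(netdev);
        fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (!ddp->udl)
                return 0;

        len = ddp->len;
        /* ... DDP context invalidation and cleanup, unchanged ... */

        return len;
}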
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e1f83ee03c6a..5384ed30298a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -570,7 +570,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
570 570
571 /* Print TX Ring Summary */ 571 /* Print TX Ring Summary */
572 if (!netdev || !netif_running(netdev)) 572 if (!netdev || !netif_running(netdev))
573 goto exit; 573 return;
574 574
575 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); 575 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
576 pr_info(" %s %s %s %s\n", 576 pr_info(" %s %s %s %s\n",
@@ -685,7 +685,7 @@ rx_ring_summary:
685 685
686 /* Print RX Rings */ 686 /* Print RX Rings */
687 if (!netif_msg_rx_status(adapter)) 687 if (!netif_msg_rx_status(adapter))
688 goto exit; 688 return;
689 689
690 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); 690 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
691 691
@@ -787,9 +787,6 @@ rx_ring_summary:
787 787
788 } 788 }
789 } 789 }
790
791exit:
792 return;
793} 790}
794 791
795static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 792static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
@@ -1011,7 +1008,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1011 u32 tx_done = ixgbe_get_tx_completed(tx_ring); 1008 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1012 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 1009 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1013 u32 tx_pending = ixgbe_get_tx_pending(tx_ring); 1010 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1014 bool ret = false;
1015 1011
1016 clear_check_for_tx_hang(tx_ring); 1012 clear_check_for_tx_hang(tx_ring);
1017 1013
@@ -1027,18 +1023,16 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1027 * run the check_tx_hang logic with a transmit completion 1023 * run the check_tx_hang logic with a transmit completion
1028 * pending but without time to complete it yet. 1024 * pending but without time to complete it yet.
1029 */ 1025 */
1030 if ((tx_done_old == tx_done) && tx_pending) { 1026 if (tx_done_old == tx_done && tx_pending)
1031 /* make sure it is true for two checks in a row */ 1027 /* make sure it is true for two checks in a row */
1032 ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, 1028 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1033 &tx_ring->state); 1029 &tx_ring->state);
1034 } else { 1030 /* update completed stats and continue */
1035 /* update completed stats and continue */ 1031 tx_ring->tx_stats.tx_done_old = tx_done;
1036 tx_ring->tx_stats.tx_done_old = tx_done; 1032 /* reset the countdown */
1037 /* reset the countdown */ 1033 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1038 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1039 }
1040 1034
1041 return ret; 1035 return false;
1042} 1036}
1043 1037
1044/** 1038/**
@@ -4701,18 +4695,18 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
4701 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); 4695 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
4702 4696
4703 if (ret) 4697 if (ret)
4704 goto link_cfg_out; 4698 return ret;
4705 4699
4706 speed = hw->phy.autoneg_advertised; 4700 speed = hw->phy.autoneg_advertised;
4707 if ((!speed) && (hw->mac.ops.get_link_capabilities)) 4701 if ((!speed) && (hw->mac.ops.get_link_capabilities))
4708 ret = hw->mac.ops.get_link_capabilities(hw, &speed, 4702 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
4709 &autoneg); 4703 &autoneg);
4710 if (ret) 4704 if (ret)
4711 goto link_cfg_out; 4705 return ret;
4712 4706
4713 if (hw->mac.ops.setup_link) 4707 if (hw->mac.ops.setup_link)
4714 ret = hw->mac.ops.setup_link(hw, speed, link_up); 4708 ret = hw->mac.ops.setup_link(hw, speed, link_up);
4715link_cfg_out: 4709
4716 return ret; 4710 return ret;
4717} 4711}
4718 4712
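Reassembled from the new-side lines of the ixgbe_check_tx_hang() hunk above, the decision at the end of the function becomes:

        if (tx_done_old == tx_done && tx_pending)
                /* make sure it is true for two checks in a row */
                return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
                                        &tx_ring->state);
        /* update completed stats and continue */
        tx_ring->tx_stats.tx_done_old = tx_done;
        /* reset the countdown */
        clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

        return false;

Returning the test_and_set_bit() result directly preserves the original behavior: the ring is only reported as hung when the armed bit was already set by the previous check, i.e. after two consecutive checks with no completion progress.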
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 50479575e131..cc8f0128286c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -43,16 +43,15 @@
43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) 43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
44{ 44{
45 struct ixgbe_mbx_info *mbx = &hw->mbx; 45 struct ixgbe_mbx_info *mbx = &hw->mbx;
46 s32 ret_val = IXGBE_ERR_MBX;
47 46
48 /* limit read to size of mailbox */ 47 /* limit read to size of mailbox */
49 if (size > mbx->size) 48 if (size > mbx->size)
50 size = mbx->size; 49 size = mbx->size;
51 50
52 if (mbx->ops.read) 51 if (!mbx->ops.read)
53 ret_val = mbx->ops.read(hw, msg, size, mbx_id); 52 return IXGBE_ERR_MBX;
54 53
55 return ret_val; 54 return mbx->ops.read(hw, msg, size, mbx_id);
56} 55}
57 56
58/** 57/**
@@ -87,12 +86,11 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
87s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) 86s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
88{ 87{
89 struct ixgbe_mbx_info *mbx = &hw->mbx; 88 struct ixgbe_mbx_info *mbx = &hw->mbx;
90 s32 ret_val = IXGBE_ERR_MBX;
91 89
92 if (mbx->ops.check_for_msg) 90 if (!mbx->ops.check_for_msg)
93 ret_val = mbx->ops.check_for_msg(hw, mbx_id); 91 return IXGBE_ERR_MBX;
94 92
95 return ret_val; 93 return mbx->ops.check_for_msg(hw, mbx_id);
96} 94}
97 95
98/** 96/**
@@ -105,12 +103,11 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
105s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) 103s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
106{ 104{
107 struct ixgbe_mbx_info *mbx = &hw->mbx; 105 struct ixgbe_mbx_info *mbx = &hw->mbx;
108 s32 ret_val = IXGBE_ERR_MBX;
109 106
110 if (mbx->ops.check_for_ack) 107 if (!mbx->ops.check_for_ack)
111 ret_val = mbx->ops.check_for_ack(hw, mbx_id); 108 return IXGBE_ERR_MBX;
112 109
113 return ret_val; 110 return mbx->ops.check_for_ack(hw, mbx_id);
114} 111}
115 112
116/** 113/**
@@ -123,12 +120,11 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
123s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) 120s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
124{ 121{
125 struct ixgbe_mbx_info *mbx = &hw->mbx; 122 struct ixgbe_mbx_info *mbx = &hw->mbx;
126 s32 ret_val = IXGBE_ERR_MBX;
127 123
128 if (mbx->ops.check_for_rst) 124 if (!mbx->ops.check_for_rst)
129 ret_val = mbx->ops.check_for_rst(hw, mbx_id); 125 return IXGBE_ERR_MBX;
130 126
131 return ret_val; 127 return mbx->ops.check_for_rst(hw, mbx_id);
132} 128}
133 129
134/** 130/**
@@ -144,17 +140,16 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
144 int countdown = mbx->timeout; 140 int countdown = mbx->timeout;
145 141
146 if (!countdown || !mbx->ops.check_for_msg) 142 if (!countdown || !mbx->ops.check_for_msg)
147 goto out; 143 return IXGBE_ERR_MBX;
148 144
149 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { 145 while (mbx->ops.check_for_msg(hw, mbx_id)) {
150 countdown--; 146 countdown--;
151 if (!countdown) 147 if (!countdown)
152 break; 148 return IXGBE_ERR_MBX;
153 udelay(mbx->usec_delay); 149 udelay(mbx->usec_delay);
154 } 150 }
155 151
156out: 152 return 0;
157 return countdown ? 0 : IXGBE_ERR_MBX;
158} 153}
159 154
160/** 155/**
@@ -170,17 +165,16 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
170 int countdown = mbx->timeout; 165 int countdown = mbx->timeout;
171 166
172 if (!countdown || !mbx->ops.check_for_ack) 167 if (!countdown || !mbx->ops.check_for_ack)
173 goto out; 168 return IXGBE_ERR_MBX;
174 169
175 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { 170 while (mbx->ops.check_for_ack(hw, mbx_id)) {
176 countdown--; 171 countdown--;
177 if (!countdown) 172 if (!countdown)
178 break; 173 return IXGBE_ERR_MBX;
179 udelay(mbx->usec_delay); 174 udelay(mbx->usec_delay);
180 } 175 }
181 176
182out: 177 return 0;
183 return countdown ? 0 : IXGBE_ERR_MBX;
184} 178}
185 179
186/** 180/**
@@ -197,18 +191,17 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
197 u16 mbx_id) 191 u16 mbx_id)
198{ 192{
199 struct ixgbe_mbx_info *mbx = &hw->mbx; 193 struct ixgbe_mbx_info *mbx = &hw->mbx;
200 s32 ret_val = IXGBE_ERR_MBX; 194 s32 ret_val;
201 195
202 if (!mbx->ops.read) 196 if (!mbx->ops.read)
203 goto out; 197 return IXGBE_ERR_MBX;
204 198
205 ret_val = ixgbe_poll_for_msg(hw, mbx_id); 199 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
200 if (ret_val)
201 return ret_val;
206 202
207 /* if ack received read message, otherwise we timed out */ 203 /* if ack received read message */
208 if (!ret_val) 204 return mbx->ops.read(hw, msg, size, mbx_id);
209 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
210out:
211 return ret_val;
212} 205}
213 206
214/** 207/**
@@ -225,33 +218,31 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
225 u16 mbx_id) 218 u16 mbx_id)
226{ 219{
227 struct ixgbe_mbx_info *mbx = &hw->mbx; 220 struct ixgbe_mbx_info *mbx = &hw->mbx;
228 s32 ret_val = IXGBE_ERR_MBX; 221 s32 ret_val;
229 222
230 /* exit if either we can't write or there isn't a defined timeout */ 223 /* exit if either we can't write or there isn't a defined timeout */
231 if (!mbx->ops.write || !mbx->timeout) 224 if (!mbx->ops.write || !mbx->timeout)
232 goto out; 225 return IXGBE_ERR_MBX;
233 226
234 /* send msg */ 227 /* send msg */
235 ret_val = mbx->ops.write(hw, msg, size, mbx_id); 228 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
229 if (ret_val)
230 return ret_val;
236 231
237 /* if msg sent wait until we receive an ack */ 232 /* if msg sent wait until we receive an ack */
238 if (!ret_val) 233 return ixgbe_poll_for_ack(hw, mbx_id);
239 ret_val = ixgbe_poll_for_ack(hw, mbx_id);
240out:
241 return ret_val;
242} 234}
243 235
244static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) 236static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
245{ 237{
246 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); 238 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
247 s32 ret_val = IXGBE_ERR_MBX;
248 239
249 if (mbvficr & mask) { 240 if (mbvficr & mask) {
250 ret_val = 0;
251 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); 241 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
242 return 0;
252 } 243 }
253 244
254 return ret_val; 245 return IXGBE_ERR_MBX;
255} 246}
256 247
257/** 248/**
@@ -263,17 +254,16 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
263 **/ 254 **/
264static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) 255static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
265{ 256{
266 s32 ret_val = IXGBE_ERR_MBX;
267 s32 index = IXGBE_MBVFICR_INDEX(vf_number); 257 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
268 u32 vf_bit = vf_number % 16; 258 u32 vf_bit = vf_number % 16;
269 259
270 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, 260 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
271 index)) { 261 index)) {
272 ret_val = 0;
273 hw->mbx.stats.reqs++; 262 hw->mbx.stats.reqs++;
263 return 0;
274 } 264 }
275 265
276 return ret_val; 266 return IXGBE_ERR_MBX;
277} 267}
278 268
279/** 269/**
@@ -285,17 +275,16 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
285 **/ 275 **/
286static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) 276static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
287{ 277{
288 s32 ret_val = IXGBE_ERR_MBX;
289 s32 index = IXGBE_MBVFICR_INDEX(vf_number); 278 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
290 u32 vf_bit = vf_number % 16; 279 u32 vf_bit = vf_number % 16;
291 280
292 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, 281 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
293 index)) { 282 index)) {
294 ret_val = 0;
295 hw->mbx.stats.acks++; 283 hw->mbx.stats.acks++;
284 return 0;
296 } 285 }
297 286
298 return ret_val; 287 return IXGBE_ERR_MBX;
299} 288}
300 289
301/** 290/**
@@ -310,7 +299,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
310 u32 reg_offset = (vf_number < 32) ? 0 : 1; 299 u32 reg_offset = (vf_number < 32) ? 0 : 1;
311 u32 vf_shift = vf_number % 32; 300 u32 vf_shift = vf_number % 32;
312 u32 vflre = 0; 301 u32 vflre = 0;
313 s32 ret_val = IXGBE_ERR_MBX;
314 302
315 switch (hw->mac.type) { 303 switch (hw->mac.type) {
316 case ixgbe_mac_82599EB: 304 case ixgbe_mac_82599EB:
@@ -324,12 +312,12 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
324 } 312 }
325 313
326 if (vflre & (1 << vf_shift)) { 314 if (vflre & (1 << vf_shift)) {
327 ret_val = 0;
328 IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); 315 IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
329 hw->mbx.stats.rsts++; 316 hw->mbx.stats.rsts++;
317 return 0;
330 } 318 }
331 319
332 return ret_val; 320 return IXGBE_ERR_MBX;
333} 321}
334 322
335/** 323/**
@@ -341,7 +329,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
341 **/ 329 **/
342static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) 330static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
343{ 331{
344 s32 ret_val = IXGBE_ERR_MBX;
345 u32 p2v_mailbox; 332 u32 p2v_mailbox;
346 333
347 /* Take ownership of the buffer */ 334 /* Take ownership of the buffer */
@@ -350,9 +337,9 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
350 /* reserve mailbox for vf use */ 337 /* reserve mailbox for vf use */
351 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); 338 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
352 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) 339 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
353 ret_val = 0; 340 return 0;
354 341
355 return ret_val; 342 return IXGBE_ERR_MBX;
356} 343}
357 344
358/** 345/**
@@ -373,7 +360,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
373 /* lock the mailbox to prevent pf/vf race condition */ 360 /* lock the mailbox to prevent pf/vf race condition */
374 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); 361 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
375 if (ret_val) 362 if (ret_val)
376 goto out_no_write; 363 return ret_val;
377 364
378 /* flush msg and acks as we are overwriting the message buffer */ 365 /* flush msg and acks as we are overwriting the message buffer */
379 ixgbe_check_for_msg_pf(hw, vf_number); 366 ixgbe_check_for_msg_pf(hw, vf_number);
@@ -389,9 +376,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
389 /* update stats */ 376 /* update stats */
390 hw->mbx.stats.msgs_tx++; 377 hw->mbx.stats.msgs_tx++;
391 378
392out_no_write: 379 return 0;
393 return ret_val;
394
395} 380}
396 381
397/** 382/**
@@ -414,7 +399,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
414 /* lock the mailbox to prevent pf/vf race condition */ 399 /* lock the mailbox to prevent pf/vf race condition */
415 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); 400 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
416 if (ret_val) 401 if (ret_val)
417 goto out_no_read; 402 return ret_val;
418 403
419 /* copy the message to the mailbox memory buffer */ 404 /* copy the message to the mailbox memory buffer */
420 for (i = 0; i < size; i++) 405 for (i = 0; i < size; i++)
@@ -426,8 +411,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
426 /* update stats */ 411 /* update stats */
427 hw->mbx.stats.msgs_rx++; 412 hw->mbx.stats.msgs_rx++;
428 413
429out_no_read: 414 return 0;
430 return ret_val;
431} 415}
432 416
433#ifdef CONFIG_PCI_IOV 417#ifdef CONFIG_PCI_IOV
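Reassembled from the new-side lines of the hunk above (the first line of the signature comes from the hunk header), the posted-write helper now bails out as soon as the mailbox write fails and otherwise returns whatever ixgbe_poll_for_ack() reports:

static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
                                  u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        s32 ret_val;

        /* exit if either we can't write or there isn't a defined timeout */
        if (!mbx->ops.write || !mbx->timeout)
                return IXGBE_ERR_MBX;

        /* send msg */
        ret_val = mbx->ops.write(hw, msg, size, mbx_id);
        if (ret_val)
                return ret_val;

        /* if msg sent wait until we receive an ack */
        return ixgbe_poll_for_ack(hw, mbx_id);
}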
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index ff68b7a9deff..11f02ea78c4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -57,7 +57,6 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
57 **/ 57 **/
58s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) 58s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
59{ 59{
60 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
61 u32 phy_addr; 60 u32 phy_addr;
62 u16 ext_ability = 0; 61 u16 ext_ability = 0;
63 62
@@ -84,18 +83,14 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
84 ixgbe_phy_generic; 83 ixgbe_phy_generic;
85 } 84 }
86 85
87 status = 0; 86 return 0;
88 break;
89 } 87 }
90 } 88 }
91 /* clear value if nothing found */ 89 /* clear value if nothing found */
92 if (status != 0) 90 hw->phy.mdio.prtad = 0;
93 hw->phy.mdio.prtad = 0; 91 return IXGBE_ERR_PHY_ADDR_INVALID;
94 } else {
95 status = 0;
96 } 92 }
97 93 return 0;
98 return status;
99} 94}
100 95
101/** 96/**
@@ -192,16 +187,16 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
192 status = ixgbe_identify_phy_generic(hw); 187 status = ixgbe_identify_phy_generic(hw);
193 188
194 if (status != 0 || hw->phy.type == ixgbe_phy_none) 189 if (status != 0 || hw->phy.type == ixgbe_phy_none)
195 goto out; 190 return status;
196 191
197 /* Don't reset PHY if it's shut down due to overtemp. */ 192 /* Don't reset PHY if it's shut down due to overtemp. */
198 if (!hw->phy.reset_if_overtemp && 193 if (!hw->phy.reset_if_overtemp &&
199 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 194 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
200 goto out; 195 return 0;
201 196
202 /* Blocked by MNG FW so bail */ 197 /* Blocked by MNG FW so bail */
203 if (ixgbe_check_reset_blocked(hw)) 198 if (ixgbe_check_reset_blocked(hw))
204 goto out; 199 return 0;
205 200
206 /* 201 /*
207 * Perform soft PHY reset to the PHY_XS. 202 * Perform soft PHY reset to the PHY_XS.
@@ -227,12 +222,11 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
227 } 222 }
228 223
229 if (ctrl & MDIO_CTRL1_RESET) { 224 if (ctrl & MDIO_CTRL1_RESET) {
230 status = IXGBE_ERR_RESET_FAILED;
231 hw_dbg(hw, "PHY reset polling failed to complete.\n"); 225 hw_dbg(hw, "PHY reset polling failed to complete.\n");
226 return IXGBE_ERR_RESET_FAILED;
232 } 227 }
233 228
234out: 229 return 0;
235 return status;
236} 230}
237 231
238/** 232/**
@@ -333,7 +327,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
333 phy_data); 327 phy_data);
334 hw->mac.ops.release_swfw_sync(hw, gssr); 328 hw->mac.ops.release_swfw_sync(hw, gssr);
335 } else { 329 } else {
336 status = IXGBE_ERR_SWFW_SYNC; 330 return IXGBE_ERR_SWFW_SYNC;
337 } 331 }
338 332
339 return status; 333 return status;
@@ -436,7 +430,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
436 phy_data); 430 phy_data);
437 hw->mac.ops.release_swfw_sync(hw, gssr); 431 hw->mac.ops.release_swfw_sync(hw, gssr);
438 } else { 432 } else {
439 status = IXGBE_ERR_SWFW_SYNC; 433 return IXGBE_ERR_SWFW_SYNC;
440 } 434 }
441 435
442 return status; 436 return status;
@@ -509,7 +503,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
509 503
510 /* Blocked by MNG FW so don't reset PHY */ 504 /* Blocked by MNG FW so don't reset PHY */
511 if (ixgbe_check_reset_blocked(hw)) 505 if (ixgbe_check_reset_blocked(hw))
512 return status; 506 return 0;
513 507
514 /* Restart PHY autonegotiation and wait for completion */ 508 /* Restart PHY autonegotiation and wait for completion */
515 hw->phy.ops.read_reg(hw, MDIO_CTRL1, 509 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
@@ -535,8 +529,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
535 } 529 }
536 530
537 if (time_out == max_time_out) { 531 if (time_out == max_time_out) {
538 status = IXGBE_ERR_LINK_SETUP;
539 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n"); 532 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
533 return IXGBE_ERR_LINK_SETUP;
540 } 534 }
541 535
542 return status; 536 return status;
@@ -585,7 +579,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
585 ixgbe_link_speed *speed, 579 ixgbe_link_speed *speed,
586 bool *autoneg) 580 bool *autoneg)
587{ 581{
588 s32 status = IXGBE_ERR_LINK_SETUP; 582 s32 status;
589 u16 speed_ability; 583 u16 speed_ability;
590 584
591 *speed = 0; 585 *speed = 0;
@@ -616,7 +610,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
616s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 610s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
617 bool *link_up) 611 bool *link_up)
618{ 612{
619 s32 status = 0; 613 s32 status;
620 u32 time_out; 614 u32 time_out;
621 u32 max_time_out = 10; 615 u32 max_time_out = 10;
622 u16 phy_link = 0; 616 u16 phy_link = 0;
@@ -662,7 +656,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
662 **/ 656 **/
663s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) 657s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
664{ 658{
665 s32 status = 0; 659 s32 status;
666 u32 time_out; 660 u32 time_out;
667 u32 max_time_out = 10; 661 u32 max_time_out = 10;
668 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; 662 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
@@ -719,7 +713,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
719 713
720 /* Blocked by MNG FW so don't reset PHY */ 714 /* Blocked by MNG FW so don't reset PHY */
721 if (ixgbe_check_reset_blocked(hw)) 715 if (ixgbe_check_reset_blocked(hw))
722 return status; 716 return 0;
723 717
724 /* Restart PHY autonegotiation and wait for completion */ 718 /* Restart PHY autonegotiation and wait for completion */
725 hw->phy.ops.read_reg(hw, MDIO_CTRL1, 719 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
@@ -744,8 +738,8 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
744 } 738 }
745 739
746 if (time_out == max_time_out) { 740 if (time_out == max_time_out) {
747 status = IXGBE_ERR_LINK_SETUP;
748 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n"); 741 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
742 return IXGBE_ERR_LINK_SETUP;
749 } 743 }
750 744
751 return status; 745 return status;
@@ -759,7 +753,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
759s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 753s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
760 u16 *firmware_version) 754 u16 *firmware_version)
761{ 755{
762 s32 status = 0; 756 s32 status;
763 757
764 status = hw->phy.ops.read_reg(hw, TNX_FW_REV, 758 status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
765 MDIO_MMD_VEND1, 759 MDIO_MMD_VEND1,
@@ -776,7 +770,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
776s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, 770s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
777 u16 *firmware_version) 771 u16 *firmware_version)
778{ 772{
779 s32 status = 0; 773 s32 status;
780 774
781 status = hw->phy.ops.read_reg(hw, AQ_FW_REV, 775 status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
782 MDIO_MMD_VEND1, 776 MDIO_MMD_VEND1,
@@ -795,12 +789,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
795 bool end_data = false; 789 bool end_data = false;
796 u16 list_offset, data_offset; 790 u16 list_offset, data_offset;
797 u16 phy_data = 0; 791 u16 phy_data = 0;
798 s32 ret_val = 0; 792 s32 ret_val;
799 u32 i; 793 u32 i;
800 794
801 /* Blocked by MNG FW so bail */ 795 /* Blocked by MNG FW so bail */
802 if (ixgbe_check_reset_blocked(hw)) 796 if (ixgbe_check_reset_blocked(hw))
803 goto out; 797 return 0;
804 798
805 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); 799 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
806 800
@@ -818,15 +812,14 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
818 812
819 if ((phy_data & MDIO_CTRL1_RESET) != 0) { 813 if ((phy_data & MDIO_CTRL1_RESET) != 0) {
820 hw_dbg(hw, "PHY reset did not complete.\n"); 814 hw_dbg(hw, "PHY reset did not complete.\n");
821 ret_val = IXGBE_ERR_PHY; 815 return IXGBE_ERR_PHY;
822 goto out;
823 } 816 }
824 817
825 /* Get init offsets */ 818 /* Get init offsets */
826 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 819 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
827 &data_offset); 820 &data_offset);
828 if (ret_val != 0) 821 if (ret_val)
829 goto out; 822 return ret_val;
830 823
831 ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); 824 ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
832 data_offset++; 825 data_offset++;
@@ -876,18 +869,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
876 hw_dbg(hw, "SOL\n"); 869 hw_dbg(hw, "SOL\n");
877 } else { 870 } else {
878 hw_dbg(hw, "Bad control value\n"); 871 hw_dbg(hw, "Bad control value\n");
879 ret_val = IXGBE_ERR_PHY; 872 return IXGBE_ERR_PHY;
880 goto out;
881 } 873 }
882 break; 874 break;
883 default: 875 default:
884 hw_dbg(hw, "Bad control type\n"); 876 hw_dbg(hw, "Bad control type\n");
885 ret_val = IXGBE_ERR_PHY; 877 return IXGBE_ERR_PHY;
886 goto out;
887 } 878 }
888 } 879 }
889 880
890out:
891 return ret_val; 881 return ret_val;
892 882
893err_eeprom: 883err_eeprom:
@@ -903,34 +893,29 @@ err_eeprom:
903 **/ 893 **/
904s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) 894s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
905{ 895{
906 s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
907
908 switch (hw->mac.ops.get_media_type(hw)) { 896 switch (hw->mac.ops.get_media_type(hw)) {
909 case ixgbe_media_type_fiber: 897 case ixgbe_media_type_fiber:
910 status = ixgbe_identify_sfp_module_generic(hw); 898 return ixgbe_identify_sfp_module_generic(hw);
911 break;
912 case ixgbe_media_type_fiber_qsfp: 899 case ixgbe_media_type_fiber_qsfp:
913 status = ixgbe_identify_qsfp_module_generic(hw); 900 return ixgbe_identify_qsfp_module_generic(hw);
914 break;
915 default: 901 default:
916 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 902 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
917 status = IXGBE_ERR_SFP_NOT_PRESENT; 903 return IXGBE_ERR_SFP_NOT_PRESENT;
918 break;
919 } 904 }
920 905
921 return status; 906 return IXGBE_ERR_SFP_NOT_PRESENT;
922} 907}
923 908
924/** 909/**
925 * ixgbe_identify_sfp_module_generic - Identifies SFP modules 910 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
926 * @hw: pointer to hardware structure 911 * @hw: pointer to hardware structure
927* 912 *
928 * Searches for and identifies the SFP module and assigns appropriate PHY type. 913 * Searches for and identifies the SFP module and assigns appropriate PHY type.
929 **/ 914 **/
930s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 915s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
931{ 916{
932 struct ixgbe_adapter *adapter = hw->back; 917 struct ixgbe_adapter *adapter = hw->back;
933 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 918 s32 status;
934 u32 vendor_oui = 0; 919 u32 vendor_oui = 0;
935 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; 920 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
936 u8 identifier = 0; 921 u8 identifier = 0;
@@ -943,15 +928,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
943 928
944 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { 929 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
945 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 930 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
946 status = IXGBE_ERR_SFP_NOT_PRESENT; 931 return IXGBE_ERR_SFP_NOT_PRESENT;
947 goto out;
948 } 932 }
949 933
950 status = hw->phy.ops.read_i2c_eeprom(hw, 934 status = hw->phy.ops.read_i2c_eeprom(hw,
951 IXGBE_SFF_IDENTIFIER, 935 IXGBE_SFF_IDENTIFIER,
952 &identifier); 936 &identifier);
953 937
954 if (status != 0) 938 if (status)
955 goto err_read_i2c_eeprom; 939 goto err_read_i2c_eeprom;
956 940
957 /* LAN ID is needed for sfp_type determination */ 941 /* LAN ID is needed for sfp_type determination */
@@ -959,239 +943,224 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
959 943
960 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { 944 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
961 hw->phy.type = ixgbe_phy_sfp_unsupported; 945 hw->phy.type = ixgbe_phy_sfp_unsupported;
962 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 946 return IXGBE_ERR_SFP_NOT_SUPPORTED;
963 } else { 947 }
964 status = hw->phy.ops.read_i2c_eeprom(hw, 948 status = hw->phy.ops.read_i2c_eeprom(hw,
965 IXGBE_SFF_1GBE_COMP_CODES, 949 IXGBE_SFF_1GBE_COMP_CODES,
966 &comp_codes_1g); 950 &comp_codes_1g);
967 951
968 if (status != 0) 952 if (status)
969 goto err_read_i2c_eeprom; 953 goto err_read_i2c_eeprom;
970 954
971 status = hw->phy.ops.read_i2c_eeprom(hw, 955 status = hw->phy.ops.read_i2c_eeprom(hw,
972 IXGBE_SFF_10GBE_COMP_CODES, 956 IXGBE_SFF_10GBE_COMP_CODES,
973 &comp_codes_10g); 957 &comp_codes_10g);
974 958
975 if (status != 0) 959 if (status)
976 goto err_read_i2c_eeprom; 960 goto err_read_i2c_eeprom;
977 status = hw->phy.ops.read_i2c_eeprom(hw, 961 status = hw->phy.ops.read_i2c_eeprom(hw,
978 IXGBE_SFF_CABLE_TECHNOLOGY, 962 IXGBE_SFF_CABLE_TECHNOLOGY,
979 &cable_tech); 963 &cable_tech);
980 964
981 if (status != 0) 965 if (status)
982 goto err_read_i2c_eeprom; 966 goto err_read_i2c_eeprom;
983 967
984 /* ID Module 968 /* ID Module
985 * ========= 969 * =========
986 * 0 SFP_DA_CU 970 * 0 SFP_DA_CU
987 * 1 SFP_SR 971 * 1 SFP_SR
988 * 2 SFP_LR 972 * 2 SFP_LR
989 * 3 SFP_DA_CORE0 - 82599-specific 973 * 3 SFP_DA_CORE0 - 82599-specific
990 * 4 SFP_DA_CORE1 - 82599-specific 974 * 4 SFP_DA_CORE1 - 82599-specific
991 * 5 SFP_SR/LR_CORE0 - 82599-specific 975 * 5 SFP_SR/LR_CORE0 - 82599-specific
992 * 6 SFP_SR/LR_CORE1 - 82599-specific 976 * 6 SFP_SR/LR_CORE1 - 82599-specific
993 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific 977 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
994 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 978 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
995 * 9 SFP_1g_cu_CORE0 - 82599-specific 979 * 9 SFP_1g_cu_CORE0 - 82599-specific
996 * 10 SFP_1g_cu_CORE1 - 82599-specific 980 * 10 SFP_1g_cu_CORE1 - 82599-specific
997 * 11 SFP_1g_sx_CORE0 - 82599-specific 981 * 11 SFP_1g_sx_CORE0 - 82599-specific
998 * 12 SFP_1g_sx_CORE1 - 82599-specific 982 * 12 SFP_1g_sx_CORE1 - 82599-specific
999 */ 983 */
1000 if (hw->mac.type == ixgbe_mac_82598EB) { 984 if (hw->mac.type == ixgbe_mac_82598EB) {
1001 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 985 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1002 hw->phy.sfp_type = ixgbe_sfp_type_da_cu; 986 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
1003 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 987 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
1004 hw->phy.sfp_type = ixgbe_sfp_type_sr; 988 hw->phy.sfp_type = ixgbe_sfp_type_sr;
1005 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 989 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
1006 hw->phy.sfp_type = ixgbe_sfp_type_lr; 990 hw->phy.sfp_type = ixgbe_sfp_type_lr;
991 else
992 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
993 } else if (hw->mac.type == ixgbe_mac_82599EB) {
994 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
995 if (hw->bus.lan_id == 0)
996 hw->phy.sfp_type =
997 ixgbe_sfp_type_da_cu_core0;
1007 else 998 else
1008 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 999 hw->phy.sfp_type =
1009 } else if (hw->mac.type == ixgbe_mac_82599EB) { 1000 ixgbe_sfp_type_da_cu_core1;
1010 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { 1001 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1011 if (hw->bus.lan_id == 0) 1002 hw->phy.ops.read_i2c_eeprom(
1012 hw->phy.sfp_type = 1003 hw, IXGBE_SFF_CABLE_SPEC_COMP,
1013 ixgbe_sfp_type_da_cu_core0; 1004 &cable_spec);
1014 else 1005 if (cable_spec &
1015 hw->phy.sfp_type = 1006 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
1016 ixgbe_sfp_type_da_cu_core1;
1017 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1018 hw->phy.ops.read_i2c_eeprom(
1019 hw, IXGBE_SFF_CABLE_SPEC_COMP,
1020 &cable_spec);
1021 if (cable_spec &
1022 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
1023 if (hw->bus.lan_id == 0)
1024 hw->phy.sfp_type =
1025 ixgbe_sfp_type_da_act_lmt_core0;
1026 else
1027 hw->phy.sfp_type =
1028 ixgbe_sfp_type_da_act_lmt_core1;
1029 } else {
1030 hw->phy.sfp_type =
1031 ixgbe_sfp_type_unknown;
1032 }
1033 } else if (comp_codes_10g &
1034 (IXGBE_SFF_10GBASESR_CAPABLE |
1035 IXGBE_SFF_10GBASELR_CAPABLE)) {
1036 if (hw->bus.lan_id == 0)
1037 hw->phy.sfp_type =
1038 ixgbe_sfp_type_srlr_core0;
1039 else
1040 hw->phy.sfp_type =
1041 ixgbe_sfp_type_srlr_core1;
1042 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1043 if (hw->bus.lan_id == 0)
1044 hw->phy.sfp_type =
1045 ixgbe_sfp_type_1g_cu_core0;
1046 else
1047 hw->phy.sfp_type =
1048 ixgbe_sfp_type_1g_cu_core1;
1049 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
1050 if (hw->bus.lan_id == 0)
1051 hw->phy.sfp_type =
1052 ixgbe_sfp_type_1g_sx_core0;
1053 else
1054 hw->phy.sfp_type =
1055 ixgbe_sfp_type_1g_sx_core1;
1056 } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
1057 if (hw->bus.lan_id == 0) 1007 if (hw->bus.lan_id == 0)
1058 hw->phy.sfp_type = 1008 hw->phy.sfp_type =
1059 ixgbe_sfp_type_1g_lx_core0; 1009 ixgbe_sfp_type_da_act_lmt_core0;
1060 else 1010 else
1061 hw->phy.sfp_type = 1011 hw->phy.sfp_type =
1062 ixgbe_sfp_type_1g_lx_core1; 1012 ixgbe_sfp_type_da_act_lmt_core1;
1063 } else { 1013 } else {
1064 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 1014 hw->phy.sfp_type =
1015 ixgbe_sfp_type_unknown;
1065 } 1016 }
1017 } else if (comp_codes_10g &
1018 (IXGBE_SFF_10GBASESR_CAPABLE |
1019 IXGBE_SFF_10GBASELR_CAPABLE)) {
1020 if (hw->bus.lan_id == 0)
1021 hw->phy.sfp_type =
1022 ixgbe_sfp_type_srlr_core0;
1023 else
1024 hw->phy.sfp_type =
1025 ixgbe_sfp_type_srlr_core1;
1026 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1027 if (hw->bus.lan_id == 0)
1028 hw->phy.sfp_type =
1029 ixgbe_sfp_type_1g_cu_core0;
1030 else
1031 hw->phy.sfp_type =
1032 ixgbe_sfp_type_1g_cu_core1;
1033 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
1034 if (hw->bus.lan_id == 0)
1035 hw->phy.sfp_type =
1036 ixgbe_sfp_type_1g_sx_core0;
1037 else
1038 hw->phy.sfp_type =
1039 ixgbe_sfp_type_1g_sx_core1;
1040 } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
1041 if (hw->bus.lan_id == 0)
1042 hw->phy.sfp_type =
1043 ixgbe_sfp_type_1g_lx_core0;
1044 else
1045 hw->phy.sfp_type =
1046 ixgbe_sfp_type_1g_lx_core1;
1047 } else {
1048 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1066 } 1049 }
1050 }
1067 1051
1068 if (hw->phy.sfp_type != stored_sfp_type) 1052 if (hw->phy.sfp_type != stored_sfp_type)
1069 hw->phy.sfp_setup_needed = true; 1053 hw->phy.sfp_setup_needed = true;
1070
1071 /* Determine if the SFP+ PHY is dual speed or not. */
1072 hw->phy.multispeed_fiber = false;
1073 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1074 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1075 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1076 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1077 hw->phy.multispeed_fiber = true;
1078
1079 /* Determine PHY vendor */
1080 if (hw->phy.type != ixgbe_phy_nl) {
1081 hw->phy.id = identifier;
1082 status = hw->phy.ops.read_i2c_eeprom(hw,
1083 IXGBE_SFF_VENDOR_OUI_BYTE0,
1084 &oui_bytes[0]);
1085
1086 if (status != 0)
1087 goto err_read_i2c_eeprom;
1088
1089 status = hw->phy.ops.read_i2c_eeprom(hw,
1090 IXGBE_SFF_VENDOR_OUI_BYTE1,
1091 &oui_bytes[1]);
1092
1093 if (status != 0)
1094 goto err_read_i2c_eeprom;
1095
1096 status = hw->phy.ops.read_i2c_eeprom(hw,
1097 IXGBE_SFF_VENDOR_OUI_BYTE2,
1098 &oui_bytes[2]);
1099
1100 if (status != 0)
1101 goto err_read_i2c_eeprom;
1102
1103 vendor_oui =
1104 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1105 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1106 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1107
1108 switch (vendor_oui) {
1109 case IXGBE_SFF_VENDOR_OUI_TYCO:
1110 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1111 hw->phy.type =
1112 ixgbe_phy_sfp_passive_tyco;
1113 break;
1114 case IXGBE_SFF_VENDOR_OUI_FTL:
1115 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1116 hw->phy.type = ixgbe_phy_sfp_ftl_active;
1117 else
1118 hw->phy.type = ixgbe_phy_sfp_ftl;
1119 break;
1120 case IXGBE_SFF_VENDOR_OUI_AVAGO:
1121 hw->phy.type = ixgbe_phy_sfp_avago;
1122 break;
1123 case IXGBE_SFF_VENDOR_OUI_INTEL:
1124 hw->phy.type = ixgbe_phy_sfp_intel;
1125 break;
1126 default:
1127 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1128 hw->phy.type =
1129 ixgbe_phy_sfp_passive_unknown;
1130 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1131 hw->phy.type =
1132 ixgbe_phy_sfp_active_unknown;
1133 else
1134 hw->phy.type = ixgbe_phy_sfp_unknown;
1135 break;
1136 }
1137 }
1138 1054
1139 /* Allow any DA cable vendor */ 1055 /* Determine if the SFP+ PHY is dual speed or not. */
1140 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | 1056 hw->phy.multispeed_fiber = false;
1141 IXGBE_SFF_DA_ACTIVE_CABLE)) { 1057 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1142 status = 0; 1058 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1143 goto out; 1059 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1144 } 1060 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1061 hw->phy.multispeed_fiber = true;
1145 1062
1146 /* Verify supported 1G SFP modules */ 1063 /* Determine PHY vendor */
1147 if (comp_codes_10g == 0 && 1064 if (hw->phy.type != ixgbe_phy_nl) {
1148 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 1065 hw->phy.id = identifier;
1149 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 1066 status = hw->phy.ops.read_i2c_eeprom(hw,
1150 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || 1067 IXGBE_SFF_VENDOR_OUI_BYTE0,
1151 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || 1068 &oui_bytes[0]);
1152 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1153 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1154 hw->phy.type = ixgbe_phy_sfp_unsupported;
1155 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1156 goto out;
1157 }
1158 1069
1159 /* Anything else 82598-based is supported */ 1070 if (status != 0)
1160 if (hw->mac.type == ixgbe_mac_82598EB) { 1071 goto err_read_i2c_eeprom;
1161 status = 0;
1162 goto out;
1163 }
1164 1072
1165 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1073 status = hw->phy.ops.read_i2c_eeprom(hw,
1166 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 1074 IXGBE_SFF_VENDOR_OUI_BYTE1,
1167 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 1075 &oui_bytes[1]);
1168 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 1076
1169 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || 1077 if (status != 0)
1170 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || 1078 goto err_read_i2c_eeprom;
1171 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || 1079
1172 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { 1080 status = hw->phy.ops.read_i2c_eeprom(hw,
1173 /* Make sure we're a supported PHY type */ 1081 IXGBE_SFF_VENDOR_OUI_BYTE2,
1174 if (hw->phy.type == ixgbe_phy_sfp_intel) { 1082 &oui_bytes[2]);
1175 status = 0; 1083
1176 } else { 1084 if (status != 0)
1177 if (hw->allow_unsupported_sfp) { 1085 goto err_read_i2c_eeprom;
1178 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); 1086
1179 status = 0; 1087 vendor_oui =
1180 } else { 1088 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1181 hw_dbg(hw, 1089 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1182 "SFP+ module not supported\n"); 1090 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1183 hw->phy.type = 1091
1184 ixgbe_phy_sfp_unsupported; 1092 switch (vendor_oui) {
1185 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1093 case IXGBE_SFF_VENDOR_OUI_TYCO:
1186 } 1094 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1187 } 1095 hw->phy.type =
1188 } else { 1096 ixgbe_phy_sfp_passive_tyco;
1189 status = 0; 1097 break;
1098 case IXGBE_SFF_VENDOR_OUI_FTL:
1099 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1100 hw->phy.type = ixgbe_phy_sfp_ftl_active;
1101 else
1102 hw->phy.type = ixgbe_phy_sfp_ftl;
1103 break;
1104 case IXGBE_SFF_VENDOR_OUI_AVAGO:
1105 hw->phy.type = ixgbe_phy_sfp_avago;
1106 break;
1107 case IXGBE_SFF_VENDOR_OUI_INTEL:
1108 hw->phy.type = ixgbe_phy_sfp_intel;
1109 break;
1110 default:
1111 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1112 hw->phy.type =
1113 ixgbe_phy_sfp_passive_unknown;
1114 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1115 hw->phy.type =
1116 ixgbe_phy_sfp_active_unknown;
1117 else
1118 hw->phy.type = ixgbe_phy_sfp_unknown;
1119 break;
1190 } 1120 }
1191 } 1121 }
1192 1122
1193out: 1123 /* Allow any DA cable vendor */
1194 return status; 1124 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
1125 IXGBE_SFF_DA_ACTIVE_CABLE))
1126 return 0;
1127
1128 /* Verify supported 1G SFP modules */
1129 if (comp_codes_10g == 0 &&
1130 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1131 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1132 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1133 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1134 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1135 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1136 hw->phy.type = ixgbe_phy_sfp_unsupported;
1137 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1138 }
1139
1140 /* Anything else 82598-based is supported */
1141 if (hw->mac.type == ixgbe_mac_82598EB)
1142 return 0;
1143
1144 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1145 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1146 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1147 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1148 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1149 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1150 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1151 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1152 /* Make sure we're a supported PHY type */
1153 if (hw->phy.type == ixgbe_phy_sfp_intel)
1154 return 0;
1155 if (hw->allow_unsupported_sfp) {
1156 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1157 return 0;
1158 }
1159 hw_dbg(hw, "SFP+ module not supported\n");
1160 hw->phy.type = ixgbe_phy_sfp_unsupported;
1161 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1162 }
1163 return 0;
1195 1164
1196err_read_i2c_eeprom: 1165err_read_i2c_eeprom:
1197 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 1166 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1211,7 +1180,7 @@ err_read_i2c_eeprom:
1211static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) 1180static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1212{ 1181{
1213 struct ixgbe_adapter *adapter = hw->back; 1182 struct ixgbe_adapter *adapter = hw->back;
1214 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1183 s32 status;
1215 u32 vendor_oui = 0; 1184 u32 vendor_oui = 0;
1216 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; 1185 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1217 u8 identifier = 0; 1186 u8 identifier = 0;
@@ -1226,8 +1195,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1226 1195
1227 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { 1196 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
1228 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 1197 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1229 status = IXGBE_ERR_SFP_NOT_PRESENT; 1198 return IXGBE_ERR_SFP_NOT_PRESENT;
1230 goto out;
1231 } 1199 }
1232 1200
1233 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, 1201 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
@@ -1238,8 +1206,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1238 1206
1239 if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { 1207 if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
1240 hw->phy.type = ixgbe_phy_sfp_unsupported; 1208 hw->phy.type = ixgbe_phy_sfp_unsupported;
1241 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1209 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1242 goto out;
1243 } 1210 }
1244 1211
1245 hw->phy.id = identifier; 1212 hw->phy.id = identifier;
@@ -1310,8 +1277,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1310 } else { 1277 } else {
1311 /* unsupported module type */ 1278 /* unsupported module type */
1312 hw->phy.type = ixgbe_phy_sfp_unsupported; 1279 hw->phy.type = ixgbe_phy_sfp_unsupported;
1313 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1280 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1314 goto out;
1315 } 1281 }
1316 } 1282 }
1317 1283
@@ -1363,27 +1329,19 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1363 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1329 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1364 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { 1330 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
1365 /* Make sure we're a supported PHY type */ 1331 /* Make sure we're a supported PHY type */
1366 if (hw->phy.type == ixgbe_phy_qsfp_intel) { 1332 if (hw->phy.type == ixgbe_phy_qsfp_intel)
1367 status = 0; 1333 return 0;
1368 } else { 1334 if (hw->allow_unsupported_sfp) {
1369 if (hw->allow_unsupported_sfp == true) { 1335 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1370 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); 1336 return 0;
1371 status = 0;
1372 } else {
1373 hw_dbg(hw,
1374 "QSFP module not supported\n");
1375 hw->phy.type =
1376 ixgbe_phy_sfp_unsupported;
1377 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1378 }
1379 } 1337 }
1380 } else { 1338 hw_dbg(hw, "QSFP module not supported\n");
1381 status = 0; 1339 hw->phy.type = ixgbe_phy_sfp_unsupported;
1340 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1382 } 1341 }
1342 return 0;
1383 } 1343 }
1384 1344 return 0;
1385out:
1386 return status;
1387 1345
1388err_read_i2c_eeprom: 1346err_read_i2c_eeprom:
1389 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 1347 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1544,7 +1502,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1544s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1502s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1545 u8 dev_addr, u8 *data) 1503 u8 dev_addr, u8 *data)
1546{ 1504{
1547 s32 status = 0; 1505 s32 status;
1548 u32 max_retry = 10; 1506 u32 max_retry = 10;
1549 u32 retry = 0; 1507 u32 retry = 0;
1550 u16 swfw_mask = 0; 1508 u16 swfw_mask = 0;
@@ -1557,10 +1515,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1557 swfw_mask = IXGBE_GSSR_PHY0_SM; 1515 swfw_mask = IXGBE_GSSR_PHY0_SM;
1558 1516
1559 do { 1517 do {
1560 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { 1518 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1561 status = IXGBE_ERR_SWFW_SYNC; 1519 return IXGBE_ERR_SWFW_SYNC;
1562 goto read_byte_out;
1563 }
1564 1520
1565 ixgbe_i2c_start(hw); 1521 ixgbe_i2c_start(hw);
1566 1522
@@ -1617,7 +1573,6 @@ fail:
1617 1573
1618 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 1574 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1619 1575
1620read_byte_out:
1621 return status; 1576 return status;
1622} 1577}
1623 1578
@@ -1633,7 +1588,7 @@ read_byte_out:
1633s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 1588s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1634 u8 dev_addr, u8 data) 1589 u8 dev_addr, u8 data)
1635{ 1590{
1636 s32 status = 0; 1591 s32 status;
1637 u32 max_retry = 1; 1592 u32 max_retry = 1;
1638 u32 retry = 0; 1593 u32 retry = 0;
1639 u16 swfw_mask = 0; 1594 u16 swfw_mask = 0;
@@ -1643,10 +1598,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1643 else 1598 else
1644 swfw_mask = IXGBE_GSSR_PHY0_SM; 1599 swfw_mask = IXGBE_GSSR_PHY0_SM;
1645 1600
1646 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { 1601 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1647 status = IXGBE_ERR_SWFW_SYNC; 1602 return IXGBE_ERR_SWFW_SYNC;
1648 goto write_byte_out;
1649 }
1650 1603
1651 do { 1604 do {
1652 ixgbe_i2c_start(hw); 1605 ixgbe_i2c_start(hw);
@@ -1689,7 +1642,6 @@ fail:
1689 1642
1690 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 1643 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1691 1644
1692write_byte_out:
1693 return status; 1645 return status;
1694} 1646}
1695 1647
@@ -1774,7 +1726,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
1774 **/ 1726 **/
1775static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) 1727static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
1776{ 1728{
1777 s32 status = 0; 1729 s32 status;
1778 s32 i; 1730 s32 i;
1779 u32 i2cctl; 1731 u32 i2cctl;
1780 bool bit = false; 1732 bool bit = false;
@@ -1893,11 +1845,11 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
1893 */ 1845 */
1894 udelay(IXGBE_I2C_T_LOW); 1846 udelay(IXGBE_I2C_T_LOW);
1895 } else { 1847 } else {
1896 status = IXGBE_ERR_I2C;
1897 hw_dbg(hw, "I2C data was not set to %X\n", data); 1848 hw_dbg(hw, "I2C data was not set to %X\n", data);
1849 return IXGBE_ERR_I2C;
1898 } 1850 }
1899 1851
1900 return status; 1852 return 0;
1901} 1853}
1902/** 1854/**
1903 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock 1855 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
@@ -1954,8 +1906,6 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1954 **/ 1906 **/
1955static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) 1907static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
1956{ 1908{
1957 s32 status = 0;
1958
1959 if (data) 1909 if (data)
1960 *i2cctl |= IXGBE_I2C_DATA_OUT; 1910 *i2cctl |= IXGBE_I2C_DATA_OUT;
1961 else 1911 else
@@ -1970,11 +1920,11 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
1970 /* Verify data was set correctly */ 1920 /* Verify data was set correctly */
1971 *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1921 *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1972 if (data != ixgbe_get_i2c_data(i2cctl)) { 1922 if (data != ixgbe_get_i2c_data(i2cctl)) {
1973 status = IXGBE_ERR_I2C;
1974 hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); 1923 hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
1924 return IXGBE_ERR_I2C;
1975 } 1925 }
1976 1926
1977 return status; 1927 return 0;
1978} 1928}
1979 1929
1980/** 1930/**
@@ -1986,14 +1936,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
1986 **/ 1936 **/
1987static bool ixgbe_get_i2c_data(u32 *i2cctl) 1937static bool ixgbe_get_i2c_data(u32 *i2cctl)
1988{ 1938{
1989 bool data;
1990
1991 if (*i2cctl & IXGBE_I2C_DATA_IN) 1939 if (*i2cctl & IXGBE_I2C_DATA_IN)
1992 data = true; 1940 return true;
1993 else 1941 return false;
1994 data = false;
1995
1996 return data;
1997} 1942}
1998 1943
1999/** 1944/**
@@ -2038,20 +1983,17 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
2038 **/ 1983 **/
2039s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) 1984s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2040{ 1985{
2041 s32 status = 0;
2042 u16 phy_data = 0; 1986 u16 phy_data = 0;
2043 1987
2044 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) 1988 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2045 goto out; 1989 return 0;
2046 1990
2047 /* Check that the LASI temp alarm status was triggered */ 1991 /* Check that the LASI temp alarm status was triggered */
2048 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, 1992 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2049 MDIO_MMD_PMAPMD, &phy_data); 1993 MDIO_MMD_PMAPMD, &phy_data);
2050 1994
2051 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) 1995 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2052 goto out; 1996 return 0;
2053 1997
2054 status = IXGBE_ERR_OVERTEMP; 1998 return IXGBE_ERR_OVERTEMP;
2055out:
2056 return status;
2057} 1999}
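The PHY hunks above repeatedly convert "set status, goto out" into guard clauses. A small sketch of that shape, loosely modeled on the overtemp check at the end of the file; device_id, TARGET_ID, TEMP_ALARM, read_status() and ERR_OVERTEMP are hypothetical placeholders, not ixgbe definitions.

    #include <stdio.h>

    #define ERR_OVERTEMP (-2)    /* hypothetical */
    #define TARGET_ID    0x1528  /* hypothetical */
    #define TEMP_ALARM   0x0008  /* hypothetical */

    static unsigned int device_id = TARGET_ID;

    static unsigned int read_status(void)
    {
        return TEMP_ALARM;       /* pretend the alarm bit is set */
    }

    static int check_overtemp(void)
    {
        /* Early return: not the device this check applies to. */
        if (device_id != TARGET_ID)
            return 0;

        /* Early return: alarm bit clear, no overtemp condition. */
        if (!(read_status() & TEMP_ALARM))
            return 0;

        /* Only the error case remains, so return it directly. */
        return ERR_OVERTEMP;
    }

    int main(void)
    {
        printf("check_overtemp() = %d\n", check_overtemp());
        return 0;
    }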
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 16b3a1cd9db6..c14d4d89672f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -245,10 +245,10 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
245 if (pre_existing_vfs && pre_existing_vfs != num_vfs) 245 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
246 err = ixgbe_disable_sriov(adapter); 246 err = ixgbe_disable_sriov(adapter);
247 else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 247 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
248 goto out; 248 return num_vfs;
249 249
250 if (err) 250 if (err)
251 goto err_out; 251 return err;
252 252
253 /* While the SR-IOV capability structure reports total VFs to be 253 /* While the SR-IOV capability structure reports total VFs to be
254 * 64 we limit the actual number that can be allocated to 63 so 254 * 64 we limit the actual number that can be allocated to 63 so
@@ -256,16 +256,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
256 * PF. The PCI bus driver already checks for other values out of 256 * PF. The PCI bus driver already checks for other values out of
257 * range. 257 * range.
258 */ 258 */
259 if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { 259 if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT)
260 err = -EPERM; 260 return -EPERM;
261 goto err_out;
262 }
263 261
264 adapter->num_vfs = num_vfs; 262 adapter->num_vfs = num_vfs;
265 263
266 err = __ixgbe_enable_sriov(adapter); 264 err = __ixgbe_enable_sriov(adapter);
267 if (err) 265 if (err)
268 goto err_out; 266 return err;
269 267
270 for (i = 0; i < adapter->num_vfs; i++) 268 for (i = 0; i < adapter->num_vfs; i++)
271 ixgbe_vf_configuration(dev, (i | 0x10000000)); 269 ixgbe_vf_configuration(dev, (i | 0x10000000));
@@ -273,17 +271,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
273 err = pci_enable_sriov(dev, num_vfs); 271 err = pci_enable_sriov(dev, num_vfs);
274 if (err) { 272 if (err) {
275 e_dev_warn("Failed to enable PCI sriov: %d\n", err); 273 e_dev_warn("Failed to enable PCI sriov: %d\n", err);
276 goto err_out; 274 return err;
277 } 275 }
278 ixgbe_sriov_reinit(adapter); 276 ixgbe_sriov_reinit(adapter);
279 277
280out:
281 return num_vfs; 278 return num_vfs;
282 279#else
283err_out:
284 return err;
285#endif
286 return 0; 280 return 0;
281#endif
287} 282}
288 283
289static int ixgbe_pci_sriov_disable(struct pci_dev *dev) 284static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
@@ -807,7 +802,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
807 if (!add && adapter->netdev->flags & IFF_PROMISC) { 802 if (!add && adapter->netdev->flags & IFF_PROMISC) {
808 reg_ndx = ixgbe_find_vlvf_entry(hw, vid); 803 reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
809 if (reg_ndx < 0) 804 if (reg_ndx < 0)
810 goto out; 805 return err;
811 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); 806 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
812 /* See if any other pools are set for this VLAN filter 807 /* See if any other pools are set for this VLAN filter
813 * entry other than the PF. 808 * entry other than the PF.
@@ -833,8 +828,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
833 ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); 828 ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
834 } 829 }
835 830
836out:
837
838 return err; 831 return err;
839} 832}
840 833
@@ -951,7 +944,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
951 944
952 /* this is a message we already processed, do nothing */ 945 /* this is a message we already processed, do nothing */
953 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) 946 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
954 return retval; 947 return 0;
955 948
956 /* flush the ack before we write any messages back */ 949 /* flush the ack before we write any messages back */
957 IXGBE_WRITE_FLUSH(hw); 950 IXGBE_WRITE_FLUSH(hw);
@@ -966,7 +959,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
966 if (!adapter->vfinfo[vf].clear_to_send) { 959 if (!adapter->vfinfo[vf].clear_to_send) {
967 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; 960 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
968 ixgbe_write_mbx(hw, msgbuf, 1, vf); 961 ixgbe_write_mbx(hw, msgbuf, 1, vf);
969 return retval; 962 return 0;
970 } 963 }
971 964
972 switch ((msgbuf[0] & 0xFFFF)) { 965 switch ((msgbuf[0] & 0xFFFF)) {
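The SR-IOV enable path above keeps the same structure once err_out disappears: each failing step returns its own error immediately and the success path returns the VF count. A hedged sketch of that flow follows; step_a(), step_b() and MAX_UNITS are hypothetical helpers standing in for the real setup calls and the 63-VF driver limit.

    #include <stdio.h>

    #define MAX_UNITS 63                     /* mirrors the 63-VF limit */

    static int step_a(int n) { return n > 0 ? 0 : -1; }   /* hypothetical */
    static int step_b(int n) { (void)n; return 0; }        /* hypothetical */

    static int enable_units(int num)
    {
        int err;

        if (num > MAX_UNITS)
            return -1;           /* was: err = -EPERM; goto err_out; */

        err = step_a(num);
        if (err)
            return err;          /* was: goto err_out; */

        err = step_b(num);
        if (err)
            return err;

        return num;              /* success returns the count, as above */
    }

    int main(void)
    {
        printf("enable_units(4) = %d\n", enable_units(4));
        return 0;
    }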
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 7920ab9a18cb..e88305d5d18d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -99,8 +99,8 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
99 99
100 /* Call adapter stop to disable tx/rx and clear interrupts */ 100 /* Call adapter stop to disable tx/rx and clear interrupts */
101 status = hw->mac.ops.stop_adapter(hw); 101 status = hw->mac.ops.stop_adapter(hw);
102 if (status != 0) 102 if (status)
103 goto reset_hw_out; 103 return status;
104 104
105 /* flush pending Tx transactions */ 105 /* flush pending Tx transactions */
106 ixgbe_clear_tx_pending(hw); 106 ixgbe_clear_tx_pending(hw);
@@ -168,7 +168,6 @@ mac_reset_top:
168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 168 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
169 &hw->mac.wwpn_prefix); 169 &hw->mac.wwpn_prefix);
170 170
171reset_hw_out:
172 return status; 171 return status;
173} 172}
174 173
@@ -182,15 +181,13 @@ reset_hw_out:
182 **/ 181 **/
183static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) 182static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
184{ 183{
185 s32 ret_val = 0; 184 s32 ret_val;
186 185
187 ret_val = ixgbe_start_hw_generic(hw); 186 ret_val = ixgbe_start_hw_generic(hw);
188 if (ret_val != 0) 187 if (ret_val)
189 goto out; 188 return ret_val;
190 189
191 ret_val = ixgbe_start_hw_gen2(hw); 190 return ixgbe_start_hw_gen2(hw);
192out:
193 return ret_val;
194} 191}
195 192
196/** 193/**
@@ -483,12 +480,12 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
483static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) 480static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
484{ 481{
485 u32 flup; 482 u32 flup;
486 s32 status = IXGBE_ERR_EEPROM; 483 s32 status;
487 484
488 status = ixgbe_poll_flash_update_done_X540(hw); 485 status = ixgbe_poll_flash_update_done_X540(hw);
489 if (status == IXGBE_ERR_EEPROM) { 486 if (status == IXGBE_ERR_EEPROM) {
490 hw_dbg(hw, "Flash update time out\n"); 487 hw_dbg(hw, "Flash update time out\n");
491 goto out; 488 return status;
492 } 489 }
493 490
494 flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; 491 flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
@@ -514,7 +511,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
514 else 511 else
515 hw_dbg(hw, "Flash update time out\n"); 512 hw_dbg(hw, "Flash update time out\n");
516 } 513 }
517out: 514
518 return status; 515 return status;
519} 516}
520 517
@@ -529,17 +526,14 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
529{ 526{
530 u32 i; 527 u32 i;
531 u32 reg; 528 u32 reg;
532 s32 status = IXGBE_ERR_EEPROM;
533 529
534 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { 530 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
535 reg = IXGBE_READ_REG(hw, IXGBE_EEC); 531 reg = IXGBE_READ_REG(hw, IXGBE_EEC);
536 if (reg & IXGBE_EEC_FLUDONE) { 532 if (reg & IXGBE_EEC_FLUDONE)
537 status = 0; 533 return 0;
538 break;
539 }
540 udelay(5); 534 udelay(5);
541 } 535 }
542 return status; 536 return IXGBE_ERR_EEPROM;
543} 537}
544 538
545/** 539/**
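The X540 flash-poll hunk above shows the loop form of the same cleanup: return 0 as soon as the done bit is seen, and return the timeout error after the loop, with no status local at all. A minimal sketch under assumed names; read_reg(), DONE_BIT, ATTEMPTS and ERR_TIMEOUT are placeholders rather than ixgbe symbols.

    #include <stdio.h>

    #define DONE_BIT    0x04     /* hypothetical */
    #define ATTEMPTS    10       /* hypothetical */
    #define ERR_TIMEOUT (-3)     /* hypothetical */

    static unsigned int read_reg(unsigned int i)
    {
        return i == 3 ? DONE_BIT : 0;   /* pretend it completes on try 3 */
    }

    static int poll_done(void)
    {
        unsigned int i;

        for (i = 0; i < ATTEMPTS; i++) {
            if (read_reg(i) & DONE_BIT)
                return 0;        /* was: status = 0; break; */
            /* a real driver would udelay(5) here between attempts */
        }

        return ERR_TIMEOUT;      /* was: the status local's default value */
    }

    int main(void)
    {
        printf("poll_done() = %d\n", poll_done());
        return 0;
    }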