about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
diff options
context:
space:
mode:
authorMark Rustad <mark.d.rustad@intel.com>2014-07-22 02:51:08 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2014-07-25 22:58:36 -0400
commite90dd264566405e2f1bbb8595a4b5612281f6315 (patch)
tree0f9056b115f64524d689b6afe8e43728e26937b6 /drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
parent9f1fb8acd30c9ace0145e66942481bdb90beca15 (diff)
ixgbe: Make return values more direct
Make return values more direct, eliminating some gotos and otherwise unneeded conditionals. This also eliminates some local variables. Also a few minor cleanups in affected code so checkpatch won't complain. Signed-off-by: Mark Rustad <mark.d.rustad@intel.com> Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c')
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c106
1 file changed, 45 insertions(+), 61 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 50479575e131..cc8f0128286c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -43,16 +43,15 @@
43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) 43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
44{ 44{
45 struct ixgbe_mbx_info *mbx = &hw->mbx; 45 struct ixgbe_mbx_info *mbx = &hw->mbx;
46 s32 ret_val = IXGBE_ERR_MBX;
47 46
48 /* limit read to size of mailbox */ 47 /* limit read to size of mailbox */
49 if (size > mbx->size) 48 if (size > mbx->size)
50 size = mbx->size; 49 size = mbx->size;
51 50
52 if (mbx->ops.read) 51 if (!mbx->ops.read)
53 ret_val = mbx->ops.read(hw, msg, size, mbx_id); 52 return IXGBE_ERR_MBX;
54 53
55 return ret_val; 54 return mbx->ops.read(hw, msg, size, mbx_id);
56} 55}
57 56
58/** 57/**
@@ -87,12 +86,11 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
87s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) 86s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
88{ 87{
89 struct ixgbe_mbx_info *mbx = &hw->mbx; 88 struct ixgbe_mbx_info *mbx = &hw->mbx;
90 s32 ret_val = IXGBE_ERR_MBX;
91 89
92 if (mbx->ops.check_for_msg) 90 if (!mbx->ops.check_for_msg)
93 ret_val = mbx->ops.check_for_msg(hw, mbx_id); 91 return IXGBE_ERR_MBX;
94 92
95 return ret_val; 93 return mbx->ops.check_for_msg(hw, mbx_id);
96} 94}
97 95
98/** 96/**
@@ -105,12 +103,11 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
105s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) 103s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
106{ 104{
107 struct ixgbe_mbx_info *mbx = &hw->mbx; 105 struct ixgbe_mbx_info *mbx = &hw->mbx;
108 s32 ret_val = IXGBE_ERR_MBX;
109 106
110 if (mbx->ops.check_for_ack) 107 if (!mbx->ops.check_for_ack)
111 ret_val = mbx->ops.check_for_ack(hw, mbx_id); 108 return IXGBE_ERR_MBX;
112 109
113 return ret_val; 110 return mbx->ops.check_for_ack(hw, mbx_id);
114} 111}
115 112
116/** 113/**
@@ -123,12 +120,11 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
123s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) 120s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
124{ 121{
125 struct ixgbe_mbx_info *mbx = &hw->mbx; 122 struct ixgbe_mbx_info *mbx = &hw->mbx;
126 s32 ret_val = IXGBE_ERR_MBX;
127 123
128 if (mbx->ops.check_for_rst) 124 if (!mbx->ops.check_for_rst)
129 ret_val = mbx->ops.check_for_rst(hw, mbx_id); 125 return IXGBE_ERR_MBX;
130 126
131 return ret_val; 127 return mbx->ops.check_for_rst(hw, mbx_id);
132} 128}
133 129
134/** 130/**
@@ -144,17 +140,16 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
144 int countdown = mbx->timeout; 140 int countdown = mbx->timeout;
145 141
146 if (!countdown || !mbx->ops.check_for_msg) 142 if (!countdown || !mbx->ops.check_for_msg)
147 goto out; 143 return IXGBE_ERR_MBX;
148 144
149 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { 145 while (mbx->ops.check_for_msg(hw, mbx_id)) {
150 countdown--; 146 countdown--;
151 if (!countdown) 147 if (!countdown)
152 break; 148 return IXGBE_ERR_MBX;
153 udelay(mbx->usec_delay); 149 udelay(mbx->usec_delay);
154 } 150 }
155 151
156out: 152 return 0;
157 return countdown ? 0 : IXGBE_ERR_MBX;
158} 153}
159 154
160/** 155/**
@@ -170,17 +165,16 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
170 int countdown = mbx->timeout; 165 int countdown = mbx->timeout;
171 166
172 if (!countdown || !mbx->ops.check_for_ack) 167 if (!countdown || !mbx->ops.check_for_ack)
173 goto out; 168 return IXGBE_ERR_MBX;
174 169
175 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { 170 while (mbx->ops.check_for_ack(hw, mbx_id)) {
176 countdown--; 171 countdown--;
177 if (!countdown) 172 if (!countdown)
178 break; 173 return IXGBE_ERR_MBX;
179 udelay(mbx->usec_delay); 174 udelay(mbx->usec_delay);
180 } 175 }
181 176
182out: 177 return 0;
183 return countdown ? 0 : IXGBE_ERR_MBX;
184} 178}
185 179
186/** 180/**
@@ -197,18 +191,17 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
197 u16 mbx_id) 191 u16 mbx_id)
198{ 192{
199 struct ixgbe_mbx_info *mbx = &hw->mbx; 193 struct ixgbe_mbx_info *mbx = &hw->mbx;
200 s32 ret_val = IXGBE_ERR_MBX; 194 s32 ret_val;
201 195
202 if (!mbx->ops.read) 196 if (!mbx->ops.read)
203 goto out; 197 return IXGBE_ERR_MBX;
204 198
205 ret_val = ixgbe_poll_for_msg(hw, mbx_id); 199 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
200 if (ret_val)
201 return ret_val;
206 202
207 /* if ack received read message, otherwise we timed out */ 203 /* if ack received read message */
208 if (!ret_val) 204 return mbx->ops.read(hw, msg, size, mbx_id);
209 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
210out:
211 return ret_val;
212} 205}
213 206
214/** 207/**
@@ -225,33 +218,31 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
225 u16 mbx_id) 218 u16 mbx_id)
226{ 219{
227 struct ixgbe_mbx_info *mbx = &hw->mbx; 220 struct ixgbe_mbx_info *mbx = &hw->mbx;
228 s32 ret_val = IXGBE_ERR_MBX; 221 s32 ret_val;
229 222
230 /* exit if either we can't write or there isn't a defined timeout */ 223 /* exit if either we can't write or there isn't a defined timeout */
231 if (!mbx->ops.write || !mbx->timeout) 224 if (!mbx->ops.write || !mbx->timeout)
232 goto out; 225 return IXGBE_ERR_MBX;
233 226
234 /* send msg */ 227 /* send msg */
235 ret_val = mbx->ops.write(hw, msg, size, mbx_id); 228 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
229 if (ret_val)
230 return ret_val;
236 231
237 /* if msg sent wait until we receive an ack */ 232 /* if msg sent wait until we receive an ack */
238 if (!ret_val) 233 return ixgbe_poll_for_ack(hw, mbx_id);
239 ret_val = ixgbe_poll_for_ack(hw, mbx_id);
240out:
241 return ret_val;
242} 234}
243 235
244static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) 236static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
245{ 237{
246 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); 238 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
247 s32 ret_val = IXGBE_ERR_MBX;
248 239
249 if (mbvficr & mask) { 240 if (mbvficr & mask) {
250 ret_val = 0;
251 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); 241 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
242 return 0;
252 } 243 }
253 244
254 return ret_val; 245 return IXGBE_ERR_MBX;
255} 246}
256 247
257/** 248/**
@@ -263,17 +254,16 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
263 **/ 254 **/
264static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) 255static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
265{ 256{
266 s32 ret_val = IXGBE_ERR_MBX;
267 s32 index = IXGBE_MBVFICR_INDEX(vf_number); 257 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
268 u32 vf_bit = vf_number % 16; 258 u32 vf_bit = vf_number % 16;
269 259
270 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, 260 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
271 index)) { 261 index)) {
272 ret_val = 0;
273 hw->mbx.stats.reqs++; 262 hw->mbx.stats.reqs++;
263 return 0;
274 } 264 }
275 265
276 return ret_val; 266 return IXGBE_ERR_MBX;
277} 267}
278 268
279/** 269/**
@@ -285,17 +275,16 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
285 **/ 275 **/
286static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) 276static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
287{ 277{
288 s32 ret_val = IXGBE_ERR_MBX;
289 s32 index = IXGBE_MBVFICR_INDEX(vf_number); 278 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
290 u32 vf_bit = vf_number % 16; 279 u32 vf_bit = vf_number % 16;
291 280
292 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, 281 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
293 index)) { 282 index)) {
294 ret_val = 0;
295 hw->mbx.stats.acks++; 283 hw->mbx.stats.acks++;
284 return 0;
296 } 285 }
297 286
298 return ret_val; 287 return IXGBE_ERR_MBX;
299} 288}
300 289
301/** 290/**
@@ -310,7 +299,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
310 u32 reg_offset = (vf_number < 32) ? 0 : 1; 299 u32 reg_offset = (vf_number < 32) ? 0 : 1;
311 u32 vf_shift = vf_number % 32; 300 u32 vf_shift = vf_number % 32;
312 u32 vflre = 0; 301 u32 vflre = 0;
313 s32 ret_val = IXGBE_ERR_MBX;
314 302
315 switch (hw->mac.type) { 303 switch (hw->mac.type) {
316 case ixgbe_mac_82599EB: 304 case ixgbe_mac_82599EB:
@@ -324,12 +312,12 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
324 } 312 }
325 313
326 if (vflre & (1 << vf_shift)) { 314 if (vflre & (1 << vf_shift)) {
327 ret_val = 0;
328 IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); 315 IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
329 hw->mbx.stats.rsts++; 316 hw->mbx.stats.rsts++;
317 return 0;
330 } 318 }
331 319
332 return ret_val; 320 return IXGBE_ERR_MBX;
333} 321}
334 322
335/** 323/**
@@ -341,7 +329,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
341 **/ 329 **/
342static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) 330static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
343{ 331{
344 s32 ret_val = IXGBE_ERR_MBX;
345 u32 p2v_mailbox; 332 u32 p2v_mailbox;
346 333
347 /* Take ownership of the buffer */ 334 /* Take ownership of the buffer */
@@ -350,9 +337,9 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
350 /* reserve mailbox for vf use */ 337 /* reserve mailbox for vf use */
351 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); 338 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
352 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) 339 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
353 ret_val = 0; 340 return 0;
354 341
355 return ret_val; 342 return IXGBE_ERR_MBX;
356} 343}
357 344
358/** 345/**
@@ -373,7 +360,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
373 /* lock the mailbox to prevent pf/vf race condition */ 360 /* lock the mailbox to prevent pf/vf race condition */
374 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); 361 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
375 if (ret_val) 362 if (ret_val)
376 goto out_no_write; 363 return ret_val;
377 364
378 /* flush msg and acks as we are overwriting the message buffer */ 365 /* flush msg and acks as we are overwriting the message buffer */
379 ixgbe_check_for_msg_pf(hw, vf_number); 366 ixgbe_check_for_msg_pf(hw, vf_number);
@@ -389,9 +376,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
389 /* update stats */ 376 /* update stats */
390 hw->mbx.stats.msgs_tx++; 377 hw->mbx.stats.msgs_tx++;
391 378
392out_no_write: 379 return 0;
393 return ret_val;
394
395} 380}
396 381
397/** 382/**
@@ -414,7 +399,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
414 /* lock the mailbox to prevent pf/vf race condition */ 399 /* lock the mailbox to prevent pf/vf race condition */
415 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); 400 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
416 if (ret_val) 401 if (ret_val)
417 goto out_no_read; 402 return ret_val;
418 403
419 /* copy the message to the mailbox memory buffer */ 404 /* copy the message to the mailbox memory buffer */
420 for (i = 0; i < size; i++) 405 for (i = 0; i < size; i++)
@@ -426,8 +411,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
426 /* update stats */ 411 /* update stats */
427 hw->mbx.stats.msgs_rx++; 412 hw->mbx.stats.msgs_rx++;
428 413
429out_no_read: 414 return 0;
430 return ret_val;
431} 415}
432 416
433#ifdef CONFIG_PCI_IOV 417#ifdef CONFIG_PCI_IOV