author     Yuval Mintz <Yuval.Mintz@qlogic.com>     2014-03-23 12:12:25 -0400
committer  David S. Miller <davem@davemloft.net>    2014-03-25 21:07:04 -0400
commit     2dc33bbc4f8a5d6a05bf3c673b86c37b825450f3 (patch)
tree       7c5dea75fe3c305bfd86967290628924e1ae3376 /drivers/net/ethernet/broadcom
parent     370d4a26590fcc7510ad4a8432e4982a209f1b59 (diff)
bnx2x: Remove the sriov VFOP mechanism
Since we now possess a workqueue dedicated to sriov, the assumption that
sriov-related tasks cannot sleep is no longer correct.
The VFOP mechanism previously supported that assumption - sriov-related
tasks were broken into segments that did not require sleep, and the
mechanism re-scheduled the next segment whenever possible.
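As a rough illustration of that style - all identifiers below are hypothetical,
not taken from the driver - each operation was a non-sleeping callback that
issues one ramrod per segment and relies on the completion path to invoke it
again:

  /* Hypothetical sketch of the removed segmented (VFOP-like) style. */
  enum example_state { EXAMPLE_INIT, EXAMPLE_SETUP, EXAMPLE_DONE };

  struct example_vf {
          enum example_state op_state;
  };

  static void example_fire_ramrod(struct example_vf *vf) { } /* stub: posts a ramrod, must not sleep */
  static void example_op_end(struct example_vf *vf) { }      /* stub: marks the operation complete */

  /* Re-invoked from the completion handler after every ramrod finishes. */
  static void example_vfop_step(struct example_vf *vf)
  {
          switch (vf->op_state) {
          case EXAMPLE_INIT:
                  vf->op_state = EXAMPLE_SETUP;
                  example_fire_ramrod(vf);  /* segment 1 - no sleeping allowed */
                  return;
          case EXAMPLE_SETUP:
                  vf->op_state = EXAMPLE_DONE;
                  example_fire_ramrod(vf);  /* segment 2 */
                  return;
          case EXAMPLE_DONE:
                  example_op_end(vf);
                  return;
          }
  }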
This patch removes the VFOP mechanism altogether - the resulting code is
much easier to follow; the segments are gathered into straightforward
functions which sleep whenever necessary.
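With a dedicated workqueue, the same flow can be written as one function that
blocks on each ramrod before issuing the next - again a hypothetical sketch
(reusing the types above), mirroring the shape of the new helpers in the diff:

  /* Hypothetical sketch of the new straight-line style. */
  static int example_run_ramrod_and_wait(struct example_vf *vf, int cmd)
  {
          /* stub: posts the ramrod and sleeps until its completion arrives */
          return 0;
  }

  static int example_vf_op(struct example_vf *vf)
  {
          int rc;

          rc = example_run_ramrod_and_wait(vf, 0);   /* "init" step - may sleep */
          if (rc)
                  return rc;

          return example_run_ramrod_and_wait(vf, 1); /* "setup" step - may sleep */
  }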
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h        |    1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c   |    2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c  | 1804
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h  |  348
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c   |  421
5 files changed, 622 insertions, 1954 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 8e35dbaca76e..4d8f8aba0ea5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1414,7 +1414,6 @@ enum sp_rtnl_flag {
1414 | 1414 | ||
1415 | enum bnx2x_iov_flag { | 1415 | enum bnx2x_iov_flag { |
1416 | BNX2X_IOV_HANDLE_VF_MSG, | 1416 | BNX2X_IOV_HANDLE_VF_MSG, |
1417 | BNX2X_IOV_CONT_VFOP, | ||
1418 | BNX2X_IOV_HANDLE_FLR, | 1417 | BNX2X_IOV_HANDLE_FLR, |
1419 | }; | 1418 | }; |
1420 | 1419 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b5c7f77e8108..a78edaccceee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1857,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1857 | #else | 1857 | #else |
1858 | return; | 1858 | return; |
1859 | #endif | 1859 | #endif |
1860 | /* SRIOV: reschedule any 'in_progress' operations */ | ||
1861 | bnx2x_iov_sp_event(bp, cid); | ||
1862 | 1860 | ||
1863 | smp_mb__before_atomic_inc(); | 1861 | smp_mb__before_atomic_inc(); |
1864 | atomic_inc(&bp->cq_spq_left); | 1862 | atomic_inc(&bp->cq_spq_left); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 8e2b191234f1..df1507288b3c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -117,87 +117,7 @@ static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
117 | return true; | 117 | return true; |
118 | } | 118 | } |
119 | 119 | ||
120 | /* VFOP - VF slow-path operation support */ | ||
121 | |||
122 | #define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000 | ||
123 | |||
124 | /* VFOP operations states */ | 120 | /* VFOP operations states */ |
125 | enum bnx2x_vfop_qctor_state { | ||
126 | BNX2X_VFOP_QCTOR_INIT, | ||
127 | BNX2X_VFOP_QCTOR_SETUP, | ||
128 | BNX2X_VFOP_QCTOR_INT_EN | ||
129 | }; | ||
130 | |||
131 | enum bnx2x_vfop_qdtor_state { | ||
132 | BNX2X_VFOP_QDTOR_HALT, | ||
133 | BNX2X_VFOP_QDTOR_TERMINATE, | ||
134 | BNX2X_VFOP_QDTOR_CFCDEL, | ||
135 | BNX2X_VFOP_QDTOR_DONE | ||
136 | }; | ||
137 | |||
138 | enum bnx2x_vfop_vlan_mac_state { | ||
139 | BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, | ||
140 | BNX2X_VFOP_VLAN_MAC_CLEAR, | ||
141 | BNX2X_VFOP_VLAN_MAC_CHK_DONE, | ||
142 | BNX2X_VFOP_MAC_CONFIG_LIST, | ||
143 | BNX2X_VFOP_VLAN_CONFIG_LIST, | ||
144 | BNX2X_VFOP_VLAN_CONFIG_LIST_0 | ||
145 | }; | ||
146 | |||
147 | enum bnx2x_vfop_qsetup_state { | ||
148 | BNX2X_VFOP_QSETUP_CTOR, | ||
149 | BNX2X_VFOP_QSETUP_VLAN0, | ||
150 | BNX2X_VFOP_QSETUP_DONE | ||
151 | }; | ||
152 | |||
153 | enum bnx2x_vfop_mcast_state { | ||
154 | BNX2X_VFOP_MCAST_DEL, | ||
155 | BNX2X_VFOP_MCAST_ADD, | ||
156 | BNX2X_VFOP_MCAST_CHK_DONE | ||
157 | }; | ||
158 | enum bnx2x_vfop_qflr_state { | ||
159 | BNX2X_VFOP_QFLR_CLR_VLAN, | ||
160 | BNX2X_VFOP_QFLR_CLR_MAC, | ||
161 | BNX2X_VFOP_QFLR_TERMINATE, | ||
162 | BNX2X_VFOP_QFLR_DONE | ||
163 | }; | ||
164 | |||
165 | enum bnx2x_vfop_flr_state { | ||
166 | BNX2X_VFOP_FLR_QUEUES, | ||
167 | BNX2X_VFOP_FLR_HW | ||
168 | }; | ||
169 | |||
170 | enum bnx2x_vfop_close_state { | ||
171 | BNX2X_VFOP_CLOSE_QUEUES, | ||
172 | BNX2X_VFOP_CLOSE_HW | ||
173 | }; | ||
174 | |||
175 | enum bnx2x_vfop_rxmode_state { | ||
176 | BNX2X_VFOP_RXMODE_CONFIG, | ||
177 | BNX2X_VFOP_RXMODE_DONE | ||
178 | }; | ||
179 | |||
180 | enum bnx2x_vfop_qteardown_state { | ||
181 | BNX2X_VFOP_QTEARDOWN_RXMODE, | ||
182 | BNX2X_VFOP_QTEARDOWN_CLR_VLAN, | ||
183 | BNX2X_VFOP_QTEARDOWN_CLR_MAC, | ||
184 | BNX2X_VFOP_QTEARDOWN_CLR_MCAST, | ||
185 | BNX2X_VFOP_QTEARDOWN_QDTOR, | ||
186 | BNX2X_VFOP_QTEARDOWN_DONE | ||
187 | }; | ||
188 | |||
189 | enum bnx2x_vfop_rss_state { | ||
190 | BNX2X_VFOP_RSS_CONFIG, | ||
191 | BNX2X_VFOP_RSS_DONE | ||
192 | }; | ||
193 | |||
194 | enum bnx2x_vfop_tpa_state { | ||
195 | BNX2X_VFOP_TPA_CONFIG, | ||
196 | BNX2X_VFOP_TPA_DONE | ||
197 | }; | ||
198 | |||
199 | #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) | ||
200 | |||
201 | void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, | 121 | void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, |
202 | struct bnx2x_queue_init_params *init_params, | 122 | struct bnx2x_queue_init_params *init_params, |
203 | struct bnx2x_queue_setup_params *setup_params, | 123 | struct bnx2x_queue_setup_params *setup_params, |
@@ -241,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
241 | void bnx2x_vfop_qctor_prep(struct bnx2x *bp, | 161 | void bnx2x_vfop_qctor_prep(struct bnx2x *bp, |
242 | struct bnx2x_virtf *vf, | 162 | struct bnx2x_virtf *vf, |
243 | struct bnx2x_vf_queue *q, | 163 | struct bnx2x_vf_queue *q, |
244 | struct bnx2x_vfop_qctor_params *p, | 164 | struct bnx2x_vf_queue_construct_params *p, |
245 | unsigned long q_type) | 165 | unsigned long q_type) |
246 | { | 166 | { |
247 | struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; | 167 | struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; |
@@ -310,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
310 | } | 230 | } |
311 | } | 231 | } |
312 | 232 | ||
313 | /* VFOP queue construction */ | 233 | static int bnx2x_vf_queue_create(struct bnx2x *bp, |
314 | static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) | 234 | struct bnx2x_virtf *vf, int qid, |
235 | struct bnx2x_vf_queue_construct_params *qctor) | ||
315 | { | 236 | { |
316 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 237 | struct bnx2x_queue_state_params *q_params; |
317 | struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; | 238 | int rc = 0; |
318 | struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; | ||
319 | enum bnx2x_vfop_qctor_state state = vfop->state; | ||
320 | |||
321 | bnx2x_vfop_reset_wq(vf); | ||
322 | |||
323 | if (vfop->rc < 0) | ||
324 | goto op_err; | ||
325 | |||
326 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | ||
327 | |||
328 | switch (state) { | ||
329 | case BNX2X_VFOP_QCTOR_INIT: | ||
330 | |||
331 | /* has this queue already been opened? */ | ||
332 | if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == | ||
333 | BNX2X_Q_LOGICAL_STATE_ACTIVE) { | ||
334 | DP(BNX2X_MSG_IOV, | ||
335 | "Entered qctor but queue was already up. Aborting gracefully\n"); | ||
336 | goto op_done; | ||
337 | } | ||
338 | |||
339 | /* next state */ | ||
340 | vfop->state = BNX2X_VFOP_QCTOR_SETUP; | ||
341 | |||
342 | q_params->cmd = BNX2X_Q_CMD_INIT; | ||
343 | vfop->rc = bnx2x_queue_state_change(bp, q_params); | ||
344 | |||
345 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
346 | |||
347 | case BNX2X_VFOP_QCTOR_SETUP: | ||
348 | /* next state */ | ||
349 | vfop->state = BNX2X_VFOP_QCTOR_INT_EN; | ||
350 | |||
351 | /* copy pre-prepared setup params to the queue-state params */ | ||
352 | vfop->op_p->qctor.qstate.params.setup = | ||
353 | vfop->op_p->qctor.prep_qsetup; | ||
354 | |||
355 | q_params->cmd = BNX2X_Q_CMD_SETUP; | ||
356 | vfop->rc = bnx2x_queue_state_change(bp, q_params); | ||
357 | 239 | ||
358 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | 240 | DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); |
359 | 241 | ||
360 | case BNX2X_VFOP_QCTOR_INT_EN: | 242 | /* Prepare ramrod information */ |
243 | q_params = &qctor->qstate; | ||
244 | q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); | ||
245 | set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); | ||
361 | 246 | ||
362 | /* enable interrupts */ | 247 | if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == |
363 | bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), | 248 | BNX2X_Q_LOGICAL_STATE_ACTIVE) { |
364 | USTORM_ID, 0, IGU_INT_ENABLE, 0); | 249 | DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n"); |
365 | goto op_done; | 250 | goto out; |
366 | default: | ||
367 | bnx2x_vfop_default(state); | ||
368 | } | 251 | } |
369 | op_err: | ||
370 | BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n", | ||
371 | vf->abs_vfid, args->qid, q_params->cmd, vfop->rc); | ||
372 | op_done: | ||
373 | bnx2x_vfop_end(bp, vf, vfop); | ||
374 | op_pending: | ||
375 | return; | ||
376 | } | ||
377 | |||
378 | static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, | ||
379 | struct bnx2x_virtf *vf, | ||
380 | struct bnx2x_vfop_cmd *cmd, | ||
381 | int qid) | ||
382 | { | ||
383 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | ||
384 | 252 | ||
385 | if (vfop) { | 253 | /* Run Queue 'construction' ramrods */ |
386 | vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); | 254 | q_params->cmd = BNX2X_Q_CMD_INIT; |
255 | rc = bnx2x_queue_state_change(bp, q_params); | ||
256 | if (rc) | ||
257 | goto out; | ||
387 | 258 | ||
388 | vfop->args.qctor.qid = qid; | 259 | memcpy(&q_params->params.setup, &qctor->prep_qsetup, |
389 | vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); | 260 | sizeof(struct bnx2x_queue_setup_params)); |
261 | q_params->cmd = BNX2X_Q_CMD_SETUP; | ||
262 | rc = bnx2x_queue_state_change(bp, q_params); | ||
263 | if (rc) | ||
264 | goto out; | ||
390 | 265 | ||
391 | bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, | 266 | /* enable interrupts */ |
392 | bnx2x_vfop_qctor, cmd->done); | 267 | bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), |
393 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, | 268 | USTORM_ID, 0, IGU_INT_ENABLE, 0); |
394 | cmd->block); | 269 | out: |
395 | } | 270 | return rc; |
396 | return -ENOMEM; | ||
397 | } | 271 | } |
398 | 272 | ||
399 | /* VFOP queue destruction */ | 273 | static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, |
400 | static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) | 274 | int qid) |
401 | { | 275 | { |
402 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 276 | enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, |
403 | struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor; | 277 | BNX2X_Q_CMD_TERMINATE, |
404 | struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; | 278 | BNX2X_Q_CMD_CFC_DEL}; |
405 | enum bnx2x_vfop_qdtor_state state = vfop->state; | 279 | struct bnx2x_queue_state_params q_params; |
406 | 280 | int rc, i; | |
407 | bnx2x_vfop_reset_wq(vf); | ||
408 | |||
409 | if (vfop->rc < 0) | ||
410 | goto op_err; | ||
411 | |||
412 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | ||
413 | |||
414 | switch (state) { | ||
415 | case BNX2X_VFOP_QDTOR_HALT: | ||
416 | |||
417 | /* has this queue already been stopped? */ | ||
418 | if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == | ||
419 | BNX2X_Q_LOGICAL_STATE_STOPPED) { | ||
420 | DP(BNX2X_MSG_IOV, | ||
421 | "Entered qdtor but queue was already stopped. Aborting gracefully\n"); | ||
422 | |||
423 | /* next state */ | ||
424 | vfop->state = BNX2X_VFOP_QDTOR_DONE; | ||
425 | |||
426 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
427 | } | ||
428 | |||
429 | /* next state */ | ||
430 | vfop->state = BNX2X_VFOP_QDTOR_TERMINATE; | ||
431 | |||
432 | q_params->cmd = BNX2X_Q_CMD_HALT; | ||
433 | vfop->rc = bnx2x_queue_state_change(bp, q_params); | ||
434 | |||
435 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
436 | |||
437 | case BNX2X_VFOP_QDTOR_TERMINATE: | ||
438 | /* next state */ | ||
439 | vfop->state = BNX2X_VFOP_QDTOR_CFCDEL; | ||
440 | |||
441 | q_params->cmd = BNX2X_Q_CMD_TERMINATE; | ||
442 | vfop->rc = bnx2x_queue_state_change(bp, q_params); | ||
443 | 281 | ||
444 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | 282 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
445 | 283 | ||
446 | case BNX2X_VFOP_QDTOR_CFCDEL: | 284 | /* Prepare ramrod information */ |
447 | /* next state */ | 285 | memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); |
448 | vfop->state = BNX2X_VFOP_QDTOR_DONE; | 286 | q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); |
287 | set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); | ||
449 | 288 | ||
450 | q_params->cmd = BNX2X_Q_CMD_CFC_DEL; | 289 | if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == |
451 | vfop->rc = bnx2x_queue_state_change(bp, q_params); | 290 | BNX2X_Q_LOGICAL_STATE_STOPPED) { |
291 | DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n"); | ||
292 | goto out; | ||
293 | } | ||
452 | 294 | ||
453 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 295 | /* Run Queue 'destruction' ramrods */ |
454 | op_err: | 296 | for (i = 0; i < ARRAY_SIZE(cmds); i++) { |
455 | BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n", | 297 | q_params.cmd = cmds[i]; |
456 | vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc); | 298 | rc = bnx2x_queue_state_change(bp, &q_params); |
457 | op_done: | 299 | if (rc) { |
458 | case BNX2X_VFOP_QDTOR_DONE: | 300 | BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); |
459 | /* invalidate the context */ | 301 | return rc; |
460 | if (qdtor->cxt) { | ||
461 | qdtor->cxt->ustorm_ag_context.cdu_usage = 0; | ||
462 | qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; | ||
463 | } | 302 | } |
464 | bnx2x_vfop_end(bp, vf, vfop); | ||
465 | return; | ||
466 | default: | ||
467 | bnx2x_vfop_default(state); | ||
468 | } | 303 | } |
469 | op_pending: | 304 | out: |
470 | return; | 305 | /* Clean Context */ |
471 | } | 306 | if (bnx2x_vfq(vf, qid, cxt)) { |
472 | 307 | bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; | |
473 | static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, | 308 | bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; |
474 | struct bnx2x_virtf *vf, | ||
475 | struct bnx2x_vfop_cmd *cmd, | ||
476 | int qid) | ||
477 | { | ||
478 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | ||
479 | |||
480 | if (vfop) { | ||
481 | struct bnx2x_queue_state_params *qstate = | ||
482 | &vf->op_params.qctor.qstate; | ||
483 | |||
484 | memset(qstate, 0, sizeof(*qstate)); | ||
485 | qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); | ||
486 | |||
487 | vfop->args.qdtor.qid = qid; | ||
488 | vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt); | ||
489 | |||
490 | bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT, | ||
491 | bnx2x_vfop_qdtor, cmd->done); | ||
492 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, | ||
493 | cmd->block); | ||
494 | } else { | ||
495 | BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid); | ||
496 | return -ENOMEM; | ||
497 | } | 309 | } |
310 | |||
311 | return 0; | ||
498 | } | 312 | } |
499 | 313 | ||
500 | static void | 314 | static void |
@@ -516,731 +330,291 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
516 | BP_VFDB(bp)->vf_sbs_pool++; | 330 | BP_VFDB(bp)->vf_sbs_pool++; |
517 | } | 331 | } |
518 | 332 | ||
519 | /* VFOP MAC/VLAN helpers */ | 333 | static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, |
520 | static inline void bnx2x_vfop_credit(struct bnx2x *bp, | 334 | struct bnx2x_vlan_mac_obj *obj, |
521 | struct bnx2x_vfop *vfop, | 335 | atomic_t *counter) |
522 | struct bnx2x_vlan_mac_obj *obj) | ||
523 | { | 336 | { |
524 | struct bnx2x_vfop_args_filters *args = &vfop->args.filters; | 337 | struct list_head *pos; |
525 | 338 | int read_lock; | |
526 | /* update credit only if there is no error | 339 | int cnt = 0; |
527 | * and a valid credit counter | ||
528 | */ | ||
529 | if (!vfop->rc && args->credit) { | ||
530 | struct list_head *pos; | ||
531 | int read_lock; | ||
532 | int cnt = 0; | ||
533 | 340 | ||
534 | read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); | 341 | read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); |
535 | if (read_lock) | 342 | if (read_lock) |
536 | DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); | 343 | DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); |
537 | 344 | ||
538 | list_for_each(pos, &obj->head) | 345 | list_for_each(pos, &obj->head) |
539 | cnt++; | 346 | cnt++; |
540 | 347 | ||
541 | if (!read_lock) | 348 | if (!read_lock) |
542 | bnx2x_vlan_mac_h_read_unlock(bp, obj); | 349 | bnx2x_vlan_mac_h_read_unlock(bp, obj); |
543 | 350 | ||
544 | atomic_set(args->credit, cnt); | 351 | atomic_set(counter, cnt); |
545 | } | ||
546 | } | 352 | } |
547 | 353 | ||
548 | static int bnx2x_vfop_set_user_req(struct bnx2x *bp, | 354 | static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, |
549 | struct bnx2x_vfop_filter *pos, | 355 | int qid, bool drv_only, bool mac) |
550 | struct bnx2x_vlan_mac_data *user_req) | ||
551 | { | 356 | { |
552 | user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD : | 357 | struct bnx2x_vlan_mac_ramrod_params ramrod; |
553 | BNX2X_VLAN_MAC_DEL; | 358 | int rc; |
554 | |||
555 | switch (pos->type) { | ||
556 | case BNX2X_VFOP_FILTER_MAC: | ||
557 | memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); | ||
558 | break; | ||
559 | case BNX2X_VFOP_FILTER_VLAN: | ||
560 | user_req->u.vlan.vlan = pos->vid; | ||
561 | break; | ||
562 | default: | ||
563 | BNX2X_ERR("Invalid filter type, skipping\n"); | ||
564 | return 1; | ||
565 | } | ||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | static int bnx2x_vfop_config_list(struct bnx2x *bp, | ||
570 | struct bnx2x_vfop_filters *filters, | ||
571 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) | ||
572 | { | ||
573 | struct bnx2x_vfop_filter *pos, *tmp; | ||
574 | struct list_head rollback_list, *filters_list = &filters->head; | ||
575 | struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; | ||
576 | int rc = 0, cnt = 0; | ||
577 | |||
578 | INIT_LIST_HEAD(&rollback_list); | ||
579 | |||
580 | list_for_each_entry_safe(pos, tmp, filters_list, link) { | ||
581 | if (bnx2x_vfop_set_user_req(bp, pos, user_req)) | ||
582 | continue; | ||
583 | 359 | ||
584 | rc = bnx2x_config_vlan_mac(bp, vlan_mac); | 360 | DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, |
585 | if (rc >= 0) { | 361 | mac ? "MACs" : "VLANs"); |
586 | cnt += pos->add ? 1 : -1; | ||
587 | list_move(&pos->link, &rollback_list); | ||
588 | rc = 0; | ||
589 | } else if (rc == -EEXIST) { | ||
590 | rc = 0; | ||
591 | } else { | ||
592 | BNX2X_ERR("Failed to add a new vlan_mac command\n"); | ||
593 | break; | ||
594 | } | ||
595 | } | ||
596 | 362 | ||
597 | /* rollback if error or too many rules added */ | 363 | /* Prepare ramrod params */ |
598 | if (rc || cnt > filters->add_cnt) { | 364 | memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); |
599 | BNX2X_ERR("error or too many rules added. Performing rollback\n"); | 365 | if (mac) { |
600 | list_for_each_entry_safe(pos, tmp, &rollback_list, link) { | 366 | set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); |
601 | pos->add = !pos->add; /* reverse op */ | 367 | ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); |
602 | bnx2x_vfop_set_user_req(bp, pos, user_req); | 368 | } else { |
603 | bnx2x_config_vlan_mac(bp, vlan_mac); | 369 | set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, |
604 | list_del(&pos->link); | 370 | &ramrod.user_req.vlan_mac_flags); |
605 | } | 371 | ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); |
606 | cnt = 0; | ||
607 | if (!rc) | ||
608 | rc = -EINVAL; | ||
609 | } | 372 | } |
610 | filters->add_cnt = cnt; | 373 | ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; |
611 | return rc; | ||
612 | } | ||
613 | |||
614 | /* VFOP set VLAN/MAC */ | ||
615 | static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) | ||
616 | { | ||
617 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | ||
618 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; | ||
619 | struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; | ||
620 | struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; | ||
621 | |||
622 | enum bnx2x_vfop_vlan_mac_state state = vfop->state; | ||
623 | |||
624 | if (vfop->rc < 0) | ||
625 | goto op_err; | ||
626 | |||
627 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | ||
628 | |||
629 | bnx2x_vfop_reset_wq(vf); | ||
630 | |||
631 | switch (state) { | ||
632 | case BNX2X_VFOP_VLAN_MAC_CLEAR: | ||
633 | /* next state */ | ||
634 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
635 | |||
636 | /* do delete */ | ||
637 | vfop->rc = obj->delete_all(bp, obj, | ||
638 | &vlan_mac->user_req.vlan_mac_flags, | ||
639 | &vlan_mac->ramrod_flags); | ||
640 | |||
641 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
642 | |||
643 | case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: | ||
644 | /* next state */ | ||
645 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
646 | |||
647 | /* do config */ | ||
648 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); | ||
649 | if (vfop->rc == -EEXIST) | ||
650 | vfop->rc = 0; | ||
651 | 374 | ||
652 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | 375 | set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); |
653 | 376 | if (drv_only) | |
654 | case BNX2X_VFOP_VLAN_MAC_CHK_DONE: | 377 | set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); |
655 | vfop->rc = !!obj->raw.check_pending(&obj->raw); | 378 | else |
656 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 379 | set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); |
657 | |||
658 | case BNX2X_VFOP_MAC_CONFIG_LIST: | ||
659 | /* next state */ | ||
660 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
661 | |||
662 | /* do list config */ | ||
663 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); | ||
664 | if (vfop->rc) | ||
665 | goto op_err; | ||
666 | |||
667 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); | ||
668 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); | ||
669 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
670 | |||
671 | case BNX2X_VFOP_VLAN_CONFIG_LIST: | ||
672 | /* next state */ | ||
673 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
674 | |||
675 | /* do list config */ | ||
676 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); | ||
677 | if (!vfop->rc) { | ||
678 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); | ||
679 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); | ||
680 | } | ||
681 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
682 | 380 | ||
683 | default: | 381 | /* Start deleting */ |
684 | bnx2x_vfop_default(state); | 382 | rc = ramrod.vlan_mac_obj->delete_all(bp, |
383 | ramrod.vlan_mac_obj, | ||
384 | &ramrod.user_req.vlan_mac_flags, | ||
385 | &ramrod.ramrod_flags); | ||
386 | if (rc) { | ||
387 | BNX2X_ERR("Failed to delete all %s\n", | ||
388 | mac ? "MACs" : "VLANs"); | ||
389 | return rc; | ||
685 | } | 390 | } |
686 | op_err: | ||
687 | BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); | ||
688 | op_done: | ||
689 | kfree(filters); | ||
690 | bnx2x_vfop_credit(bp, vfop, obj); | ||
691 | bnx2x_vfop_end(bp, vf, vfop); | ||
692 | op_pending: | ||
693 | return; | ||
694 | } | ||
695 | |||
696 | struct bnx2x_vfop_vlan_mac_flags { | ||
697 | bool drv_only; | ||
698 | bool dont_consume; | ||
699 | bool single_cmd; | ||
700 | bool add; | ||
701 | }; | ||
702 | 391 | ||
703 | static void | 392 | /* Clear the vlan counters */ |
704 | bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, | 393 | if (!mac) |
705 | struct bnx2x_vfop_vlan_mac_flags *flags) | 394 | atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); |
706 | { | ||
707 | struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; | ||
708 | |||
709 | memset(ramrod, 0, sizeof(*ramrod)); | ||
710 | 395 | ||
711 | /* ramrod flags */ | 396 | return 0; |
712 | if (flags->drv_only) | ||
713 | set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); | ||
714 | if (flags->single_cmd) | ||
715 | set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); | ||
716 | |||
717 | /* mac_vlan flags */ | ||
718 | if (flags->dont_consume) | ||
719 | set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); | ||
720 | |||
721 | /* cmd */ | ||
722 | ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; | ||
723 | } | ||
724 | |||
725 | static inline void | ||
726 | bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, | ||
727 | struct bnx2x_vfop_vlan_mac_flags *flags) | ||
728 | { | ||
729 | bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags); | ||
730 | set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags); | ||
731 | } | 397 | } |
732 | 398 | ||
733 | static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, | 399 | static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, |
734 | struct bnx2x_virtf *vf, | 400 | struct bnx2x_virtf *vf, int qid, |
735 | struct bnx2x_vfop_cmd *cmd, | 401 | struct bnx2x_vf_mac_vlan_filter *filter, |
736 | int qid, bool drv_only) | 402 | bool drv_only) |
737 | { | 403 | { |
738 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 404 | struct bnx2x_vlan_mac_ramrod_params ramrod; |
739 | 405 | int rc; | |
740 | if (vfop) { | ||
741 | struct bnx2x_vfop_args_filters filters = { | ||
742 | .multi_filter = NULL, /* single */ | ||
743 | .credit = NULL, /* consume credit */ | ||
744 | }; | ||
745 | struct bnx2x_vfop_vlan_mac_flags flags = { | ||
746 | .drv_only = drv_only, | ||
747 | .dont_consume = (filters.credit != NULL), | ||
748 | .single_cmd = true, | ||
749 | .add = false /* don't care */, | ||
750 | }; | ||
751 | struct bnx2x_vlan_mac_ramrod_params *ramrod = | ||
752 | &vf->op_params.vlan_mac; | ||
753 | |||
754 | /* set ramrod params */ | ||
755 | bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); | ||
756 | |||
757 | /* set object */ | ||
758 | ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); | ||
759 | |||
760 | /* set extra args */ | ||
761 | vfop->args.filters = filters; | ||
762 | 406 | ||
763 | bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, | 407 | DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", |
764 | bnx2x_vfop_vlan_mac, cmd->done); | 408 | vf->abs_vfid, filter->add ? "Adding" : "Deleting", |
765 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, | 409 | filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); |
766 | cmd->block); | 410 | |
411 | /* Prepare ramrod params */ | ||
412 | memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); | ||
413 | if (filter->type == BNX2X_VF_FILTER_VLAN) { | ||
414 | set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
415 | &ramrod.user_req.vlan_mac_flags); | ||
416 | ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); | ||
417 | ramrod.user_req.u.vlan.vlan = filter->vid; | ||
418 | } else { | ||
419 | set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); | ||
420 | ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); | ||
421 | memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); | ||
422 | } | ||
423 | ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : | ||
424 | BNX2X_VLAN_MAC_DEL; | ||
425 | |||
426 | /* Verify there are available vlan credits */ | ||
427 | if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && | ||
428 | (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= | ||
429 | vf_vlan_rules_cnt(vf))) { | ||
430 | BNX2X_ERR("No credits for vlan\n"); | ||
431 | return -ENOMEM; | ||
767 | } | 432 | } |
768 | return -ENOMEM; | ||
769 | } | ||
770 | |||
771 | int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, | ||
772 | struct bnx2x_virtf *vf, | ||
773 | struct bnx2x_vfop_cmd *cmd, | ||
774 | struct bnx2x_vfop_filters *macs, | ||
775 | int qid, bool drv_only) | ||
776 | { | ||
777 | struct bnx2x_vfop *vfop; | ||
778 | 433 | ||
779 | if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) | 434 | set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); |
780 | return -EINVAL; | 435 | if (drv_only) |
781 | 436 | set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); | |
782 | vfop = bnx2x_vfop_add(bp, vf); | 437 | else |
783 | if (vfop) { | 438 | set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); |
784 | struct bnx2x_vfop_args_filters filters = { | 439 | |
785 | .multi_filter = macs, | 440 | /* Add/Remove the filter */ |
786 | .credit = NULL, /* consume credit */ | 441 | rc = bnx2x_config_vlan_mac(bp, &ramrod); |
787 | }; | 442 | if (rc && rc != -EEXIST) { |
788 | struct bnx2x_vfop_vlan_mac_flags flags = { | 443 | BNX2X_ERR("Failed to %s %s\n", |
789 | .drv_only = drv_only, | 444 | filter->add ? "add" : "delete", |
790 | .dont_consume = (filters.credit != NULL), | 445 | filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : |
791 | .single_cmd = false, | 446 | "VLAN"); |
792 | .add = false, /* don't care since only the items in the | 447 | return rc; |
793 | * filters list affect the sp operation, | ||
794 | * not the list itself | ||
795 | */ | ||
796 | }; | ||
797 | struct bnx2x_vlan_mac_ramrod_params *ramrod = | ||
798 | &vf->op_params.vlan_mac; | ||
799 | |||
800 | /* set ramrod params */ | ||
801 | bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); | ||
802 | |||
803 | /* set object */ | ||
804 | ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); | ||
805 | |||
806 | /* set extra args */ | ||
807 | filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; | ||
808 | vfop->args.filters = filters; | ||
809 | |||
810 | bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, | ||
811 | bnx2x_vfop_vlan_mac, cmd->done); | ||
812 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, | ||
813 | cmd->block); | ||
814 | } | 448 | } |
815 | return -ENOMEM; | ||
816 | } | ||
817 | |||
818 | static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, | ||
819 | struct bnx2x_virtf *vf, | ||
820 | struct bnx2x_vfop_cmd *cmd, | ||
821 | int qid, u16 vid, bool add) | ||
822 | { | ||
823 | struct bnx2x_vfop *vfop; | ||
824 | 449 | ||
825 | if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) | 450 | /* Update the vlan counters */ |
826 | return -EINVAL; | 451 | if (filter->type == BNX2X_VF_FILTER_VLAN) |
452 | bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, | ||
453 | &bnx2x_vfq(vf, qid, vlan_count)); | ||
827 | 454 | ||
828 | vfop = bnx2x_vfop_add(bp, vf); | 455 | return 0; |
829 | if (vfop) { | ||
830 | struct bnx2x_vfop_args_filters filters = { | ||
831 | .multi_filter = NULL, /* single command */ | ||
832 | .credit = &bnx2x_vfq(vf, qid, vlan_count), | ||
833 | }; | ||
834 | struct bnx2x_vfop_vlan_mac_flags flags = { | ||
835 | .drv_only = false, | ||
836 | .dont_consume = (filters.credit != NULL), | ||
837 | .single_cmd = true, | ||
838 | .add = add, | ||
839 | }; | ||
840 | struct bnx2x_vlan_mac_ramrod_params *ramrod = | ||
841 | &vf->op_params.vlan_mac; | ||
842 | |||
843 | /* set ramrod params */ | ||
844 | bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); | ||
845 | ramrod->user_req.u.vlan.vlan = vid; | ||
846 | |||
847 | /* set object */ | ||
848 | ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); | ||
849 | |||
850 | /* set extra args */ | ||
851 | vfop->args.filters = filters; | ||
852 | |||
853 | bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, | ||
854 | bnx2x_vfop_vlan_mac, cmd->done); | ||
855 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, | ||
856 | cmd->block); | ||
857 | } | ||
858 | return -ENOMEM; | ||
859 | } | 456 | } |
860 | 457 | ||
861 | static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, | 458 | int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, |
862 | struct bnx2x_virtf *vf, | 459 | struct bnx2x_vf_mac_vlan_filters *filters, |
863 | struct bnx2x_vfop_cmd *cmd, | 460 | int qid, bool drv_only) |
864 | int qid, bool drv_only) | ||
865 | { | 461 | { |
866 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 462 | int rc = 0, i; |
867 | |||
868 | if (vfop) { | ||
869 | struct bnx2x_vfop_args_filters filters = { | ||
870 | .multi_filter = NULL, /* single command */ | ||
871 | .credit = &bnx2x_vfq(vf, qid, vlan_count), | ||
872 | }; | ||
873 | struct bnx2x_vfop_vlan_mac_flags flags = { | ||
874 | .drv_only = drv_only, | ||
875 | .dont_consume = (filters.credit != NULL), | ||
876 | .single_cmd = true, | ||
877 | .add = false, /* don't care */ | ||
878 | }; | ||
879 | struct bnx2x_vlan_mac_ramrod_params *ramrod = | ||
880 | &vf->op_params.vlan_mac; | ||
881 | |||
882 | /* set ramrod params */ | ||
883 | bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); | ||
884 | 463 | ||
885 | /* set object */ | 464 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
886 | ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); | ||
887 | |||
888 | /* set extra args */ | ||
889 | vfop->args.filters = filters; | ||
890 | |||
891 | bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, | ||
892 | bnx2x_vfop_vlan_mac, cmd->done); | ||
893 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, | ||
894 | cmd->block); | ||
895 | } | ||
896 | return -ENOMEM; | ||
897 | } | ||
898 | |||
899 | int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, | ||
900 | struct bnx2x_virtf *vf, | ||
901 | struct bnx2x_vfop_cmd *cmd, | ||
902 | struct bnx2x_vfop_filters *vlans, | ||
903 | int qid, bool drv_only) | ||
904 | { | ||
905 | struct bnx2x_vfop *vfop; | ||
906 | 465 | ||
907 | if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) | 466 | if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) |
908 | return -EINVAL; | 467 | return -EINVAL; |
909 | 468 | ||
910 | vfop = bnx2x_vfop_add(bp, vf); | 469 | /* Prepare ramrod params */ |
911 | if (vfop) { | 470 | for (i = 0; i < filters->count; i++) { |
912 | struct bnx2x_vfop_args_filters filters = { | 471 | rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, |
913 | .multi_filter = vlans, | 472 | &filters->filters[i], drv_only); |
914 | .credit = &bnx2x_vfq(vf, qid, vlan_count), | 473 | if (rc) |
915 | }; | 474 | break; |
916 | struct bnx2x_vfop_vlan_mac_flags flags = { | ||
917 | .drv_only = drv_only, | ||
918 | .dont_consume = (filters.credit != NULL), | ||
919 | .single_cmd = false, | ||
920 | .add = false, /* don't care */ | ||
921 | }; | ||
922 | struct bnx2x_vlan_mac_ramrod_params *ramrod = | ||
923 | &vf->op_params.vlan_mac; | ||
924 | |||
925 | /* set ramrod params */ | ||
926 | bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); | ||
927 | |||
928 | /* set object */ | ||
929 | ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); | ||
930 | |||
931 | /* set extra args */ | ||
932 | filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) - | ||
933 | atomic_read(filters.credit); | ||
934 | |||
935 | vfop->args.filters = filters; | ||
936 | |||
937 | bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST, | ||
938 | bnx2x_vfop_vlan_mac, cmd->done); | ||
939 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, | ||
940 | cmd->block); | ||
941 | } | 475 | } |
942 | return -ENOMEM; | ||
943 | } | ||
944 | |||
945 | /* VFOP queue setup (queue constructor + set vlan 0) */ | ||
946 | static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf) | ||
947 | { | ||
948 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | ||
949 | int qid = vfop->args.qctor.qid; | ||
950 | enum bnx2x_vfop_qsetup_state state = vfop->state; | ||
951 | struct bnx2x_vfop_cmd cmd = { | ||
952 | .done = bnx2x_vfop_qsetup, | ||
953 | .block = false, | ||
954 | }; | ||
955 | |||
956 | if (vfop->rc < 0) | ||
957 | goto op_err; | ||
958 | |||
959 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | ||
960 | |||
961 | switch (state) { | ||
962 | case BNX2X_VFOP_QSETUP_CTOR: | ||
963 | /* init the queue ctor command */ | ||
964 | vfop->state = BNX2X_VFOP_QSETUP_VLAN0; | ||
965 | vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); | ||
966 | if (vfop->rc) | ||
967 | goto op_err; | ||
968 | return; | ||
969 | |||
970 | case BNX2X_VFOP_QSETUP_VLAN0: | ||
971 | /* skip if non-leading or FPGA/EMU*/ | ||
972 | if (qid) | ||
973 | goto op_done; | ||
974 | 476 | ||
975 | /* init the queue set-vlan command (for vlan 0) */ | 477 | /* Rollback if needed */ |
976 | vfop->state = BNX2X_VFOP_QSETUP_DONE; | 478 | if (i != filters->count) { |
977 | vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); | 479 | BNX2X_ERR("Managed only %d/%d filters - rolling back\n", |
978 | if (vfop->rc) | 480 | i, filters->count + 1); |
979 | goto op_err; | 481 | while (--i >= 0) { |
980 | return; | 482 | filters->filters[i].add = !filters->filters[i].add; |
981 | op_err: | 483 | bnx2x_vf_mac_vlan_config(bp, vf, qid, |
982 | BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); | 484 | &filters->filters[i], |
983 | op_done: | 485 | drv_only); |
984 | case BNX2X_VFOP_QSETUP_DONE: | 486 | } |
985 | vf->cfg_flags |= VF_CFG_VLAN; | ||
986 | bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, | ||
987 | BNX2X_MSG_IOV); | ||
988 | bnx2x_vfop_end(bp, vf, vfop); | ||
989 | return; | ||
990 | default: | ||
991 | bnx2x_vfop_default(state); | ||
992 | } | 487 | } |
993 | } | ||
994 | |||
995 | int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, | ||
996 | struct bnx2x_virtf *vf, | ||
997 | struct bnx2x_vfop_cmd *cmd, | ||
998 | int qid) | ||
999 | { | ||
1000 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | ||
1001 | 488 | ||
1002 | if (vfop) { | 489 | /* It's our responsibility to free the filters */ |
1003 | vfop->args.qctor.qid = qid; | 490 | kfree(filters); |
1004 | 491 | ||
1005 | bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, | 492 | return rc; |
1006 | bnx2x_vfop_qsetup, cmd->done); | ||
1007 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, | ||
1008 | cmd->block); | ||
1009 | } | ||
1010 | return -ENOMEM; | ||
1011 | } | 493 | } |
1012 | 494 | ||
1013 | /* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ | 495 | int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, |
1014 | static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) | 496 | struct bnx2x_vf_queue_construct_params *qctor) |
1015 | { | 497 | { |
1016 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 498 | int rc; |
1017 | int qid = vfop->args.qx.qid; | ||
1018 | enum bnx2x_vfop_qflr_state state = vfop->state; | ||
1019 | struct bnx2x_queue_state_params *qstate; | ||
1020 | struct bnx2x_vfop_cmd cmd; | ||
1021 | 499 | ||
1022 | bnx2x_vfop_reset_wq(vf); | 500 | DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); |
1023 | 501 | ||
1024 | if (vfop->rc < 0) | 502 | rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); |
503 | if (rc) | ||
1025 | goto op_err; | 504 | goto op_err; |
1026 | 505 | ||
1027 | DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); | 506 | /* Configure vlan0 for leading queue */ |
1028 | 507 | if (!qid) { | |
1029 | cmd.done = bnx2x_vfop_qflr; | 508 | struct bnx2x_vf_mac_vlan_filter filter; |
1030 | cmd.block = false; | ||
1031 | |||
1032 | switch (state) { | ||
1033 | case BNX2X_VFOP_QFLR_CLR_VLAN: | ||
1034 | /* vlan-clear-all: driver-only, don't consume credit */ | ||
1035 | vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; | ||
1036 | |||
1037 | /* the vlan_mac vfop will re-schedule us */ | ||
1038 | vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); | ||
1039 | if (vfop->rc) | ||
1040 | goto op_err; | ||
1041 | return; | ||
1042 | 509 | ||
1043 | case BNX2X_VFOP_QFLR_CLR_MAC: | 510 | memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); |
1044 | /* mac-clear-all: driver only consume credit */ | 511 | filter.type = BNX2X_VF_FILTER_VLAN; |
1045 | vfop->state = BNX2X_VFOP_QFLR_TERMINATE; | 512 | filter.add = true; |
1046 | /* the vlan_mac vfop will re-schedule us */ | 513 | filter.vid = 0; |
1047 | vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); | 514 | rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); |
1048 | if (vfop->rc) | 515 | if (rc) |
1049 | goto op_err; | 516 | goto op_err; |
1050 | return; | 517 | } |
1051 | |||
1052 | case BNX2X_VFOP_QFLR_TERMINATE: | ||
1053 | qstate = &vfop->op_p->qctor.qstate; | ||
1054 | memset(qstate , 0, sizeof(*qstate)); | ||
1055 | qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); | ||
1056 | vfop->state = BNX2X_VFOP_QFLR_DONE; | ||
1057 | |||
1058 | DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", | ||
1059 | vf->abs_vfid, qstate->q_obj->state); | ||
1060 | |||
1061 | if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { | ||
1062 | qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; | ||
1063 | qstate->cmd = BNX2X_Q_CMD_TERMINATE; | ||
1064 | vfop->rc = bnx2x_queue_state_change(bp, qstate); | ||
1065 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); | ||
1066 | } else { | ||
1067 | goto op_done; | ||
1068 | } | ||
1069 | 518 | ||
519 | /* Schedule the configuration of any pending vlan filters */ | ||
520 | vf->cfg_flags |= VF_CFG_VLAN; | ||
521 | bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, | ||
522 | BNX2X_MSG_IOV); | ||
523 | return 0; | ||
1070 | op_err: | 524 | op_err: |
1071 | BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", | 525 | BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); |
1072 | vf->abs_vfid, qid, vfop->rc); | 526 | return rc; |
1073 | op_done: | ||
1074 | case BNX2X_VFOP_QFLR_DONE: | ||
1075 | bnx2x_vfop_end(bp, vf, vfop); | ||
1076 | return; | ||
1077 | default: | ||
1078 | bnx2x_vfop_default(state); | ||
1079 | } | ||
1080 | op_pending: | ||
1081 | return; | ||
1082 | } | 527 | } |
1083 | 528 | ||
1084 | static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, | 529 | static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1085 | struct bnx2x_virtf *vf, | ||
1086 | struct bnx2x_vfop_cmd *cmd, | ||
1087 | int qid) | 530 | int qid) |
1088 | { | 531 | { |
1089 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 532 | int rc; |
1090 | |||
1091 | if (vfop) { | ||
1092 | vfop->args.qx.qid = qid; | ||
1093 | if ((qid == LEADING_IDX) && | ||
1094 | bnx2x_validate_vf_sp_objs(bp, vf, false)) | ||
1095 | bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, | ||
1096 | bnx2x_vfop_qflr, cmd->done); | ||
1097 | else | ||
1098 | bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE, | ||
1099 | bnx2x_vfop_qflr, cmd->done); | ||
1100 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, | ||
1101 | cmd->block); | ||
1102 | } | ||
1103 | return -ENOMEM; | ||
1104 | } | ||
1105 | |||
1106 | /* VFOP multi-casts */ | ||
1107 | static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) | ||
1108 | { | ||
1109 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | ||
1110 | struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; | ||
1111 | struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; | ||
1112 | struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; | ||
1113 | enum bnx2x_vfop_mcast_state state = vfop->state; | ||
1114 | int i; | ||
1115 | 533 | ||
1116 | bnx2x_vfop_reset_wq(vf); | 534 | DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); |
1117 | 535 | ||
1118 | if (vfop->rc < 0) | 536 | /* If needed, clean the filtering data base */ |
1119 | goto op_err; | 537 | if ((qid == LEADING_IDX) && |
538 | bnx2x_validate_vf_sp_objs(bp, vf, false)) { | ||
539 | rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); | ||
540 | if (rc) | ||
541 | goto op_err; | ||
542 | rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); | ||
543 | if (rc) | ||
544 | goto op_err; | ||
545 | } | ||
1120 | 546 | ||
1121 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | 547 | /* Terminate queue */ |
1122 | 548 | if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { | |
1123 | switch (state) { | 549 | struct bnx2x_queue_state_params qstate; |
1124 | case BNX2X_VFOP_MCAST_DEL: | ||
1125 | /* clear existing mcasts */ | ||
1126 | vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD | ||
1127 | : BNX2X_VFOP_MCAST_CHK_DONE; | ||
1128 | mcast->mcast_list_len = vf->mcast_list_len; | ||
1129 | vf->mcast_list_len = args->mc_num; | ||
1130 | vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL); | ||
1131 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
1132 | |||
1133 | case BNX2X_VFOP_MCAST_ADD: | ||
1134 | if (raw->check_pending(raw)) | ||
1135 | goto op_pending; | ||
1136 | |||
1137 | /* update mcast list on the ramrod params */ | ||
1138 | INIT_LIST_HEAD(&mcast->mcast_list); | ||
1139 | for (i = 0; i < args->mc_num; i++) | ||
1140 | list_add_tail(&(args->mc[i].link), | ||
1141 | &mcast->mcast_list); | ||
1142 | mcast->mcast_list_len = args->mc_num; | ||
1143 | 550 | ||
1144 | /* add new mcasts */ | 551 | memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); |
1145 | vfop->state = BNX2X_VFOP_MCAST_CHK_DONE; | 552 | qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); |
1146 | vfop->rc = bnx2x_config_mcast(bp, mcast, | 553 | qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; |
1147 | BNX2X_MCAST_CMD_ADD); | 554 | qstate.cmd = BNX2X_Q_CMD_TERMINATE; |
1148 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 555 | set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); |
1149 | 556 | rc = bnx2x_queue_state_change(bp, &qstate); | |
1150 | case BNX2X_VFOP_MCAST_CHK_DONE: | 557 | if (rc) |
1151 | vfop->rc = raw->check_pending(raw) ? 1 : 0; | 558 | goto op_err; |
1152 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | ||
1153 | default: | ||
1154 | bnx2x_vfop_default(state); | ||
1155 | } | 559 | } |
1156 | op_err: | ||
1157 | BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc); | ||
1158 | op_done: | ||
1159 | kfree(args->mc); | ||
1160 | bnx2x_vfop_end(bp, vf, vfop); | ||
1161 | op_pending: | ||
1162 | return; | ||
1163 | } | ||
1164 | 560 | ||
1165 | int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, | 561 | return 0; |
1166 | struct bnx2x_virtf *vf, | 562 | op_err: |
1167 | struct bnx2x_vfop_cmd *cmd, | 563 | BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); |
1168 | bnx2x_mac_addr_t *mcasts, | 564 | return rc; |
1169 | int mcast_num, bool drv_only) | ||
1170 | { | ||
1171 | struct bnx2x_vfop *vfop = NULL; | ||
1172 | size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem); | ||
1173 | struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) : | ||
1174 | NULL; | ||
1175 | |||
1176 | if (!mc_sz || mc) { | ||
1177 | vfop = bnx2x_vfop_add(bp, vf); | ||
1178 | if (vfop) { | ||
1179 | int i; | ||
1180 | struct bnx2x_mcast_ramrod_params *ramrod = | ||
1181 | &vf->op_params.mcast; | ||
1182 | |||
1183 | /* set ramrod params */ | ||
1184 | memset(ramrod, 0, sizeof(*ramrod)); | ||
1185 | ramrod->mcast_obj = &vf->mcast_obj; | ||
1186 | if (drv_only) | ||
1187 | set_bit(RAMROD_DRV_CLR_ONLY, | ||
1188 | &ramrod->ramrod_flags); | ||
1189 | |||
1190 | /* copy mcasts pointers */ | ||
1191 | vfop->args.mc_list.mc_num = mcast_num; | ||
1192 | vfop->args.mc_list.mc = mc; | ||
1193 | for (i = 0; i < mcast_num; i++) | ||
1194 | mc[i].mac = mcasts[i]; | ||
1195 | |||
1196 | bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL, | ||
1197 | bnx2x_vfop_mcast, cmd->done); | ||
1198 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast, | ||
1199 | cmd->block); | ||
1200 | } else { | ||
1201 | kfree(mc); | ||
1202 | } | ||
1203 | } | ||
1204 | return -ENOMEM; | ||
1205 | } | 565 | } |
1206 | 566 | ||
1207 | /* VFOP rx-mode */ | 567 | int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1208 | static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) | 568 | bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) |
1209 | { | 569 | { |
1210 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 570 | struct bnx2x_mcast_list_elem *mc = NULL; |
1211 | struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode; | 571 | struct bnx2x_mcast_ramrod_params mcast; |
1212 | enum bnx2x_vfop_rxmode_state state = vfop->state; | 572 | int rc, i; |
1213 | 573 | ||
1214 | bnx2x_vfop_reset_wq(vf); | 574 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
1215 | 575 | ||
1216 | if (vfop->rc < 0) | 576 | /* Prepare Multicast command */ |
1217 | goto op_err; | 577 | memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); |
578 | mcast.mcast_obj = &vf->mcast_obj; | ||
579 | if (drv_only) | ||
580 | set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); | ||
581 | else | ||
582 | set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); | ||
583 | if (mc_num) { | ||
584 | mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), | ||
585 | GFP_KERNEL); | ||
586 | if (!mc) { | ||
587 | BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n"); | ||
588 | return -ENOMEM; | ||
589 | } | ||
590 | } | ||
1218 | 591 | ||
1219 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | 592 | /* clear existing mcasts */ |
593 | mcast.mcast_list_len = vf->mcast_list_len; | ||
594 | vf->mcast_list_len = mc_num; | ||
595 | rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); | ||
596 | if (rc) { | ||
597 | BNX2X_ERR("Failed to remove multicasts\n"); | ||
598 | return rc; | ||
599 | } | ||
1220 | 600 | ||
1221 | switch (state) { | 601 | /* update mcast list on the ramrod params */ |
1222 | case BNX2X_VFOP_RXMODE_CONFIG: | 602 | if (mc_num) { |
1223 | /* next state */ | 603 | INIT_LIST_HEAD(&mcast.mcast_list); |
1224 | vfop->state = BNX2X_VFOP_RXMODE_DONE; | 604 | for (i = 0; i < mc_num; i++) { |
605 | mc[i].mac = mcasts[i]; | ||
606 | list_add_tail(&mc[i].link, | ||
607 | &mcast.mcast_list); | ||
608 | } | ||
1225 | 609 | ||
1226 | /* record the accept flags in vfdb so hypervisor can modify them | 610 | /* add new mcasts */ |
1227 | * if necessary | 611 | rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); |
1228 | */ | 612 | if (rc) |
1229 | bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) = | 613 | BNX2X_ERR("Faled to add multicasts\n"); |
1230 | ramrod->rx_accept_flags; | 614 | kfree(mc); |
1231 | vfop->rc = bnx2x_config_rx_mode(bp, ramrod); | ||
1232 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | ||
1233 | op_err: | ||
1234 | BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc); | ||
1235 | op_done: | ||
1236 | case BNX2X_VFOP_RXMODE_DONE: | ||
1237 | bnx2x_vfop_end(bp, vf, vfop); | ||
1238 | return; | ||
1239 | default: | ||
1240 | bnx2x_vfop_default(state); | ||
1241 | } | 615 | } |
1242 | op_pending: | 616 | |
1243 | return; | 617 | return rc; |
1244 | } | 618 | } |
1245 | 619 | ||
1246 | static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, | 620 | static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, |
@@ -1268,121 +642,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1268 | ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); | 642 | ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); |
1269 | } | 643 | } |
1270 | 644 | ||
1271 | int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, | 645 | int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1272 | struct bnx2x_virtf *vf, | 646 | int qid, unsigned long accept_flags) |
1273 | struct bnx2x_vfop_cmd *cmd, | ||
1274 | int qid, unsigned long accept_flags) | ||
1275 | { | 647 | { |
1276 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 648 | struct bnx2x_rx_mode_ramrod_params ramrod; |
1277 | |||
1278 | if (vfop) { | ||
1279 | struct bnx2x_rx_mode_ramrod_params *ramrod = | ||
1280 | &vf->op_params.rx_mode; | ||
1281 | 649 | ||
1282 | bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); | 650 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
1283 | 651 | ||
1284 | bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, | 652 | bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); |
1285 | bnx2x_vfop_rxmode, cmd->done); | 653 | set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); |
1286 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode, | 654 | vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; |
1287 | cmd->block); | 655 | return bnx2x_config_rx_mode(bp, &ramrod); |
1288 | } | ||
1289 | return -ENOMEM; | ||
1290 | } | 656 | } |
1291 | 657 | ||
1292 | /* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs, | 658 | int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) |
1293 | * queue destructor) | ||
1294 | */ | ||
1295 | static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf) | ||
1296 | { | 659 | { |
1297 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 660 | int rc; |
1298 | int qid = vfop->args.qx.qid; | ||
1299 | enum bnx2x_vfop_qteardown_state state = vfop->state; | ||
1300 | struct bnx2x_vfop_cmd cmd; | ||
1301 | |||
1302 | if (vfop->rc < 0) | ||
1303 | goto op_err; | ||
1304 | |||
1305 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | ||
1306 | |||
1307 | cmd.done = bnx2x_vfop_qdown; | ||
1308 | cmd.block = false; | ||
1309 | |||
1310 | switch (state) { | ||
1311 | case BNX2X_VFOP_QTEARDOWN_RXMODE: | ||
1312 | /* Drop all */ | ||
1313 | if (bnx2x_validate_vf_sp_objs(bp, vf, true)) | ||
1314 | vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN; | ||
1315 | else | ||
1316 | vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR; | ||
1317 | vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0); | ||
1318 | if (vfop->rc) | ||
1319 | goto op_err; | ||
1320 | return; | ||
1321 | |||
1322 | case BNX2X_VFOP_QTEARDOWN_CLR_VLAN: | ||
1323 | /* vlan-clear-all: don't consume credit */ | ||
1324 | vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC; | ||
1325 | vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false); | ||
1326 | if (vfop->rc) | ||
1327 | goto op_err; | ||
1328 | return; | ||
1329 | |||
1330 | case BNX2X_VFOP_QTEARDOWN_CLR_MAC: | ||
1331 | /* mac-clear-all: consume credit */ | ||
1332 | vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST; | ||
1333 | vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false); | ||
1334 | if (vfop->rc) | ||
1335 | goto op_err; | ||
1336 | return; | ||
1337 | 661 | ||
1338 | case BNX2X_VFOP_QTEARDOWN_CLR_MCAST: | 662 | DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); |
1339 | vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR; | ||
1340 | vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); | ||
1341 | if (vfop->rc) | ||
1342 | goto op_err; | ||
1343 | return; | ||
1344 | 663 | ||
1345 | case BNX2X_VFOP_QTEARDOWN_QDTOR: | 664 | /* Remove all classification configuration for leading queue */ |
1346 | /* run the queue destruction flow */ | 665 | if (qid == LEADING_IDX) { |
1347 | DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n"); | 666 | rc = bnx2x_vf_rxmode(bp, vf, qid, 0); |
1348 | vfop->state = BNX2X_VFOP_QTEARDOWN_DONE; | 667 | if (rc) |
1349 | DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n"); | ||
1350 | vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid); | ||
1351 | DP(BNX2X_MSG_IOV, "returned from cmd\n"); | ||
1352 | if (vfop->rc) | ||
1353 | goto op_err; | 668 | goto op_err; |
1354 | return; | ||
1355 | op_err: | ||
1356 | BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n", | ||
1357 | vf->abs_vfid, qid, vfop->rc); | ||
1358 | 669 | ||
1359 | case BNX2X_VFOP_QTEARDOWN_DONE: | 670 | /* Remove filtering if feasible */ |
1360 | bnx2x_vfop_end(bp, vf, vfop); | 671 | if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { |
1361 | return; | 672 | rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, |
1362 | default: | 673 | false, false); |
1363 | bnx2x_vfop_default(state); | 674 | if (rc) |
1364 | } | 675 | goto op_err; |
1365 | } | 676 | rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, |
1366 | 677 | false, true); | |
1367 | int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, | 678 | if (rc) |
1368 | struct bnx2x_virtf *vf, | 679 | goto op_err; |
1369 | struct bnx2x_vfop_cmd *cmd, | 680 | rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); |
1370 | int qid) | 681 | if (rc) |
1371 | { | 682 | goto op_err; |
1372 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 683 | } |
1373 | |||
1374 | /* for non leading queues skip directly to qdown sate */ | ||
1375 | if (vfop) { | ||
1376 | vfop->args.qx.qid = qid; | ||
1377 | bnx2x_vfop_opset(qid == LEADING_IDX ? | ||
1378 | BNX2X_VFOP_QTEARDOWN_RXMODE : | ||
1379 | BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, | ||
1380 | cmd->done); | ||
1381 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, | ||
1382 | cmd->block); | ||
1383 | } | 684 | } |
1384 | 685 | ||
1385 | return -ENOMEM; | 686 | /* Destroy queue */ |
687 | rc = bnx2x_vf_queue_destroy(bp, vf, qid); | ||
688 | if (rc) | ||
689 | goto op_err; | ||
690 | return rc; | ||
691 | op_err: | ||
692 | BNX2X_ERR("vf[%d:%d] error: rc %d\n", | ||
693 | vf->abs_vfid, qid, rc); | ||
694 | return rc; | ||
1386 | } | 695 | } |
1387 | 696 | ||
1388 | /* VF enable primitives | 697 | /* VF enable primitives |
@@ -1582,120 +891,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
1582 | bnx2x_tx_hw_flushed(bp, poll_cnt); | 891 | bnx2x_tx_hw_flushed(bp, poll_cnt); |
1583 | } | 892 | } |
1584 | 893 | ||
1585 | static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) | 894 | static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) |
1586 | { | 895 | { |
1587 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 896 | int rc, i; |
1588 | struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; | ||
1589 | enum bnx2x_vfop_flr_state state = vfop->state; | ||
1590 | struct bnx2x_vfop_cmd cmd = { | ||
1591 | .done = bnx2x_vfop_flr, | ||
1592 | .block = false, | ||
1593 | }; | ||
1594 | |||
1595 | if (vfop->rc < 0) | ||
1596 | goto op_err; | ||
1597 | 897 | ||
1598 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | 898 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
1599 | 899 | ||
1600 | switch (state) { | 900 | /* the cleanup operations are valid if and only if the VF |
1601 | case BNX2X_VFOP_FLR_QUEUES: | 901 | * was first acquired. |
1602 | /* the cleanup operations are valid if and only if the VF | 902 | */ |
1603 | * was first acquired. | 903 | for (i = 0; i < vf_rxq_count(vf); i++) { |
1604 | */ | 904 | rc = bnx2x_vf_queue_flr(bp, vf, i); |
1605 | if (++(qx->qid) < vf_rxq_count(vf)) { | 905 | if (rc) |
1606 | vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd, | 906 | goto out; |
1607 | qx->qid); | 907 | } |
1608 | if (vfop->rc) | ||
1609 | goto op_err; | ||
1610 | return; | ||
1611 | } | ||
1612 | /* remove multicasts */ | ||
1613 | vfop->state = BNX2X_VFOP_FLR_HW; | ||
1614 | vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, | ||
1615 | 0, true); | ||
1616 | if (vfop->rc) | ||
1617 | goto op_err; | ||
1618 | return; | ||
1619 | case BNX2X_VFOP_FLR_HW: | ||
1620 | 908 | ||
1621 | /* dispatch final cleanup and wait for HW queues to flush */ | 909 | /* remove multicasts */ |
1622 | bnx2x_vf_flr_clnup_hw(bp, vf); | 910 | bnx2x_vf_mcast(bp, vf, NULL, 0, true); |
1623 | 911 | ||
1624 | /* release VF resources */ | 912 | /* dispatch final cleanup and wait for HW queues to flush */ |
1625 | bnx2x_vf_free_resc(bp, vf); | 913 | bnx2x_vf_flr_clnup_hw(bp, vf); |
1626 | 914 | ||
1627 | /* re-open the mailbox */ | 915 | /* release VF resources */ |
1628 | bnx2x_vf_enable_mbx(bp, vf->abs_vfid); | 916 | bnx2x_vf_free_resc(bp, vf); |
1629 | 917 | ||
1630 | goto op_done; | 918 | /* re-open the mailbox */ |
1631 | default: | 919 | bnx2x_vf_enable_mbx(bp, vf->abs_vfid); |
1632 | bnx2x_vfop_default(state); | 920 | return; |
1633 | } | 921 | out: |
1634 | op_err: | 922 | BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", |
1635 | BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc); | 923 | vf->abs_vfid, i, rc); |
1636 | op_done: | ||
1637 | vf->flr_clnup_stage = VF_FLR_ACK; | ||
1638 | bnx2x_vfop_end(bp, vf, vfop); | ||
1639 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); | ||
1640 | } | ||
1641 | |||
1642 | static int bnx2x_vfop_flr_cmd(struct bnx2x *bp, | ||
1643 | struct bnx2x_virtf *vf, | ||
1644 | vfop_handler_t done) | ||
1645 | { | ||
1646 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | ||
1647 | if (vfop) { | ||
1648 | vfop->args.qx.qid = -1; /* loop */ | ||
1649 | bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES, | ||
1650 | bnx2x_vfop_flr, done); | ||
1651 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false); | ||
1652 | } | ||
1653 | return -ENOMEM; | ||
1654 | } | 924 | } |
1655 | 925 | ||
1656 | static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf) | 926 | static void bnx2x_vf_flr_clnup(struct bnx2x *bp) |
1657 | { | 927 | { |
1658 | int i = prev_vf ? prev_vf->index + 1 : 0; | ||
1659 | struct bnx2x_virtf *vf; | 928 | struct bnx2x_virtf *vf; |
929 | int i; | ||
1660 | 930 | ||
1661 | /* find next VF to cleanup */ | 931 | for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { |
1662 | next_vf_to_clean: | 932 | /* VF should be RESET & in FLR cleanup states */ |
1663 | for (; | 933 | if (bnx2x_vf(bp, i, state) != VF_RESET || |
1664 | i < BNX2X_NR_VIRTFN(bp) && | 934 | !bnx2x_vf(bp, i, flr_clnup_stage)) |
1665 | (bnx2x_vf(bp, i, state) != VF_RESET || | 935 | continue; |
1666 | bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN); | ||
1667 | i++) | ||
1668 | ; | ||
1669 | 936 | ||
1670 | DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, | 937 | DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", |
1671 | BNX2X_NR_VIRTFN(bp)); | 938 | i, BNX2X_NR_VIRTFN(bp)); |
1672 | 939 | ||
1673 | if (i < BNX2X_NR_VIRTFN(bp)) { | ||
1674 | vf = BP_VF(bp, i); | 940 | vf = BP_VF(bp, i); |
1675 | 941 | ||
1676 | /* lock the vf pf channel */ | 942 | /* lock the vf pf channel */ |
1677 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); | 943 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); |
1678 | 944 | ||
1679 | /* invoke the VF FLR SM */ | 945 | /* invoke the VF FLR SM */ |
1680 | if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) { | 946 | bnx2x_vf_flr(bp, vf); |
1681 | BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n", | ||
1682 | vf->abs_vfid); | ||
1683 | 947 | ||
1684 | /* mark the VF to be ACKED and continue */ | 948 | /* mark the VF to be ACKED and continue */ |
1685 | vf->flr_clnup_stage = VF_FLR_ACK; | 949 | vf->flr_clnup_stage = false; |
1686 | goto next_vf_to_clean; | 950 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); |
1687 | } | ||
1688 | return; | ||
1689 | } | ||
1690 | |||
1691 | /* we are done, update vf records */ | ||
1692 | for_each_vf(bp, i) { | ||
1693 | vf = BP_VF(bp, i); | ||
1694 | |||
1695 | if (vf->flr_clnup_stage != VF_FLR_ACK) | ||
1696 | continue; | ||
1697 | |||
1698 | vf->flr_clnup_stage = VF_FLR_EPILOG; | ||
1699 | } | 951 | } |
1700 | 952 | ||
1701 | /* Acknowledge the handled VFs. | 953 | /* Acknowledge the handled VFs. |
@@ -1745,7 +997,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) | |||
1745 | if (reset) { | 997 | if (reset) { |
1746 | /* set as reset and ready for cleanup */ | 998 | /* set as reset and ready for cleanup */ |
1747 | vf->state = VF_RESET; | 999 | vf->state = VF_RESET; |
1748 | vf->flr_clnup_stage = VF_FLR_CLN; | 1000 | vf->flr_clnup_stage = true; |
1749 | 1001 | ||
1750 | DP(BNX2X_MSG_IOV, | 1002 | DP(BNX2X_MSG_IOV, |
1751 | "Initiating Final cleanup for VF %d\n", | 1003 | "Initiating Final cleanup for VF %d\n", |
@@ -1754,7 +1006,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) | |||
1754 | } | 1006 | } |
1755 | 1007 | ||
1756 | /* do the FLR cleanup for all marked VFs*/ | 1008 | /* do the FLR cleanup for all marked VFs*/ |
1757 | bnx2x_vf_flr_clnup(bp, NULL); | 1009 | bnx2x_vf_flr_clnup(bp); |
1758 | } | 1010 | } |
1759 | 1011 | ||
1760 | /* IOV global initialization routines */ | 1012 | /* IOV global initialization routines */ |
@@ -2021,7 +1273,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, | |||
2021 | bnx2x_vf(bp, i, index) = i; | 1273 | bnx2x_vf(bp, i, index) = i; |
2022 | bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; | 1274 | bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; |
2023 | bnx2x_vf(bp, i, state) = VF_FREE; | 1275 | bnx2x_vf(bp, i, state) = VF_FREE; |
2024 | INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); | ||
2025 | mutex_init(&bnx2x_vf(bp, i, op_mutex)); | 1276 | mutex_init(&bnx2x_vf(bp, i, op_mutex)); |
2026 | bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; | 1277 | bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; |
2027 | } | 1278 | } |
@@ -2288,7 +1539,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp) | |||
2288 | 1539 | ||
2289 | /* release all the VFs */ | 1540 | /* release all the VFs */ |
2290 | for_each_vf(bp, i) | 1541 | for_each_vf(bp, i) |
2291 | bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ | 1542 | bnx2x_vf_release(bp, BP_VF(bp, i)); |
2292 | 1543 | ||
2293 | return 0; | 1544 | return 0; |
2294 | } | 1545 | } |
@@ -2378,6 +1629,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, | |||
2378 | smp_mb__after_clear_bit(); | 1629 | smp_mb__after_clear_bit(); |
2379 | } | 1630 | } |
2380 | 1631 | ||
1632 | static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, | ||
1633 | struct bnx2x_virtf *vf) | ||
1634 | { | ||
1635 | vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); | ||
1636 | } | ||
1637 | |||
2381 | int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) | 1638 | int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) |
2382 | { | 1639 | { |
2383 | struct bnx2x_virtf *vf; | 1640 | struct bnx2x_virtf *vf; |
@@ -2402,6 +1659,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) | |||
2402 | case EVENT_RING_OPCODE_CLASSIFICATION_RULES: | 1659 | case EVENT_RING_OPCODE_CLASSIFICATION_RULES: |
2403 | case EVENT_RING_OPCODE_MULTICAST_RULES: | 1660 | case EVENT_RING_OPCODE_MULTICAST_RULES: |
2404 | case EVENT_RING_OPCODE_FILTERS_RULES: | 1661 | case EVENT_RING_OPCODE_FILTERS_RULES: |
1662 | case EVENT_RING_OPCODE_RSS_UPDATE_RULES: | ||
2405 | cid = (elem->message.data.eth_event.echo & | 1663 | cid = (elem->message.data.eth_event.echo & |
2406 | BNX2X_SWCID_MASK); | 1664 | BNX2X_SWCID_MASK); |
2407 | DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); | 1665 | DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); |
@@ -2466,13 +1724,15 @@ get_vf: | |||
2466 | vf->abs_vfid, qidx); | 1724 | vf->abs_vfid, qidx); |
2467 | bnx2x_vf_handle_filters_eqe(bp, vf); | 1725 | bnx2x_vf_handle_filters_eqe(bp, vf); |
2468 | break; | 1726 | break; |
1727 | case EVENT_RING_OPCODE_RSS_UPDATE_RULES: | ||
1728 | DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", | ||
1729 | vf->abs_vfid, qidx); | ||
1730 | bnx2x_vf_handle_rss_update_eqe(bp, vf); | ||
2469 | case EVENT_RING_OPCODE_VF_FLR: | 1731 | case EVENT_RING_OPCODE_VF_FLR: |
2470 | case EVENT_RING_OPCODE_MALICIOUS_VF: | 1732 | case EVENT_RING_OPCODE_MALICIOUS_VF: |
2471 | /* Do nothing for now */ | 1733 | /* Do nothing for now */ |
2472 | return 0; | 1734 | return 0; |
2473 | } | 1735 | } |
2474 | /* SRIOV: reschedule any 'in_progress' operations */ | ||
2475 | bnx2x_iov_sp_event(bp, cid); | ||
2476 | 1736 | ||
2477 | return 0; | 1737 | return 0; |
2478 | } | 1738 | } |
@@ -2509,22 +1769,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, | |||
2509 | } | 1769 | } |
2510 | } | 1770 | } |
2511 | 1771 | ||
2512 | void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) | ||
2513 | { | ||
2514 | struct bnx2x_virtf *vf; | ||
2515 | |||
2516 | /* check if the cid is the VF range */ | ||
2517 | if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) | ||
2518 | return; | ||
2519 | |||
2520 | vf = bnx2x_vf_by_cid(bp, vf_cid); | ||
2521 | if (vf) { | ||
2522 | /* set in_progress flag */ | ||
2523 | atomic_set(&vf->op_in_progress, 1); | ||
2524 | bnx2x_schedule_iov_task(bp, BNX2X_IOV_CONT_VFOP); | ||
2525 | } | ||
2526 | } | ||
2527 | |||
2528 | void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) | 1772 | void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) |
2529 | { | 1773 | { |
2530 | int i; | 1774 | int i; |
@@ -2606,33 +1850,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) | |||
2606 | bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; | 1850 | bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; |
2607 | } | 1851 | } |
2608 | 1852 | ||
2609 | void bnx2x_iov_vfop_cont(struct bnx2x *bp) | ||
2610 | { | ||
2611 | int i; | ||
2612 | |||
2613 | if (!IS_SRIOV(bp)) | ||
2614 | return; | ||
2615 | /* Iterate over all VFs and invoke state transition for VFs with | ||
2616 | * 'in-progress' slow-path operations | ||
2617 | */ | ||
2618 | DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP), | ||
2619 | "searching for pending vf operations\n"); | ||
2620 | for_each_vf(bp, i) { | ||
2621 | struct bnx2x_virtf *vf = BP_VF(bp, i); | ||
2622 | |||
2623 | if (!vf) { | ||
2624 | BNX2X_ERR("VF was null! skipping...\n"); | ||
2625 | continue; | ||
2626 | } | ||
2627 | |||
2628 | if (!list_empty(&vf->op_list_head) && | ||
2629 | atomic_read(&vf->op_in_progress)) { | ||
2630 | DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); | ||
2631 | bnx2x_vfop_cur(bp, vf)->transition(bp, vf); | ||
2632 | } | ||
2633 | } | ||
2634 | } | ||
2635 | |||
2636 | static inline | 1853 | static inline |
2637 | struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) | 1854 | struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) |
2638 | { | 1855 | { |
@@ -2868,52 +2085,26 @@ static void bnx2x_set_vf_state(void *cookie) | |||
2868 | p->vf->state = p->state; | 2085 | p->vf->state = p->state; |
2869 | } | 2086 | } |
2870 | 2087 | ||
2871 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ | 2088 | int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) |
2872 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | ||
2873 | { | 2089 | { |
2874 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 2090 | int rc = 0, i; |
2875 | struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; | ||
2876 | enum bnx2x_vfop_close_state state = vfop->state; | ||
2877 | struct bnx2x_vfop_cmd cmd = { | ||
2878 | .done = bnx2x_vfop_close, | ||
2879 | .block = false, | ||
2880 | }; | ||
2881 | |||
2882 | if (vfop->rc < 0) | ||
2883 | goto op_err; | ||
2884 | |||
2885 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | ||
2886 | 2091 | ||
2887 | switch (state) { | 2092 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
2888 | case BNX2X_VFOP_CLOSE_QUEUES: | ||
2889 | |||
2890 | if (++(qx->qid) < vf_rxq_count(vf)) { | ||
2891 | vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); | ||
2892 | if (vfop->rc) | ||
2893 | goto op_err; | ||
2894 | return; | ||
2895 | } | ||
2896 | vfop->state = BNX2X_VFOP_CLOSE_HW; | ||
2897 | vfop->rc = 0; | ||
2898 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
2899 | 2093 | ||
2900 | case BNX2X_VFOP_CLOSE_HW: | 2094 | /* Close all queues */ |
2095 | for (i = 0; i < vf_rxq_count(vf); i++) { | ||
2096 | rc = bnx2x_vf_queue_teardown(bp, vf, i); | ||
2097 | if (rc) | ||
2098 | goto op_err; | ||
2099 | } | ||
2901 | 2100 | ||
2902 | /* disable the interrupts */ | 2101 | /* disable the interrupts */ |
2903 | DP(BNX2X_MSG_IOV, "disabling igu\n"); | 2102 | DP(BNX2X_MSG_IOV, "disabling igu\n"); |
2904 | bnx2x_vf_igu_disable(bp, vf); | 2103 | bnx2x_vf_igu_disable(bp, vf); |
2905 | 2104 | ||
2906 | /* disable the VF */ | 2105 | /* disable the VF */ |
2907 | DP(BNX2X_MSG_IOV, "clearing qtbl\n"); | 2106 | DP(BNX2X_MSG_IOV, "clearing qtbl\n"); |
2908 | bnx2x_vf_clr_qtbl(bp, vf); | 2107 | bnx2x_vf_clr_qtbl(bp, vf); |
2909 | |||
2910 | goto op_done; | ||
2911 | default: | ||
2912 | bnx2x_vfop_default(state); | ||
2913 | } | ||
2914 | op_err: | ||
2915 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); | ||
2916 | op_done: | ||
2917 | 2108 | ||
2918 | /* need to make sure there are no outstanding stats ramrods which may | 2109 | /* need to make sure there are no outstanding stats ramrods which may |
2919 | * cause the device to access the VF's stats buffer which it will free | 2110 | * cause the device to access the VF's stats buffer which it will free |
@@ -2928,43 +2119,20 @@ op_done: | |||
2928 | } | 2119 | } |
2929 | 2120 | ||
2930 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); | 2121 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); |
2931 | bnx2x_vfop_end(bp, vf, vfop); | ||
2932 | op_pending: | ||
2933 | /* Not supported at the moment; Exists for macros only */ | ||
2934 | return; | ||
2935 | } | ||
2936 | 2122 | ||
2937 | int bnx2x_vfop_close_cmd(struct bnx2x *bp, | 2123 | return 0; |
2938 | struct bnx2x_virtf *vf, | 2124 | op_err: |
2939 | struct bnx2x_vfop_cmd *cmd) | 2125 | BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); |
2940 | { | 2126 | return rc; |
2941 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | ||
2942 | if (vfop) { | ||
2943 | vfop->args.qx.qid = -1; /* loop */ | ||
2944 | bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, | ||
2945 | bnx2x_vfop_close, cmd->done); | ||
2946 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, | ||
2947 | cmd->block); | ||
2948 | } | ||
2949 | return -ENOMEM; | ||
2950 | } | 2127 | } |
2951 | 2128 | ||
2952 | /* VF release can be called either: 1. The VF was acquired but | 2129 | /* VF release can be called either: 1. The VF was acquired but |
2953 | * not enabled 2. the vf was enabled or in the process of being | 2130 | * not enabled 2. the vf was enabled or in the process of being |
2954 | * enabled | 2131 | * enabled |
2955 | */ | 2132 | */ |
2956 | static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) | 2133 | int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) |
2957 | { | 2134 | { |
2958 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 2135 | int rc; |
2959 | struct bnx2x_vfop_cmd cmd = { | ||
2960 | .done = bnx2x_vfop_release, | ||
2961 | .block = false, | ||
2962 | }; | ||
2963 | |||
2964 | DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); | ||
2965 | |||
2966 | if (vfop->rc < 0) | ||
2967 | goto op_err; | ||
2968 | 2136 | ||
2969 | DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, | 2137 | DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, |
2970 | vf->state == VF_FREE ? "Free" : | 2138 | vf->state == VF_FREE ? "Free" : |
@@ -2975,193 +2143,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
2975 | 2143 | ||
2976 | switch (vf->state) { | 2144 | switch (vf->state) { |
2977 | case VF_ENABLED: | 2145 | case VF_ENABLED: |
2978 | vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); | 2146 | rc = bnx2x_vf_close(bp, vf); |
2979 | if (vfop->rc) | 2147 | if (rc) |
2980 | goto op_err; | 2148 | goto op_err; |
2981 | return; | 2149 | /* Fallthrough to release resources */ |
2982 | |||
2983 | case VF_ACQUIRED: | 2150 | case VF_ACQUIRED: |
2984 | DP(BNX2X_MSG_IOV, "about to free resources\n"); | 2151 | DP(BNX2X_MSG_IOV, "about to free resources\n"); |
2985 | bnx2x_vf_free_resc(bp, vf); | 2152 | bnx2x_vf_free_resc(bp, vf); |
2986 | DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); | 2153 | break; |
2987 | goto op_done; | ||
2988 | 2154 | ||
2989 | case VF_FREE: | 2155 | case VF_FREE: |
2990 | case VF_RESET: | 2156 | case VF_RESET: |
2991 | /* do nothing */ | ||
2992 | goto op_done; | ||
2993 | default: | 2157 | default: |
2994 | bnx2x_vfop_default(vf->state); | 2158 | break; |
2995 | } | ||
2996 | op_err: | ||
2997 | BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); | ||
2998 | op_done: | ||
2999 | bnx2x_vfop_end(bp, vf, vfop); | ||
3000 | } | ||
3001 | |||
3002 | static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) | ||
3003 | { | ||
3004 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | ||
3005 | enum bnx2x_vfop_rss_state state; | ||
3006 | |||
3007 | if (!vfop) { | ||
3008 | BNX2X_ERR("vfop was null\n"); | ||
3009 | return; | ||
3010 | } | 2159 | } |
3011 | 2160 | return 0; | |
3012 | state = vfop->state; | ||
3013 | bnx2x_vfop_reset_wq(vf); | ||
3014 | |||
3015 | if (vfop->rc < 0) | ||
3016 | goto op_err; | ||
3017 | |||
3018 | DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); | ||
3019 | |||
3020 | switch (state) { | ||
3021 | case BNX2X_VFOP_RSS_CONFIG: | ||
3022 | /* next state */ | ||
3023 | vfop->state = BNX2X_VFOP_RSS_DONE; | ||
3024 | bnx2x_config_rss(bp, &vfop->op_p->rss); | ||
3025 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | ||
3026 | op_err: | 2161 | op_err: |
3027 | BNX2X_ERR("RSS error: rc %d\n", vfop->rc); | 2162 | BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); |
3028 | op_done: | 2163 | return rc; |
3029 | case BNX2X_VFOP_RSS_DONE: | ||
3030 | bnx2x_vfop_end(bp, vf, vfop); | ||
3031 | return; | ||
3032 | default: | ||
3033 | bnx2x_vfop_default(state); | ||
3034 | } | ||
3035 | op_pending: | ||
3036 | return; | ||
3037 | } | ||
3038 | |||
3039 | int bnx2x_vfop_release_cmd(struct bnx2x *bp, | ||
3040 | struct bnx2x_virtf *vf, | ||
3041 | struct bnx2x_vfop_cmd *cmd) | ||
3042 | { | ||
3043 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | ||
3044 | if (vfop) { | ||
3045 | bnx2x_vfop_opset(-1, /* use vf->state */ | ||
3046 | bnx2x_vfop_release, cmd->done); | ||
3047 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, | ||
3048 | cmd->block); | ||
3049 | } | ||
3050 | return -ENOMEM; | ||
3051 | } | 2164 | } |
3052 | 2165 | ||
3053 | int bnx2x_vfop_rss_cmd(struct bnx2x *bp, | 2166 | int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, |
3054 | struct bnx2x_virtf *vf, | 2167 | struct bnx2x_config_rss_params *rss) |
3055 | struct bnx2x_vfop_cmd *cmd) | ||
3056 | { | 2168 | { |
3057 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 2169 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
3058 | 2170 | set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); | |
3059 | if (vfop) { | 2171 | return bnx2x_config_rss(bp, rss); |
3060 | bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, | ||
3061 | cmd->done); | ||
3062 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, | ||
3063 | cmd->block); | ||
3064 | } | ||
3065 | return -ENOMEM; | ||
3066 | } | 2172 | } |
3067 | 2173 | ||
3068 | /* VFOP tpa update, send update on all queues */ | 2174 | int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, |
3069 | static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf) | 2175 | struct vfpf_tpa_tlv *tlv, |
2176 | struct bnx2x_queue_update_tpa_params *params) | ||
3070 | { | 2177 | { |
3071 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 2178 | aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; |
3072 | struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa; | 2179 | struct bnx2x_queue_state_params qstate; |
3073 | enum bnx2x_vfop_tpa_state state = vfop->state; | 2180 | int qid, rc = 0; |
3074 | |||
3075 | bnx2x_vfop_reset_wq(vf); | ||
3076 | 2181 | ||
3077 | if (vfop->rc < 0) | 2182 | DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); |
3078 | goto op_err; | ||
3079 | |||
3080 | DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n", | ||
3081 | vf->abs_vfid, tpa_args->qid, | ||
3082 | state); | ||
3083 | |||
3084 | switch (state) { | ||
3085 | case BNX2X_VFOP_TPA_CONFIG: | ||
3086 | |||
3087 | if (tpa_args->qid < vf_rxq_count(vf)) { | ||
3088 | struct bnx2x_queue_state_params *qstate = | ||
3089 | &vf->op_params.qstate; | ||
3090 | 2183 | ||
3091 | qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj); | 2184 | /* Set ramrod params */ |
2185 | memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); | ||
2186 | memcpy(&qstate.params.update_tpa, params, | ||
2187 | sizeof(struct bnx2x_queue_update_tpa_params)); | ||
2188 | qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; | ||
2189 | set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); | ||
3092 | 2190 | ||
3093 | /* The only thing that changes for the ramrod params | 2191 | for (qid = 0; qid < vf_rxq_count(vf); qid++) { |
3094 | * between calls is the sge_map | 2192 | qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); |
3095 | */ | 2193 | qstate.params.update_tpa.sge_map = sge_addr[qid]; |
3096 | qstate->params.update_tpa.sge_map = | 2194 | DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", |
3097 | tpa_args->sge_map[tpa_args->qid]; | 2195 | vf->abs_vfid, qid, U64_HI(sge_addr[qid]), |
3098 | 2196 | U64_LO(sge_addr[qid])); | |
3099 | DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n", | 2197 | rc = bnx2x_queue_state_change(bp, &qstate); |
3100 | tpa_args->qid, | 2198 | if (rc) { |
3101 | U64_HI(qstate->params.update_tpa.sge_map), | 2199 | BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", |
3102 | U64_LO(qstate->params.update_tpa.sge_map)); | 2200 | U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), |
3103 | qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA; | 2201 | vf->abs_vfid, qid); |
3104 | vfop->rc = bnx2x_queue_state_change(bp, qstate); | 2202 | return rc; |
3105 | |||
3106 | tpa_args->qid++; | ||
3107 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); | ||
3108 | } | 2203 | } |
3109 | vfop->state = BNX2X_VFOP_TPA_DONE; | ||
3110 | vfop->rc = 0; | ||
3111 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | ||
3112 | op_err: | ||
3113 | BNX2X_ERR("TPA update error: rc %d\n", vfop->rc); | ||
3114 | op_done: | ||
3115 | case BNX2X_VFOP_TPA_DONE: | ||
3116 | bnx2x_vfop_end(bp, vf, vfop); | ||
3117 | return; | ||
3118 | default: | ||
3119 | bnx2x_vfop_default(state); | ||
3120 | } | 2204 | } |
3121 | op_pending: | ||
3122 | return; | ||
3123 | } | ||
3124 | |||
3125 | int bnx2x_vfop_tpa_cmd(struct bnx2x *bp, | ||
3126 | struct bnx2x_virtf *vf, | ||
3127 | struct bnx2x_vfop_cmd *cmd, | ||
3128 | struct vfpf_tpa_tlv *tpa_tlv) | ||
3129 | { | ||
3130 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | ||
3131 | 2205 | ||
3132 | if (vfop) { | 2206 | return rc; |
3133 | vfop->args.qx.qid = 0; /* loop */ | ||
3134 | memcpy(&vfop->args.tpa.sge_map, | ||
3135 | tpa_tlv->tpa_client_info.sge_addr, | ||
3136 | sizeof(vfop->args.tpa.sge_map)); | ||
3137 | bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG, | ||
3138 | bnx2x_vfop_tpa, cmd->done); | ||
3139 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa, | ||
3140 | cmd->block); | ||
3141 | } | ||
3142 | return -ENOMEM; | ||
3143 | } | 2207 | } |
3144 | 2208 | ||
3145 | /* VF release ~ VF close + VF release-resources | 2209 | /* VF release ~ VF close + VF release-resources |
3146 | * Release is the ultimate SW shutdown and is called whenever an | 2210 | * Release is the ultimate SW shutdown and is called whenever an |
3147 | * irrecoverable error is encountered. | 2211 | * irrecoverable error is encountered. |
3148 | */ | 2212 | */ |
3149 | void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) | 2213 | int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) |
3150 | { | 2214 | { |
3151 | struct bnx2x_vfop_cmd cmd = { | ||
3152 | .done = NULL, | ||
3153 | .block = block, | ||
3154 | }; | ||
3155 | int rc; | 2215 | int rc; |
3156 | 2216 | ||
3157 | DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); | 2217 | DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); |
3158 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); | 2218 | bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); |
3159 | 2219 | ||
3160 | rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); | 2220 | rc = bnx2x_vf_free(bp, vf); |
3161 | if (rc) | 2221 | if (rc) |
3162 | WARN(rc, | 2222 | WARN(rc, |
3163 | "VF[%d] Failed to allocate resources for release op- rc=%d\n", | 2223 | "VF[%d] Failed to allocate resources for release op- rc=%d\n", |
3164 | vf->abs_vfid, rc); | 2224 | vf->abs_vfid, rc); |
2225 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); | ||
2226 | return rc; | ||
3165 | } | 2227 | } |
3166 | 2228 | ||
3167 | static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, | 2229 | static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, |
@@ -3889,10 +2951,6 @@ void bnx2x_iov_task(struct work_struct *work) | |||
3889 | &bp->iov_task_state)) | 2951 | &bp->iov_task_state)) |
3890 | bnx2x_vf_handle_flr_event(bp); | 2952 | bnx2x_vf_handle_flr_event(bp); |
3891 | 2953 | ||
3892 | if (test_and_clear_bit(BNX2X_IOV_CONT_VFOP, | ||
3893 | &bp->iov_task_state)) | ||
3894 | bnx2x_iov_vfop_cont(bp); | ||
3895 | |||
3896 | if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, | 2954 | if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, |
3897 | &bp->iov_task_state)) | 2955 | &bp->iov_task_state)) |
3898 | bnx2x_vf_mbx(bp); | 2956 | bnx2x_vf_mbx(bp); |
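A minimal sketch of the calling pattern the converted bnx2x_sriov.c code above follows, assuming the driver's internal headers and the bnx2x_vf_* helpers visible in the hunks; my_vf_shutdown() is a hypothetical caller added only for illustration and is not part of the patch:

/* Sketch only, not part of the patch: assumes bnx2x.h/bnx2x_sriov.h and the
 * helpers introduced above; my_vf_shutdown() is a hypothetical caller.
 */
static int my_vf_shutdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int qid, rc;

	/* plain loop over the VF's queues; each helper may sleep */
	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
		rc = bnx2x_vf_queue_teardown(bp, vf, qid);
		if (rc)
			goto op_err;
	}
	return 0;

op_err:
	BNX2X_ERR("vf[%d:%d] shutdown failed: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

Because the helpers wait for completion internally (RAMROD_COMP_WAIT is set before issuing ramrods, as in bnx2x_vf_rss_update() and bnx2x_vf_tpa_update() above), callers can use this straight-line goto-based error handling instead of re-scheduling VFOP state-machine segments.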
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 87f7c9743f71..db73a247ecfb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -88,113 +88,32 @@ struct bnx2x_vf_queue { | |||
88 | bool sp_initialized; | 88 | bool sp_initialized; |
89 | }; | 89 | }; |
90 | 90 | ||
91 | /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: | 91 | /* struct bnx2x_vf_queue_construct_params - prepare queue construction |
92 | * q-init, q-setup and SB index | 92 | * parameters: q-init, q-setup and SB index |
93 | */ | 93 | */ |
94 | struct bnx2x_vfop_qctor_params { | 94 | struct bnx2x_vf_queue_construct_params { |
95 | struct bnx2x_queue_state_params qstate; | 95 | struct bnx2x_queue_state_params qstate; |
96 | struct bnx2x_queue_setup_params prep_qsetup; | 96 | struct bnx2x_queue_setup_params prep_qsetup; |
97 | }; | 97 | }; |
98 | 98 | ||
99 | /* VFOP parameters (one copy per VF) */ | ||
100 | union bnx2x_vfop_params { | ||
101 | struct bnx2x_vlan_mac_ramrod_params vlan_mac; | ||
102 | struct bnx2x_rx_mode_ramrod_params rx_mode; | ||
103 | struct bnx2x_mcast_ramrod_params mcast; | ||
104 | struct bnx2x_config_rss_params rss; | ||
105 | struct bnx2x_vfop_qctor_params qctor; | ||
106 | struct bnx2x_queue_state_params qstate; | ||
107 | }; | ||
108 | |||
109 | /* forward */ | 99 | /* forward */ |
110 | struct bnx2x_virtf; | 100 | struct bnx2x_virtf; |
111 | 101 | ||
112 | /* VFOP definitions */ | 102 | /* VFOP definitions */ |
113 | typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); | ||
114 | 103 | ||
115 | struct bnx2x_vfop_cmd { | 104 | struct bnx2x_vf_mac_vlan_filter { |
116 | vfop_handler_t done; | ||
117 | bool block; | ||
118 | }; | ||
119 | |||
120 | /* VFOP queue filters command additional arguments */ | ||
121 | struct bnx2x_vfop_filter { | ||
122 | struct list_head link; | ||
123 | int type; | 105 | int type; |
124 | #define BNX2X_VFOP_FILTER_MAC 1 | 106 | #define BNX2X_VF_FILTER_MAC 1 |
125 | #define BNX2X_VFOP_FILTER_VLAN 2 | 107 | #define BNX2X_VF_FILTER_VLAN 2 |
126 | 108 | ||
127 | bool add; | 109 | bool add; |
128 | u8 *mac; | 110 | u8 *mac; |
129 | u16 vid; | 111 | u16 vid; |
130 | }; | 112 | }; |
131 | 113 | ||
132 | struct bnx2x_vfop_filters { | 114 | struct bnx2x_vf_mac_vlan_filters { |
133 | int add_cnt; | 115 | int count; |
134 | struct list_head head; | 116 | struct bnx2x_vf_mac_vlan_filter filters[]; |
135 | struct bnx2x_vfop_filter filters[]; | ||
136 | }; | ||
137 | |||
138 | /* transient list allocated, built and saved until its | ||
139 | * passed to the SP-VERBs layer. | ||
140 | */ | ||
141 | struct bnx2x_vfop_args_mcast { | ||
142 | int mc_num; | ||
143 | struct bnx2x_mcast_list_elem *mc; | ||
144 | }; | ||
145 | |||
146 | struct bnx2x_vfop_args_qctor { | ||
147 | int qid; | ||
148 | u16 sb_idx; | ||
149 | }; | ||
150 | |||
151 | struct bnx2x_vfop_args_qdtor { | ||
152 | int qid; | ||
153 | struct eth_context *cxt; | ||
154 | }; | ||
155 | |||
156 | struct bnx2x_vfop_args_defvlan { | ||
157 | int qid; | ||
158 | bool enable; | ||
159 | u16 vid; | ||
160 | u8 prio; | ||
161 | }; | ||
162 | |||
163 | struct bnx2x_vfop_args_qx { | ||
164 | int qid; | ||
165 | bool en_add; | ||
166 | }; | ||
167 | |||
168 | struct bnx2x_vfop_args_filters { | ||
169 | struct bnx2x_vfop_filters *multi_filter; | ||
170 | atomic_t *credit; /* non NULL means 'don't consume credit' */ | ||
171 | }; | ||
172 | |||
173 | struct bnx2x_vfop_args_tpa { | ||
174 | int qid; | ||
175 | dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF]; | ||
176 | }; | ||
177 | |||
178 | union bnx2x_vfop_args { | ||
179 | struct bnx2x_vfop_args_mcast mc_list; | ||
180 | struct bnx2x_vfop_args_qctor qctor; | ||
181 | struct bnx2x_vfop_args_qdtor qdtor; | ||
182 | struct bnx2x_vfop_args_defvlan defvlan; | ||
183 | struct bnx2x_vfop_args_qx qx; | ||
184 | struct bnx2x_vfop_args_filters filters; | ||
185 | struct bnx2x_vfop_args_tpa tpa; | ||
186 | }; | ||
187 | |||
188 | struct bnx2x_vfop { | ||
189 | struct list_head link; | ||
190 | int rc; /* return code */ | ||
191 | int state; /* next state */ | ||
192 | union bnx2x_vfop_args args; /* extra arguments */ | ||
193 | union bnx2x_vfop_params *op_p; /* ramrod params */ | ||
194 | |||
195 | /* state machine callbacks */ | ||
196 | vfop_handler_t transition; | ||
197 | vfop_handler_t done; | ||
198 | }; | 117 | }; |
199 | 118 | ||
200 | /* vf context */ | 119 | /* vf context */ |
@@ -214,15 +133,7 @@ struct bnx2x_virtf { | |||
214 | #define VF_ENABLED 2 /* VF Enabled */ | 133 | #define VF_ENABLED 2 /* VF Enabled */ |
215 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ | 134 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ |
216 | 135 | ||
217 | /* non 0 during flr cleanup */ | 136 | bool flr_clnup_stage; /* true during flr cleanup */ |
218 | u8 flr_clnup_stage; | ||
219 | #define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup' | ||
220 | * sans the end-wait | ||
221 | */ | ||
222 | #define VF_FLR_ACK 2 /* ACK flr notification */ | ||
223 | #define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW | ||
224 | * ~ final cleanup' end wait | ||
225 | */ | ||
226 | 137 | ||
227 | /* dma */ | 138 | /* dma */ |
228 | dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ | 139 | dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ |
@@ -286,11 +197,6 @@ struct bnx2x_virtf { | |||
286 | struct bnx2x_rss_config_obj rss_conf_obj; | 197 | struct bnx2x_rss_config_obj rss_conf_obj; |
287 | 198 | ||
288 | /* slow-path operations */ | 199 | /* slow-path operations */ |
289 | atomic_t op_in_progress; | ||
290 | int op_rc; | ||
291 | bool op_wait_blocking; | ||
292 | struct list_head op_list_head; | ||
293 | union bnx2x_vfop_params op_params; | ||
294 | struct mutex op_mutex; /* one vfop at a time mutex */ | 200 | struct mutex op_mutex; /* one vfop at a time mutex */ |
295 | enum channel_tlvs op_current; | 201 | enum channel_tlvs op_current; |
296 | }; | 202 | }; |
@@ -477,7 +383,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp); | |||
477 | void bnx2x_iov_init_dmae(struct bnx2x *bp); | 383 | void bnx2x_iov_init_dmae(struct bnx2x *bp); |
478 | void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, | 384 | void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, |
479 | struct bnx2x_queue_sp_obj **q_obj); | 385 | struct bnx2x_queue_sp_obj **q_obj); |
480 | void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid); | ||
481 | int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); | 386 | int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); |
482 | void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); | 387 | void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); |
483 | void bnx2x_iov_storm_stats_update(struct bnx2x *bp); | 388 | void bnx2x_iov_storm_stats_update(struct bnx2x *bp); |
@@ -497,163 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
497 | int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, | 402 | int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, |
498 | dma_addr_t *sb_map); | 403 | dma_addr_t *sb_map); |
499 | 404 | ||
500 | /* VFOP generic helpers */ | ||
501 | #define bnx2x_vfop_default(state) do { \ | ||
502 | BNX2X_ERR("Bad state %d\n", (state)); \ | ||
503 | vfop->rc = -EINVAL; \ | ||
504 | goto op_err; \ | ||
505 | } while (0) | ||
506 | |||
507 | enum { | ||
508 | VFOP_DONE, | ||
509 | VFOP_CONT, | ||
510 | VFOP_VERIFY_PEND, | ||
511 | }; | ||
512 | |||
513 | #define bnx2x_vfop_finalize(vf, rc, next) do { \ | ||
514 | if ((rc) < 0) \ | ||
515 | goto op_err; \ | ||
516 | else if ((rc) > 0) \ | ||
517 | goto op_pending; \ | ||
518 | else if ((next) == VFOP_DONE) \ | ||
519 | goto op_done; \ | ||
520 | else if ((next) == VFOP_VERIFY_PEND) \ | ||
521 | BNX2X_ERR("expected pending\n"); \ | ||
522 | else { \ | ||
523 | DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \ | ||
524 | atomic_set(&vf->op_in_progress, 1); \ | ||
525 | bnx2x_schedule_iov_task(bp, \ | ||
526 | BNX2X_IOV_CONT_VFOP); \ | ||
527 | return; \ | ||
528 | } \ | ||
529 | } while (0) | ||
530 | |||
531 | #define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \ | ||
532 | do { \ | ||
533 | vfop->state = first_state; \ | ||
534 | vfop->op_p = &vf->op_params; \ | ||
535 | vfop->transition = trans_hndlr; \ | ||
536 | vfop->done = done_hndlr; \ | ||
537 | } while (0) | ||
538 | |||
539 | static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, | ||
540 | struct bnx2x_virtf *vf) | ||
541 | { | ||
542 | WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); | ||
543 | WARN_ON(list_empty(&vf->op_list_head)); | ||
544 | return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); | ||
545 | } | ||
546 | |||
547 | static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, | ||
548 | struct bnx2x_virtf *vf) | ||
549 | { | ||
550 | struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); | ||
551 | |||
552 | WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); | ||
553 | if (vfop) { | ||
554 | INIT_LIST_HEAD(&vfop->link); | ||
555 | list_add(&vfop->link, &vf->op_list_head); | ||
556 | } | ||
557 | return vfop; | ||
558 | } | ||
559 | |||
560 | static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, | ||
561 | struct bnx2x_vfop *vfop) | ||
562 | { | ||
563 | /* rc < 0 - error, otherwise set to 0 */ | ||
564 | DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); | ||
565 | if (vfop->rc >= 0) | ||
566 | vfop->rc = 0; | ||
567 | DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); | ||
568 | |||
569 | /* unlink the current op context and propagate error code | ||
570 | * must be done before invoking the 'done()' handler | ||
571 | */ | ||
572 | WARN(!mutex_is_locked(&vf->op_mutex), | ||
573 | "about to access vf op linked list but mutex was not locked!"); | ||
574 | list_del(&vfop->link); | ||
575 | |||
576 | if (list_empty(&vf->op_list_head)) { | ||
577 | DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); | ||
578 | vf->op_rc = vfop->rc; | ||
579 | DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", | ||
580 | vf->op_rc, vfop->rc); | ||
581 | } else { | ||
582 | struct bnx2x_vfop *cur_vfop; | ||
583 | |||
584 | DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc); | ||
585 | cur_vfop = bnx2x_vfop_cur(bp, vf); | ||
586 | cur_vfop->rc = vfop->rc; | ||
587 | DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", | ||
588 | vf->op_rc, vfop->rc); | ||
589 | } | ||
590 | |||
591 | /* invoke done handler */ | ||
592 | if (vfop->done) { | ||
593 | DP(BNX2X_MSG_IOV, "calling done handler\n"); | ||
594 | vfop->done(bp, vf); | ||
595 | } else { | ||
596 | /* there is no done handler for the operation to unlock | ||
597 | * the mutex. Must have gotten here from PF initiated VF RELEASE | ||
598 | */ | ||
599 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); | ||
600 | } | ||
601 | |||
602 | DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", | ||
603 | vf->op_rc, vfop->rc); | ||
604 | |||
605 | /* if this is the last nested op reset the wait_blocking flag | ||
606 | * to release any blocking wrappers, only after 'done()' is invoked | ||
607 | */ | ||
608 | if (list_empty(&vf->op_list_head)) { | ||
609 | DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); | ||
610 | vf->op_wait_blocking = false; | ||
611 | } | ||
612 | |||
613 | kfree(vfop); | ||
614 | } | ||
615 | |||
616 | static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, | ||
617 | struct bnx2x_virtf *vf) | ||
618 | { | ||
619 | /* can take a while if any port is running */ | ||
620 | int cnt = 5000; | ||
621 | |||
622 | might_sleep(); | ||
623 | while (cnt--) { | ||
624 | if (vf->op_wait_blocking == false) { | ||
625 | #ifdef BNX2X_STOP_ON_ERROR | ||
626 | DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt); | ||
627 | #endif | ||
628 | return 0; | ||
629 | } | ||
630 | usleep_range(1000, 2000); | ||
631 | |||
632 | if (bp->panic) | ||
633 | return -EIO; | ||
634 | } | ||
635 | |||
636 | /* timeout! */ | ||
637 | #ifdef BNX2X_STOP_ON_ERROR | ||
638 | bnx2x_panic(); | ||
639 | #endif | ||
640 | |||
641 | return -EBUSY; | ||
642 | } | ||
643 | |||
644 | static inline int bnx2x_vfop_transition(struct bnx2x *bp, | ||
645 | struct bnx2x_virtf *vf, | ||
646 | vfop_handler_t transition, | ||
647 | bool block) | ||
648 | { | ||
649 | if (block) | ||
650 | vf->op_wait_blocking = true; | ||
651 | transition(bp, vf); | ||
652 | if (block) | ||
653 | return bnx2x_vfop_wait_blocking(bp, vf); | ||
654 | return 0; | ||
655 | } | ||
656 | |||
657 | /* VFOP queue construction helpers */ | 405 | /* VFOP queue construction helpers */ |
658 | void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, | 406 | void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, |
659 | struct bnx2x_queue_init_params *init_params, | 407 | struct bnx2x_queue_init_params *init_params, |
@@ -668,64 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
668 | void bnx2x_vfop_qctor_prep(struct bnx2x *bp, | 416 | void bnx2x_vfop_qctor_prep(struct bnx2x *bp, |
669 | struct bnx2x_virtf *vf, | 417 | struct bnx2x_virtf *vf, |
670 | struct bnx2x_vf_queue *q, | 418 | struct bnx2x_vf_queue *q, |
671 | struct bnx2x_vfop_qctor_params *p, | 419 | struct bnx2x_vf_queue_construct_params *p, |
672 | unsigned long q_type); | 420 | unsigned long q_type); |
673 | int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, | ||
674 | struct bnx2x_virtf *vf, | ||
675 | struct bnx2x_vfop_cmd *cmd, | ||
676 | struct bnx2x_vfop_filters *macs, | ||
677 | int qid, bool drv_only); | ||
678 | |||
679 | int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, | ||
680 | struct bnx2x_virtf *vf, | ||
681 | struct bnx2x_vfop_cmd *cmd, | ||
682 | struct bnx2x_vfop_filters *vlans, | ||
683 | int qid, bool drv_only); | ||
684 | |||
685 | int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, | ||
686 | struct bnx2x_virtf *vf, | ||
687 | struct bnx2x_vfop_cmd *cmd, | ||
688 | int qid); | ||
689 | |||
690 | int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, | ||
691 | struct bnx2x_virtf *vf, | ||
692 | struct bnx2x_vfop_cmd *cmd, | ||
693 | int qid); | ||
694 | |||
695 | int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, | ||
696 | struct bnx2x_virtf *vf, | ||
697 | struct bnx2x_vfop_cmd *cmd, | ||
698 | bnx2x_mac_addr_t *mcasts, | ||
699 | int mcast_num, bool drv_only); | ||
700 | |||
701 | int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, | ||
702 | struct bnx2x_virtf *vf, | ||
703 | struct bnx2x_vfop_cmd *cmd, | ||
704 | int qid, unsigned long accept_flags); | ||
705 | |||
706 | int bnx2x_vfop_close_cmd(struct bnx2x *bp, | ||
707 | struct bnx2x_virtf *vf, | ||
708 | struct bnx2x_vfop_cmd *cmd); | ||
709 | |||
710 | int bnx2x_vfop_release_cmd(struct bnx2x *bp, | ||
711 | struct bnx2x_virtf *vf, | ||
712 | struct bnx2x_vfop_cmd *cmd); | ||
713 | 421 | ||
714 | int bnx2x_vfop_rss_cmd(struct bnx2x *bp, | 422 | int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, |
715 | struct bnx2x_virtf *vf, | 423 | struct bnx2x_vf_mac_vlan_filters *filters, |
716 | struct bnx2x_vfop_cmd *cmd); | 424 | int qid, bool drv_only); |
425 | |||
426 | int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, | ||
427 | struct bnx2x_vf_queue_construct_params *qctor); | ||
428 | |||
429 | int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); | ||
430 | |||
431 | int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, | ||
432 | bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); | ||
433 | |||
434 | int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, | ||
435 | int qid, unsigned long accept_flags); | ||
436 | |||
437 | int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); | ||
438 | |||
439 | int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); | ||
440 | |||
441 | int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, | ||
442 | struct bnx2x_config_rss_params *rss); | ||
717 | 443 | ||
718 | int bnx2x_vfop_tpa_cmd(struct bnx2x *bp, | 444 | int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, |
719 | struct bnx2x_virtf *vf, | 445 | struct vfpf_tpa_tlv *tlv, |
720 | struct bnx2x_vfop_cmd *cmd, | 446 | struct bnx2x_queue_update_tpa_params *params); |
721 | struct vfpf_tpa_tlv *tpa_tlv); | ||
722 | 447 | ||
723 | /* VF release ~ VF close + VF release-resources | 448 | /* VF release ~ VF close + VF release-resources |
724 | * | 449 | * |
725 | * Release is the ultimate SW shutdown and is called whenever an | 450 | * Release is the ultimate SW shutdown and is called whenever an |
726 | * irrecoverable error is encountered. | 451 | * irrecoverable error is encountered. |
727 | */ | 452 | */ |
728 | void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); | 453 | int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); |
729 | int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); | 454 | int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); |
730 | u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); | 455 | u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); |
731 | 456 | ||
@@ -796,7 +521,6 @@ void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); | |||
796 | 521 | ||
797 | static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, | 522 | static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, |
798 | struct bnx2x_queue_sp_obj **q_obj) {} | 523 | struct bnx2x_queue_sp_obj **q_obj) {} |
799 | static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) {} | ||
800 | static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} | 524 | static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} |
801 | static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, | 525 | static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, |
802 | union event_ring_elem *elem) {return 1; } | 526 | union event_ring_elem *elem) {return 1; } |
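A minimal sketch of how the new flat filter list declared in this header is built, assuming the bnx2x_vf_mac_vlan_filter(s) layout above; my_build_single_mac_filter() is a hypothetical helper for illustration, and the allocation pattern mirrors bnx2x_vf_mbx_macvlan_list() in the bnx2x_vfpf.c hunks below:

/* Sketch only, not part of the patch: my_build_single_mac_filter() is a
 * hypothetical helper; lifetime/ownership of the returned list is left to
 * the caller, since these hunks do not show who frees it.
 */
static struct bnx2x_vf_mac_vlan_filters *my_build_single_mac_filter(u8 *mac)
{
	struct bnx2x_vf_mac_vlan_filters *fl;
	size_t fsz = sizeof(struct bnx2x_vf_mac_vlan_filters) +
		     sizeof(struct bnx2x_vf_mac_vlan_filter);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return NULL;

	fl->filters[0].type = BNX2X_VF_FILTER_MAC;
	fl->filters[0].mac = mac;
	fl->filters[0].add = true;
	fl->count = 1;	/* replaces the old list_add_tail() bookkeeping */

	return fl;
}

Such a list is then handed over in a single call, e.g. bnx2x_vf_mac_vlan_config_list(bp, vf, fl, qid, false), as the mailbox handler below does, instead of being walked as a linked list of bnx2x_vfop_filter entries.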
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 63c95658ba60..fe3737e56d08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -673,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) | |||
673 | 673 | ||
674 | out: | 674 | out: |
675 | bnx2x_vfpf_finalize(bp, &req->first_tlv); | 675 | bnx2x_vfpf_finalize(bp, &req->first_tlv); |
676 | |||
676 | return rc; | 677 | return rc; |
677 | } | 678 | } |
678 | 679 | ||
@@ -1048,7 +1049,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, | |||
1048 | } | 1049 | } |
1049 | 1050 | ||
1050 | static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, | 1051 | static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, |
1051 | struct bnx2x_virtf *vf) | 1052 | struct bnx2x_virtf *vf, |
1053 | int vf_rc) | ||
1052 | { | 1054 | { |
1053 | struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); | 1055 | struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); |
1054 | struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; | 1056 | struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; |
@@ -1060,7 +1062,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, | |||
1060 | DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", | 1062 | DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", |
1061 | mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); | 1063 | mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); |
1062 | 1064 | ||
1063 | resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); | 1065 | resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc); |
1064 | 1066 | ||
1065 | /* send response */ | 1067 | /* send response */ |
1066 | vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + | 1068 | vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + |
@@ -1108,14 +1110,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, | |||
1108 | return; | 1110 | return; |
1109 | 1111 | ||
1110 | mbx_error: | 1112 | mbx_error: |
1111 | bnx2x_vf_release(bp, vf, false); /* non blocking */ | 1113 | bnx2x_vf_release(bp, vf); |
1112 | } | 1114 | } |
1113 | 1115 | ||
1114 | static void bnx2x_vf_mbx_resp(struct bnx2x *bp, | 1116 | static void bnx2x_vf_mbx_resp(struct bnx2x *bp, |
1115 | struct bnx2x_virtf *vf) | 1117 | struct bnx2x_virtf *vf, |
1118 | int rc) | ||
1116 | { | 1119 | { |
1117 | bnx2x_vf_mbx_resp_single_tlv(bp, vf); | 1120 | bnx2x_vf_mbx_resp_single_tlv(bp, vf); |
1118 | bnx2x_vf_mbx_resp_send_msg(bp, vf); | 1121 | bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); |
1119 | } | 1122 | } |
1120 | 1123 | ||
1121 | static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, | 1124 | static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, |
@@ -1239,8 +1242,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1239 | sizeof(struct channel_list_end_tlv)); | 1242 | sizeof(struct channel_list_end_tlv)); |
1240 | 1243 | ||
1241 | /* send the response */ | 1244 | /* send the response */ |
1242 | vf->op_rc = vfop_status; | 1245 | bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); |
1243 | bnx2x_vf_mbx_resp_send_msg(bp, vf); | ||
1244 | } | 1246 | } |
1245 | 1247 | ||
1246 | static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, | 1248 | static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, |
@@ -1272,19 +1274,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1272 | struct bnx2x_vf_mbx *mbx) | 1274 | struct bnx2x_vf_mbx *mbx) |
1273 | { | 1275 | { |
1274 | struct vfpf_init_tlv *init = &mbx->msg->req.init; | 1276 | struct vfpf_init_tlv *init = &mbx->msg->req.init; |
1277 | int rc; | ||
1275 | 1278 | ||
1276 | /* record ghost addresses from vf message */ | 1279 | /* record ghost addresses from vf message */ |
1277 | vf->spq_map = init->spq_addr; | 1280 | vf->spq_map = init->spq_addr; |
1278 | vf->fw_stat_map = init->stats_addr; | 1281 | vf->fw_stat_map = init->stats_addr; |
1279 | vf->stats_stride = init->stats_stride; | 1282 | vf->stats_stride = init->stats_stride; |
1280 | vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); | 1283 | rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); |
1281 | 1284 | ||
1282 | /* set VF multiqueue statistics collection mode */ | 1285 | /* set VF multiqueue statistics collection mode */ |
1283 | if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) | 1286 | if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) |
1284 | vf->cfg_flags |= VF_CFG_STATS_COALESCE; | 1287 | vf->cfg_flags |= VF_CFG_STATS_COALESCE; |
1285 | 1288 | ||
1286 | /* response */ | 1289 | /* response */ |
1287 | bnx2x_vf_mbx_resp(bp, vf); | 1290 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1288 | } | 1291 | } |
1289 | 1292 | ||
1290 | /* convert MBX queue-flags to standard SP queue-flags */ | 1293 | /* convert MBX queue-flags to standard SP queue-flags */ |
@@ -1319,16 +1322,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1319 | struct bnx2x_vf_mbx *mbx) | 1322 | struct bnx2x_vf_mbx *mbx) |
1320 | { | 1323 | { |
1321 | struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; | 1324 | struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; |
1322 | struct bnx2x_vfop_cmd cmd = { | 1325 | struct bnx2x_vf_queue_construct_params qctor; |
1323 | .done = bnx2x_vf_mbx_resp, | 1326 | int rc = 0; |
1324 | .block = false, | ||
1325 | }; | ||
1326 | 1327 | ||
1327 | /* verify vf_qid */ | 1328 | /* verify vf_qid */ |
1328 | if (setup_q->vf_qid >= vf_rxq_count(vf)) { | 1329 | if (setup_q->vf_qid >= vf_rxq_count(vf)) { |
1329 | BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", | 1330 | BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", |
1330 | setup_q->vf_qid, vf_rxq_count(vf)); | 1331 | setup_q->vf_qid, vf_rxq_count(vf)); |
1331 | vf->op_rc = -EINVAL; | 1332 | rc = -EINVAL; |
1332 | goto response; | 1333 | goto response; |
1333 | } | 1334 | } |
1334 | 1335 | ||
@@ -1346,9 +1347,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1346 | bnx2x_leading_vfq_init(bp, vf, q); | 1347 | bnx2x_leading_vfq_init(bp, vf, q); |
1347 | 1348 | ||
1348 | /* re-init the VF operation context */ | 1349 | /* re-init the VF operation context */ |
1349 | memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); | 1350 | memset(&qctor, 0 , |
1350 | setup_p = &vf->op_params.qctor.prep_qsetup; | 1351 | sizeof(struct bnx2x_vf_queue_construct_params)); |
1351 | init_p = &vf->op_params.qctor.qstate.params.init; | 1352 | setup_p = &qctor.prep_qsetup; |
1353 | init_p = &qctor.qstate.params.init; | ||
1352 | 1354 | ||
1353 | /* activate immediately */ | 1355 | /* activate immediately */ |
1354 | __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); | 1356 | __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); |
@@ -1434,44 +1436,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1434 | q->index, q->sb_idx); | 1436 | q->index, q->sb_idx); |
1435 | } | 1437 | } |
1436 | /* complete the preparations */ | 1438 | /* complete the preparations */ |
1437 | bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type); | 1439 | bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); |
1438 | 1440 | ||
1439 | vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index); | 1441 | rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); |
1440 | if (vf->op_rc) | 1442 | if (rc) |
1441 | goto response; | 1443 | goto response; |
1442 | return; | ||
1443 | } | 1444 | } |
1444 | response: | 1445 | response: |
1445 | bnx2x_vf_mbx_resp(bp, vf); | 1446 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1446 | } | 1447 | } |
1447 | 1448 | ||
1448 | enum bnx2x_vfop_filters_state { | ||
1449 | BNX2X_VFOP_MBX_Q_FILTERS_MACS, | ||
1450 | BNX2X_VFOP_MBX_Q_FILTERS_VLANS, | ||
1451 | BNX2X_VFOP_MBX_Q_FILTERS_RXMODE, | ||
1452 | BNX2X_VFOP_MBX_Q_FILTERS_MCAST, | ||
1453 | BNX2X_VFOP_MBX_Q_FILTERS_DONE | ||
1454 | }; | ||
1455 | |||
1456 | static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, | 1449 | static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, |
1457 | struct bnx2x_virtf *vf, | 1450 | struct bnx2x_virtf *vf, |
1458 | struct vfpf_set_q_filters_tlv *tlv, | 1451 | struct vfpf_set_q_filters_tlv *tlv, |
1459 | struct bnx2x_vfop_filters **pfl, | 1452 | struct bnx2x_vf_mac_vlan_filters **pfl, |
1460 | u32 type_flag) | 1453 | u32 type_flag) |
1461 | { | 1454 | { |
1462 | int i, j; | 1455 | int i, j; |
1463 | struct bnx2x_vfop_filters *fl = NULL; | 1456 | struct bnx2x_vf_mac_vlan_filters *fl = NULL; |
1464 | size_t fsz; | 1457 | size_t fsz; |
1465 | 1458 | ||
1466 | fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) + | 1459 | fsz = tlv->n_mac_vlan_filters * |
1467 | sizeof(struct bnx2x_vfop_filters); | 1460 | sizeof(struct bnx2x_vf_mac_vlan_filter) + |
1461 | sizeof(struct bnx2x_vf_mac_vlan_filters); | ||
1468 | 1462 | ||
1469 | fl = kzalloc(fsz, GFP_KERNEL); | 1463 | fl = kzalloc(fsz, GFP_KERNEL); |
1470 | if (!fl) | 1464 | if (!fl) |
1471 | return -ENOMEM; | 1465 | return -ENOMEM; |
1472 | 1466 | ||
1473 | INIT_LIST_HEAD(&fl->head); | ||
1474 | |||
1475 | for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { | 1467 | for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { |
1476 | struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; | 1468 | struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; |
1477 | 1469 | ||
@@ -1479,17 +1471,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, | |||
1479 | continue; | 1471 | continue; |
1480 | if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { | 1472 | if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { |
1481 | fl->filters[j].mac = msg_filter->mac; | 1473 | fl->filters[j].mac = msg_filter->mac; |
1482 | fl->filters[j].type = BNX2X_VFOP_FILTER_MAC; | 1474 | fl->filters[j].type = BNX2X_VF_FILTER_MAC; |
1483 | } else { | 1475 | } else { |
1484 | fl->filters[j].vid = msg_filter->vlan_tag; | 1476 | fl->filters[j].vid = msg_filter->vlan_tag; |
1485 | fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN; | 1477 | fl->filters[j].type = BNX2X_VF_FILTER_VLAN; |
1486 | } | 1478 | } |
1487 | fl->filters[j].add = | 1479 | fl->filters[j].add = |
1488 | (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? | 1480 | (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? |
1489 | true : false; | 1481 | true : false; |
1490 | list_add_tail(&fl->filters[j++].link, &fl->head); | 1482 | fl->count++; |
1491 | } | 1483 | } |
1492 | if (list_empty(&fl->head)) | 1484 | if (!fl->count) |
1493 | kfree(fl); | 1485 | kfree(fl); |
1494 | else | 1486 | else |
1495 | *pfl = fl; | 1487 | *pfl = fl; |
@@ -1529,168 +1521,97 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, | |||
1529 | #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID | 1521 | #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID |
1530 | #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID | 1522 | #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID |
1531 | 1523 | ||
1532 | static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) | 1524 | static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) |
1533 | { | 1525 | { |
1534 | int rc; | 1526 | int rc = 0; |
1535 | 1527 | ||
1536 | struct vfpf_set_q_filters_tlv *msg = | 1528 | struct vfpf_set_q_filters_tlv *msg = |
1537 | &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; | 1529 | &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; |
1538 | 1530 | ||
1539 | struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); | 1531 | /* check for any mac/vlan changes */ |
1540 | enum bnx2x_vfop_filters_state state = vfop->state; | 1532 | if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { |
1541 | 1533 | /* build mac list */ | |
1542 | struct bnx2x_vfop_cmd cmd = { | 1534 | struct bnx2x_vf_mac_vlan_filters *fl = NULL; |
1543 | .done = bnx2x_vfop_mbx_qfilters, | ||
1544 | .block = false, | ||
1545 | }; | ||
1546 | 1535 | ||
1547 | DP(BNX2X_MSG_IOV, "STATE: %d\n", state); | 1536 | rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, |
1548 | 1537 | VFPF_MAC_FILTER); | |
1549 | if (vfop->rc < 0) | 1538 | if (rc) |
1550 | goto op_err; | 1539 | goto op_err; |
1551 | |||
1552 | switch (state) { | ||
1553 | case BNX2X_VFOP_MBX_Q_FILTERS_MACS: | ||
1554 | /* next state */ | ||
1555 | vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS; | ||
1556 | 1540 | ||
1557 | /* check for any vlan/mac changes */ | 1541 | if (fl) { |
1558 | if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { | ||
1559 | /* build mac list */ | ||
1560 | struct bnx2x_vfop_filters *fl = NULL; | ||
1561 | 1542 | ||
1562 | vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, | 1543 | /* set mac list */ |
1563 | VFPF_MAC_FILTER); | 1544 | rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, |
1564 | if (vfop->rc) | 1545 | msg->vf_qid, |
1546 | false); | ||
1547 | if (rc) | ||
1565 | goto op_err; | 1548 | goto op_err; |
1566 | |||
1567 | if (fl) { | ||
1568 | /* set mac list */ | ||
1569 | rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl, | ||
1570 | msg->vf_qid, | ||
1571 | false); | ||
1572 | if (rc) { | ||
1573 | vfop->rc = rc; | ||
1574 | goto op_err; | ||
1575 | } | ||
1576 | return; | ||
1577 | } | ||
1578 | } | 1549 | } |
1579 | /* fall through */ | ||
1580 | |||
1581 | case BNX2X_VFOP_MBX_Q_FILTERS_VLANS: | ||
1582 | /* next state */ | ||
1583 | vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE; | ||
1584 | 1550 | ||
1585 | /* check for any vlan/mac changes */ | 1551 | /* build vlan list */ |
1586 | if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { | 1552 | fl = NULL; |
1587 | /* build vlan list */ | ||
1588 | struct bnx2x_vfop_filters *fl = NULL; | ||
1589 | 1553 | ||
1590 | vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, | 1554 | rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, |
1591 | VFPF_VLAN_FILTER); | 1555 | VFPF_VLAN_FILTER); |
1592 | if (vfop->rc) | 1556 | if (rc) |
1557 | goto op_err; | ||
1558 | |||
1559 | if (fl) { | ||
1560 | /* set vlan list */ | ||
1561 | rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, | ||
1562 | msg->vf_qid, | ||
1563 | false); | ||
1564 | if (rc) | ||
1593 | goto op_err; | 1565 | goto op_err; |
1594 | |||
1595 | if (fl) { | ||
1596 | /* set vlan list */ | ||
1597 | rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl, | ||
1598 | msg->vf_qid, | ||
1599 | false); | ||
1600 | if (rc) { | ||
1601 | vfop->rc = rc; | ||
1602 | goto op_err; | ||
1603 | } | ||
1604 | return; | ||
1605 | } | ||
1606 | } | 1566 | } |
1607 | /* fall through */ | 1567 | } |
1608 | 1568 | ||
1609 | case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE: | 1569 | if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { |
1610 | /* next state */ | 1570 | unsigned long accept = 0; |
1611 | vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST; | 1571 | struct pf_vf_bulletin_content *bulletin = |
1572 | BP_VF_BULLETIN(bp, vf->index); | ||
1612 | 1573 | ||
1613 | if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { | 1574 | /* convert VF-PF if mask to bnx2x accept flags */ |
1614 | unsigned long accept = 0; | 1575 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) |
1615 | struct pf_vf_bulletin_content *bulletin = | 1576 | __set_bit(BNX2X_ACCEPT_UNICAST, &accept); |
1616 | BP_VF_BULLETIN(bp, vf->index); | ||
1617 | 1577 | ||
1618 | /* convert VF-PF if mask to bnx2x accept flags */ | 1578 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST) |
1619 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) | 1579 | __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); |
1620 | __set_bit(BNX2X_ACCEPT_UNICAST, &accept); | ||
1621 | 1580 | ||
1622 | if (msg->rx_mask & | 1581 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST) |
1623 | VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST) | 1582 | __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept); |
1624 | __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); | ||
1625 | 1583 | ||
1626 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST) | 1584 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST) |
1627 | __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept); | 1585 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept); |
1628 | 1586 | ||
1629 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST) | 1587 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST) |
1630 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept); | 1588 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); |
1631 | 1589 | ||
1632 | if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST) | 1590 | /* A packet arriving the vf's mac should be accepted |
1633 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); | 1591 | * with any vlan, unless a vlan has already been |
1634 | 1592 | * configured. | |
1635 | /* A packet arriving the vf's mac should be accepted | 1593 | */ |
1636 | * with any vlan, unless a vlan has already been | 1594 | if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) |
1637 | * configured. | 1595 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); |
1638 | */ | ||
1639 | if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) | ||
1640 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); | ||
1641 | |||
1642 | /* set rx-mode */ | ||
1643 | rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, | ||
1644 | msg->vf_qid, accept); | ||
1645 | if (rc) { | ||
1646 | vfop->rc = rc; | ||
1647 | goto op_err; | ||
1648 | } | ||
1649 | return; | ||
1650 | } | ||
1651 | /* fall through */ | ||
1652 | |||
1653 | case BNX2X_VFOP_MBX_Q_FILTERS_MCAST: | ||
1654 | /* next state */ | ||
1655 | vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE; | ||
1656 | |||
1657 | if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { | ||
1658 | /* set mcasts */ | ||
1659 | rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast, | ||
1660 | msg->n_multicast, false); | ||
1661 | if (rc) { | ||
1662 | vfop->rc = rc; | ||
1663 | goto op_err; | ||
1664 | } | ||
1665 | return; | ||
1666 | } | ||
1667 | /* fall through */ | ||
1668 | op_done: | ||
1669 | case BNX2X_VFOP_MBX_Q_FILTERS_DONE: | ||
1670 | bnx2x_vfop_end(bp, vf, vfop); | ||
1671 | return; | ||
1672 | op_err: | ||
1673 | BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", | ||
1674 | vf->abs_vfid, msg->vf_qid, vfop->rc); | ||
1675 | goto op_done; | ||
1676 | 1596 | ||
1677 | default: | 1597 | /* set rx-mode */ |
1678 | bnx2x_vfop_default(state); | 1598 | rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); |
1599 | if (rc) | ||
1600 | goto op_err; | ||
1679 | } | 1601 | } |
1680 | } | ||
1681 | 1602 | ||
1682 | static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp, | 1603 | if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { |
1683 | struct bnx2x_virtf *vf, | 1604 | /* set mcasts */ |
1684 | struct bnx2x_vfop_cmd *cmd) | 1605 | rc = bnx2x_vf_mcast(bp, vf, msg->multicast, |
1685 | { | 1606 | msg->n_multicast, false); |
1686 | struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); | 1607 | if (rc) |
1687 | if (vfop) { | 1608 | goto op_err; |
1688 | bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS, | ||
1689 | bnx2x_vfop_mbx_qfilters, cmd->done); | ||
1690 | return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters, | ||
1691 | cmd->block); | ||
1692 | } | 1609 | } |
1693 | return -ENOMEM; | 1610 | op_err: |
1611 | if (rc) | ||
1612 | BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", | ||
1613 | vf->abs_vfid, msg->vf_qid, rc); | ||
1614 | return rc; | ||
1694 | } | 1615 | } |
1695 | 1616 | ||
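
The rewritten bnx2x_vf_mbx_qfilters() above is the clearest illustration of the VFOP removal in this hunk: the old MACS -> VLANS -> RXMODE -> MCAST state machine, which re-armed itself through a completion callback after every stage, collapses into one sleepable function that runs the stages back to back and routes every failure to a single op_err label. A minimal stand-alone model of that control flow (all helpers below are invented for the sketch, not driver functions):

    #include <stdio.h>

    /* Each stage may now block; no state has to be saved between
     * invocations because there is only one invocation.
     */
    static int set_macs(void)   { return 0; }
    static int set_vlans(void)  { return 0; }
    static int set_rxmode(void) { return 0; }
    static int set_mcasts(void) { return 0; }

    static int apply_q_filters(void)
    {
        int rc;

        rc = set_macs();
        if (rc)
            goto op_err;
        rc = set_vlans();
        if (rc)
            goto op_err;
        rc = set_rxmode();
        if (rc)
            goto op_err;
        rc = set_mcasts();
        if (rc)
            goto op_err;
        return 0;

    op_err:
        fprintf(stderr, "q_filters failed: rc %d\n", rc);
        return rc;
    }

    int main(void)
    {
        return apply_q_filters();
    }

The single error label matches the shape of the new driver function: one return code, reported once, instead of a vfop->rc that the next scheduled segment had to inspect.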
1696 | static int bnx2x_filters_validate_mac(struct bnx2x *bp, | 1617 | static int bnx2x_filters_validate_mac(struct bnx2x *bp, |
@@ -1710,7 +1631,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp, | |||
1710 | if (filters->n_mac_vlan_filters > 1) { | 1631 | if (filters->n_mac_vlan_filters > 1) { |
1711 | BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", | 1632 | BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", |
1712 | vf->abs_vfid); | 1633 | vf->abs_vfid); |
1713 | vf->op_rc = -EPERM; | ||
1714 | rc = -EPERM; | 1634 | rc = -EPERM; |
1715 | goto response; | 1635 | goto response; |
1716 | } | 1636 | } |
@@ -1721,7 +1641,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp, | |||
1721 | BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", | 1641 | BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", |
1722 | vf->abs_vfid); | 1642 | vf->abs_vfid); |
1723 | 1643 | ||
1724 | vf->op_rc = -EPERM; | ||
1725 | rc = -EPERM; | 1644 | rc = -EPERM; |
1726 | goto response; | 1645 | goto response; |
1727 | } | 1646 | } |
@@ -1748,7 +1667,6 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp, | |||
1748 | VFPF_Q_FILTER_VLAN_TAG_VALID) { | 1667 | VFPF_Q_FILTER_VLAN_TAG_VALID) { |
1749 | BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", | 1668 | BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", |
1750 | vf->abs_vfid); | 1669 | vf->abs_vfid); |
1751 | vf->op_rc = -EPERM; | ||
1752 | rc = -EPERM; | 1670 | rc = -EPERM; |
1753 | goto response; | 1671 | goto response; |
1754 | } | 1672 | } |
@@ -1770,15 +1688,14 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, | |||
1770 | struct bnx2x_vf_mbx *mbx) | 1688 | struct bnx2x_vf_mbx *mbx) |
1771 | { | 1689 | { |
1772 | struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; | 1690 | struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; |
1773 | struct bnx2x_vfop_cmd cmd = { | 1691 | int rc; |
1774 | .done = bnx2x_vf_mbx_resp, | ||
1775 | .block = false, | ||
1776 | }; | ||
1777 | 1692 | ||
1778 | if (bnx2x_filters_validate_mac(bp, vf, filters)) | 1693 | rc = bnx2x_filters_validate_mac(bp, vf, filters); |
1694 | if (rc) | ||
1779 | goto response; | 1695 | goto response; |
1780 | 1696 | ||
1781 | if (bnx2x_filters_validate_vlan(bp, vf, filters)) | 1697 | rc = bnx2x_filters_validate_vlan(bp, vf, filters); |
1698 | if (rc) | ||
1782 | goto response; | 1699 | goto response; |
1783 | 1700 | ||
1784 | DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", | 1701 | DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", |
@@ -1788,125 +1705,105 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, | |||
1788 | /* print q_filter message */ | 1705 | /* print q_filter message */ |
1789 | bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); | 1706 | bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); |
1790 | 1707 | ||
1791 | vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd); | 1708 | rc = bnx2x_vf_mbx_qfilters(bp, vf); |
1792 | if (vf->op_rc) | ||
1793 | goto response; | ||
1794 | return; | ||
1795 | |||
1796 | response: | 1709 | response: |
1797 | bnx2x_vf_mbx_resp(bp, vf); | 1710 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1798 | } | 1711 | } |
1799 | 1712 | ||
1800 | static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, | 1713 | static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1801 | struct bnx2x_vf_mbx *mbx) | 1714 | struct bnx2x_vf_mbx *mbx) |
1802 | { | 1715 | { |
1803 | int qid = mbx->msg->req.q_op.vf_qid; | 1716 | int qid = mbx->msg->req.q_op.vf_qid; |
1804 | struct bnx2x_vfop_cmd cmd = { | 1717 | int rc; |
1805 | .done = bnx2x_vf_mbx_resp, | ||
1806 | .block = false, | ||
1807 | }; | ||
1808 | 1718 | ||
1809 | DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", | 1719 | DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", |
1810 | vf->abs_vfid, qid); | 1720 | vf->abs_vfid, qid); |
1811 | 1721 | ||
1812 | vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid); | 1722 | rc = bnx2x_vf_queue_teardown(bp, vf, qid); |
1813 | if (vf->op_rc) | 1723 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1814 | bnx2x_vf_mbx_resp(bp, vf); | ||
1815 | } | 1724 | } |
1816 | 1725 | ||
1817 | static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, | 1726 | static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1818 | struct bnx2x_vf_mbx *mbx) | 1727 | struct bnx2x_vf_mbx *mbx) |
1819 | { | 1728 | { |
1820 | struct bnx2x_vfop_cmd cmd = { | 1729 | int rc; |
1821 | .done = bnx2x_vf_mbx_resp, | ||
1822 | .block = false, | ||
1823 | }; | ||
1824 | 1730 | ||
1825 | DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); | 1731 | DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); |
1826 | 1732 | ||
1827 | vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); | 1733 | rc = bnx2x_vf_close(bp, vf); |
1828 | if (vf->op_rc) | 1734 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1829 | bnx2x_vf_mbx_resp(bp, vf); | ||
1830 | } | 1735 | } |
1831 | 1736 | ||
1832 | static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, | 1737 | static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1833 | struct bnx2x_vf_mbx *mbx) | 1738 | struct bnx2x_vf_mbx *mbx) |
1834 | { | 1739 | { |
1835 | struct bnx2x_vfop_cmd cmd = { | 1740 | int rc; |
1836 | .done = bnx2x_vf_mbx_resp, | ||
1837 | .block = false, | ||
1838 | }; | ||
1839 | 1741 | ||
1840 | DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); | 1742 | DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); |
1841 | 1743 | ||
1842 | vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); | 1744 | rc = bnx2x_vf_free(bp, vf); |
1843 | if (vf->op_rc) | 1745 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1844 | bnx2x_vf_mbx_resp(bp, vf); | ||
1845 | } | 1746 | } |
1846 | 1747 | ||
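
The teardown, close and release handlers above all shrink to the same two-step shape: run the operation synchronously, then pass its return code directly to bnx2x_vf_mbx_resp(), rather than parking it in vf->op_rc for a deferred completion callback. A compact model of that calling convention (handle_request, do_op and send_resp are names invented for the sketch):

    #include <stdio.h>

    static int do_op(int qid)
    {
        return qid >= 0 ? 0 : -22;      /* stand-in for -EINVAL */
    }

    static void send_resp(int rc)
    {
        printf("responding to VF with status %d\n", rc);
    }

    static void handle_request(int qid)
    {
        int rc = do_op(qid);    /* may sleep in the real driver */

        send_resp(rc);          /* always respond, success or failure */
    }

    int main(void)
    {
        handle_request(3);
        handle_request(-1);
        return 0;
    }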
1847 | static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, | 1748 | static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1848 | struct bnx2x_vf_mbx *mbx) | 1749 | struct bnx2x_vf_mbx *mbx) |
1849 | { | 1750 | { |
1850 | struct bnx2x_vfop_cmd cmd = { | 1751 | struct bnx2x_config_rss_params rss; |
1851 | .done = bnx2x_vf_mbx_resp, | ||
1852 | .block = false, | ||
1853 | }; | ||
1854 | struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss; | ||
1855 | struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; | 1752 | struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; |
1753 | int rc = 0; | ||
1856 | 1754 | ||
1857 | if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || | 1755 | if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || |
1858 | rss_tlv->rss_key_size != T_ETH_RSS_KEY) { | 1756 | rss_tlv->rss_key_size != T_ETH_RSS_KEY) { |
1859 | BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", | 1757 | BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", |
1860 | vf->index); | 1758 | vf->index); |
1861 | vf->op_rc = -EINVAL; | 1759 | rc = -EINVAL; |
1862 | goto mbx_resp; | 1760 | goto mbx_resp; |
1863 | } | 1761 | } |
1864 | 1762 | ||
1763 | memset(&rss, 0, sizeof(struct bnx2x_config_rss_params)); | ||
1764 | |||
1865 | /* set vfop params according to rss tlv */ | 1765 | /* set vfop params according to rss tlv */ |
1866 | memcpy(vf_op_params->ind_table, rss_tlv->ind_table, | 1766 | memcpy(rss.ind_table, rss_tlv->ind_table, |
1867 | T_ETH_INDIRECTION_TABLE_SIZE); | 1767 | T_ETH_INDIRECTION_TABLE_SIZE); |
1868 | memcpy(vf_op_params->rss_key, rss_tlv->rss_key, | 1768 | memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key)); |
1869 | sizeof(rss_tlv->rss_key)); | 1769 | rss.rss_obj = &vf->rss_conf_obj; |
1870 | vf_op_params->rss_obj = &vf->rss_conf_obj; | 1770 | rss.rss_result_mask = rss_tlv->rss_result_mask; |
1871 | vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; | ||
1872 | 1771 | ||
1873 | /* flags handled individually for backward/forward compatability */ | 1772 | /* flags handled individually for backward/forward compatability */ |
1874 | vf_op_params->rss_flags = 0; | 1773 | rss.rss_flags = 0; |
1875 | vf_op_params->ramrod_flags = 0; | 1774 | rss.ramrod_flags = 0; |
1876 | 1775 | ||
1877 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) | 1776 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) |
1878 | __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); | 1777 | __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags); |
1879 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) | 1778 | if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) |
1880 | __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); | 1779 | __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags); |
1881 | if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) | 1780 | if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) |
1882 | __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); | 1781 | __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags); |
1883 | if (rss_tlv->rss_flags & VFPF_RSS_IPV4) | 1782 | if (rss_tlv->rss_flags & VFPF_RSS_IPV4) |
1884 | __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags); | 1783 | __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags); |
1885 | if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) | 1784 | if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) |
1886 | __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); | 1785 | __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags); |
1887 | if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) | 1786 | if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) |
1888 | __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); | 1787 | __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags); |
1889 | if (rss_tlv->rss_flags & VFPF_RSS_IPV6) | 1788 | if (rss_tlv->rss_flags & VFPF_RSS_IPV6) |
1890 | __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); | 1789 | __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags); |
1891 | if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) | 1790 | if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) |
1892 | __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); | 1791 | __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags); |
1893 | if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) | 1792 | if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) |
1894 | __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); | 1793 | __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags); |
1895 | 1794 | ||
1896 | if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && | 1795 | if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && |
1897 | rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || | 1796 | rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || |
1898 | (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && | 1797 | (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && |
1899 | rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { | 1798 | rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { |
1900 | BNX2X_ERR("about to hit a FW assert. aborting...\n"); | 1799 | BNX2X_ERR("about to hit a FW assert. aborting...\n"); |
1901 | vf->op_rc = -EINVAL; | 1800 | rc = -EINVAL; |
1902 | goto mbx_resp; | 1801 | goto mbx_resp; |
1903 | } | 1802 | } |
1904 | 1803 | ||
1905 | vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); | 1804 | rc = bnx2x_vf_rss_update(bp, vf, &rss); |
1906 | |||
1907 | mbx_resp: | 1805 | mbx_resp: |
1908 | if (vf->op_rc) | 1806 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1909 | bnx2x_vf_mbx_resp(bp, vf); | ||
1910 | } | 1807 | } |
1911 | 1808 | ||
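
In the RSS handler above, the configuration parameters move from the long-lived vf->op_params area onto the handler's own stack: a local bnx2x_config_rss_params is zeroed, filled from the TLV and handed by pointer to bnx2x_vf_rss_update(), which completes before the handler returns, so nothing has to outlive the call. A small model of that change (the structs and functions below are invented stand-ins, not driver code):

    #include <stdio.h>
    #include <string.h>

    struct rss_params {             /* stand-in for bnx2x_config_rss_params */
        unsigned long flags;
        unsigned char key[40];
    };

    struct rss_request {            /* stand-in for the vfpf_rss_tlv message */
        unsigned long flags;
        unsigned char key[40];
    };

    static int rss_update(const struct rss_params *p)
    {
        printf("applying rss, flags=%#lx\n", p->flags);
        return 0;
    }

    static int handle_rss_request(const struct rss_request *req)
    {
        struct rss_params params;   /* lives only for this call */

        memset(&params, 0, sizeof(params));
        params.flags = req->flags;
        memcpy(params.key, req->key, sizeof(req->key));

        return rss_update(&params); /* synchronous: finishes before we return */
    }

    int main(void)
    {
        struct rss_request req = { .flags = 0x3 };

        return handle_rss_request(&req);
    }

The same pattern appears in the TPA handler below, where a stack-allocated bnx2x_queue_update_tpa_params replaces vf->op_params.qstate.params.update_tpa.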
1912 | static int bnx2x_validate_tpa_params(struct bnx2x *bp, | 1809 | static int bnx2x_validate_tpa_params(struct bnx2x *bp, |
@@ -1935,47 +1832,42 @@ static int bnx2x_validate_tpa_params(struct bnx2x *bp, | |||
1935 | static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, | 1832 | static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, |
1936 | struct bnx2x_vf_mbx *mbx) | 1833 | struct bnx2x_vf_mbx *mbx) |
1937 | { | 1834 | { |
1938 | struct bnx2x_vfop_cmd cmd = { | 1835 | struct bnx2x_queue_update_tpa_params vf_op_params; |
1939 | .done = bnx2x_vf_mbx_resp, | ||
1940 | .block = false, | ||
1941 | }; | ||
1942 | struct bnx2x_queue_update_tpa_params *vf_op_params = | ||
1943 | &vf->op_params.qstate.params.update_tpa; | ||
1944 | struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; | 1836 | struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; |
1837 | int rc = 0; | ||
1945 | 1838 | ||
1946 | memset(vf_op_params, 0, sizeof(*vf_op_params)); | 1839 | memset(&vf_op_params, 0, sizeof(vf_op_params)); |
1947 | 1840 | ||
1948 | if (bnx2x_validate_tpa_params(bp, tpa_tlv)) | 1841 | if (bnx2x_validate_tpa_params(bp, tpa_tlv)) |
1949 | goto mbx_resp; | 1842 | goto mbx_resp; |
1950 | 1843 | ||
1951 | vf_op_params->complete_on_both_clients = | 1844 | vf_op_params.complete_on_both_clients = |
1952 | tpa_tlv->tpa_client_info.complete_on_both_clients; | 1845 | tpa_tlv->tpa_client_info.complete_on_both_clients; |
1953 | vf_op_params->dont_verify_thr = | 1846 | vf_op_params.dont_verify_thr = |
1954 | tpa_tlv->tpa_client_info.dont_verify_thr; | 1847 | tpa_tlv->tpa_client_info.dont_verify_thr; |
1955 | vf_op_params->max_agg_sz = | 1848 | vf_op_params.max_agg_sz = |
1956 | tpa_tlv->tpa_client_info.max_agg_size; | 1849 | tpa_tlv->tpa_client_info.max_agg_size; |
1957 | vf_op_params->max_sges_pkt = | 1850 | vf_op_params.max_sges_pkt = |
1958 | tpa_tlv->tpa_client_info.max_sges_for_packet; | 1851 | tpa_tlv->tpa_client_info.max_sges_for_packet; |
1959 | vf_op_params->max_tpa_queues = | 1852 | vf_op_params.max_tpa_queues = |
1960 | tpa_tlv->tpa_client_info.max_tpa_queues; | 1853 | tpa_tlv->tpa_client_info.max_tpa_queues; |
1961 | vf_op_params->sge_buff_sz = | 1854 | vf_op_params.sge_buff_sz = |
1962 | tpa_tlv->tpa_client_info.sge_buff_size; | 1855 | tpa_tlv->tpa_client_info.sge_buff_size; |
1963 | vf_op_params->sge_pause_thr_high = | 1856 | vf_op_params.sge_pause_thr_high = |
1964 | tpa_tlv->tpa_client_info.sge_pause_thr_high; | 1857 | tpa_tlv->tpa_client_info.sge_pause_thr_high; |
1965 | vf_op_params->sge_pause_thr_low = | 1858 | vf_op_params.sge_pause_thr_low = |
1966 | tpa_tlv->tpa_client_info.sge_pause_thr_low; | 1859 | tpa_tlv->tpa_client_info.sge_pause_thr_low; |
1967 | vf_op_params->tpa_mode = | 1860 | vf_op_params.tpa_mode = |
1968 | tpa_tlv->tpa_client_info.tpa_mode; | 1861 | tpa_tlv->tpa_client_info.tpa_mode; |
1969 | vf_op_params->update_ipv4 = | 1862 | vf_op_params.update_ipv4 = |
1970 | tpa_tlv->tpa_client_info.update_ipv4; | 1863 | tpa_tlv->tpa_client_info.update_ipv4; |
1971 | vf_op_params->update_ipv6 = | 1864 | vf_op_params.update_ipv6 = |
1972 | tpa_tlv->tpa_client_info.update_ipv6; | 1865 | tpa_tlv->tpa_client_info.update_ipv6; |
1973 | 1866 | ||
1974 | vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv); | 1867 | rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); |
1975 | 1868 | ||
1976 | mbx_resp: | 1869 | mbx_resp: |
1977 | if (vf->op_rc) | 1870 | bnx2x_vf_mbx_resp(bp, vf, rc); |
1978 | bnx2x_vf_mbx_resp(bp, vf); | ||
1979 | } | 1871 | } |
1980 | 1872 | ||
1981 | /* dispatch request */ | 1873 | /* dispatch request */ |
@@ -2039,11 +1931,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
2039 | 1931 | ||
2040 | /* can we respond to VF (do we have an address for it?) */ | 1932 | /* can we respond to VF (do we have an address for it?) */ |
2041 | if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { | 1933 | if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { |
2042 | /* mbx_resp uses the op_rc of the VF */ | ||
2043 | vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; | ||
2044 | |||
2045 | /* notify the VF that we do not support this request */ | 1934 | /* notify the VF that we do not support this request */ |
2046 | bnx2x_vf_mbx_resp(bp, vf); | 1935 | bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); |
2047 | } else { | 1936 | } else { |
2048 | /* can't send a response since this VF is unknown to us | 1937 | /* can't send a response since this VF is unknown to us |
2049 | * just ack the FW to release the mailbox and unlock | 1938 | * just ack the FW to release the mailbox and unlock |
@@ -2123,7 +2012,7 @@ void bnx2x_vf_mbx(struct bnx2x *bp) | |||
2123 | if (rc) { | 2012 | if (rc) { |
2124 | BNX2X_ERR("Failed to copy request VF %d\n", | 2013 | BNX2X_ERR("Failed to copy request VF %d\n", |
2125 | vf->abs_vfid); | 2014 | vf->abs_vfid); |
2126 | bnx2x_vf_release(bp, vf, false); /* non blocking */ | 2015 | bnx2x_vf_release(bp, vf); |
2127 | return; | 2016 | return; |
2128 | } | 2017 | } |
2129 | 2018 | ||