author		Yishai Hadas <yishaih@mellanox.com>	2015-01-25 09:59:39 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-25 17:43:14 -0500
commit		f5aef5aa35063f2b45c3605871cd525d0cb7fb7a (patch)
tree		2e2d0f3e4e7a272cfe536a22532afb9124ef47e5
parent		f6bc11e42646e661e699a5593cbd1e9dba7191d0 (diff)
net/mlx4_core: Activate reset flow upon fatal command cases
We activate the reset flow upon command fatal errors, when the device enters an erroneous state and must be reset.

The following cases are considered fatal: a FW command timed out, FW returned an error on a closing command, or the PCI channel is offline when posting or pending a command.

In those cases we place the device into an error state: the chip is reset, pending commands are awakened and completed immediately, and subsequent commands return immediately.

The return code in the above cases depends on the command. Commands which free and close resources return success, because the chip was reset and callers may therefore safely free their kernel resources. Other commands return -EIO.

Since the device state is marked as error, the catas poller detects this and restarts the device's software stack, as is done when a FW internal error is detected directly.

The device state is protected by a persistent mutex that lives on its mlx4_dev; as a result, the hcr_mutex is no longer needed and is removed.

Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c |  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c   | 163
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c   |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h  |   2
-rw-r--r--  include/linux/mlx4/cmd.h                   |   1
5 files changed, 153 insertions(+), 27 deletions(-)
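The per-command return policy described above can be stated in a few lines. Below is a minimal, self-contained sketch in plain C, not driver code: the enum values and the helpers is_closing_op() and ret_under_internal_error() are hypothetical stand-ins for the real MLX4_CMD_* opcode handling that the cmd.c hunks below add in mlx4_internal_err_ret_value(). The idea: once the device is in the internal-error state, commands that only close or free resources report success so their callers can still free kernel resources, while all other commands report -EIO.

/* Sketch of the return-value policy under an internal-error state.
 * The opcodes and helpers here are illustrative, not the driver's.
 */
#include <errno.h>
#include <stdio.h>

enum cmd_op { CMD_CLOSE_HCA, CMD_HW2SW_CQ, CMD_FREE_RES, CMD_QUERY_DEV };

/* Closing/freeing commands: after the chip reset the HW side is gone anyway,
 * so reporting success lets the caller release its kernel resources safely.
 */
static int is_closing_op(enum cmd_op op)
{
	return op == CMD_CLOSE_HCA || op == CMD_HW2SW_CQ || op == CMD_FREE_RES;
}

/* Return code a caller sees once the device has entered the error state. */
static int ret_under_internal_error(enum cmd_op op)
{
	return is_closing_op(op) ? 0 : -EIO;
}

int main(void)
{
	printf("CLOSE_HCA -> %d\n", ret_under_internal_error(CMD_CLOSE_HCA)); /* 0 */
	printf("QUERY_DEV -> %d\n", ret_under_internal_error(CMD_QUERY_DEV)); /* -EIO */
	return 0;
}

Running the sketch prints 0 for the closing command and -EIO (-5 on Linux) for the query command, mirroring the policy the patch implements in mlx4_internal_err_ret_value() and mlx4_cmd_reset_flow().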
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 588d6b5e1211..63f14ffcc906 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -42,8 +42,8 @@ enum {
 
 
 
-static int internal_err_reset = 1;
-module_param(internal_err_reset, int, 0644);
+int mlx4_internal_err_reset = 1;
+module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644);
 MODULE_PARM_DESC(internal_err_reset,
 		 "Reset device on internal errors if non-zero"
 		 " (default 1, in SRIOV mode default is 0)");
@@ -92,7 +92,7 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
 	int err;
 	struct mlx4_dev *dev;
 
-	if (!internal_err_reset)
+	if (!mlx4_internal_err_reset)
 		return;
 
 	mutex_lock(&persist->device_state_mutex);
@@ -110,6 +110,7 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
 
 	/* At that step HW was already reset, now notify clients */
 	mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+	mlx4_cmd_wake_completions(dev);
 	return;
 
 out:
@@ -157,7 +158,7 @@ static void poll_catas(unsigned long dev_ptr)
 		return;
 
 internal_err:
-	if (internal_err_reset)
+	if (mlx4_internal_err_reset)
 		queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
 }
 
@@ -177,7 +178,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
 
 	/*If we are in SRIOV the default of the module param must be 0*/
 	if (mlx4_is_mfunc(dev))
-		internal_err_reset = 0;
+		mlx4_internal_err_reset = 0;
 
 	INIT_LIST_HEAD(&priv->catas_err.list);
 	init_timer(&priv->catas_err.timer);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 7cd90e6a4272..3895b2b5fc92 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -182,6 +182,72 @@ static u8 mlx4_errno_to_status(int errno)
 	}
 }
 
+static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
+				       u8 op_modifier)
+{
+	switch (op) {
+	case MLX4_CMD_UNMAP_ICM:
+	case MLX4_CMD_UNMAP_ICM_AUX:
+	case MLX4_CMD_UNMAP_FA:
+	case MLX4_CMD_2RST_QP:
+	case MLX4_CMD_HW2SW_EQ:
+	case MLX4_CMD_HW2SW_CQ:
+	case MLX4_CMD_HW2SW_SRQ:
+	case MLX4_CMD_HW2SW_MPT:
+	case MLX4_CMD_CLOSE_HCA:
+	case MLX4_QP_FLOW_STEERING_DETACH:
+	case MLX4_CMD_FREE_RES:
+	case MLX4_CMD_CLOSE_PORT:
+		return CMD_STAT_OK;
+
+	case MLX4_CMD_QP_ATTACH:
+		/* On Detach case return success */
+		if (op_modifier == 0)
+			return CMD_STAT_OK;
+		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+
+	default:
+		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+	}
+}
+
+static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
+{
+	/* Any error during the closing commands below is considered fatal */
+	if (op == MLX4_CMD_CLOSE_HCA ||
+	    op == MLX4_CMD_HW2SW_EQ ||
+	    op == MLX4_CMD_HW2SW_CQ ||
+	    op == MLX4_CMD_2RST_QP ||
+	    op == MLX4_CMD_HW2SW_SRQ ||
+	    op == MLX4_CMD_SYNC_TPT ||
+	    op == MLX4_CMD_UNMAP_ICM ||
+	    op == MLX4_CMD_UNMAP_ICM_AUX ||
+	    op == MLX4_CMD_UNMAP_FA)
+		return 1;
+	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
+	 * CMD_STAT_REG_BOUND.
+	 * This status indicates that memory region has memory windows bound to it
+	 * which may result from invalid user space usage and is not fatal.
+	 */
+	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
+		return 1;
+	return 0;
+}
+
+static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
+			       int err)
+{
+	/* Only if reset flow is really active return code is based on
+	 * command, otherwise current error code is returned.
+	 */
+	if (mlx4_internal_err_reset) {
+		mlx4_enter_error_state(dev->persist);
+		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
+	}
+
+	return err;
+}
+
 static int comm_pending(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -258,7 +324,7 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
 	cmd->free_head = context->next;
 	spin_unlock(&cmd->context_lock);
 
-	init_completion(&context->done);
+	reinit_completion(&context->done);
 
 	mlx4_comm_cmd_post(dev, op, param);
 
@@ -323,17 +389,21 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 {
 	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 	u32 __iomem *hcr = cmd->hcr;
-	int ret = -EAGAIN;
+	int ret = -EIO;
 	unsigned long end;
 
-	mutex_lock(&cmd->hcr_mutex);
-
-	if (pci_channel_offline(dev->persist->pdev)) {
+	mutex_lock(&dev->persist->device_state_mutex);
+	/* To avoid writing to unknown addresses after the device state was
+	 * changed to internal error and the chip was reset,
+	 * check the INTERNAL_ERROR flag which is updated under
+	 * device_state_mutex lock.
+	 */
+	if (pci_channel_offline(dev->persist->pdev) ||
+	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
 		/*
 		 * Device is going through error recovery
 		 * and cannot accept commands.
 		 */
-		ret = -EIO;
 		goto out;
 	}
 
@@ -347,7 +417,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 			 * Device is going through error recovery
 			 * and cannot accept commands.
 			 */
-			ret = -EIO;
 			goto out;
 		}
 
@@ -391,7 +460,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 	ret = 0;
 
 out:
-	mutex_unlock(&cmd->hcr_mutex);
+	if (ret)
+		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
+			  op, ret, in_param, in_modifier, op_modifier);
+	mutex_unlock(&dev->persist->device_state_mutex);
+
 	return ret;
 }
 
@@ -464,12 +537,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 
 	down(&priv->cmd.poll_sem);
 
-	if (pci_channel_offline(dev->persist->pdev)) {
+	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 		/*
 		 * Device is going through error recovery
 		 * and cannot accept commands.
 		 */
-		err = -EIO;
+		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 		goto out;
 	}
 
@@ -483,7 +556,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
 	if (err)
-		goto out;
+		goto out_reset;
 
 	end = msecs_to_jiffies(timeout) + jiffies;
 	while (cmd_pending(dev) && time_before(jiffies, end)) {
@@ -493,6 +566,11 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 			 * and cannot accept commands.
 			 */
 			err = -EIO;
+			goto out_reset;
+		}
+
+		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 			goto out;
 		}
 
@@ -502,8 +580,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	if (cmd_pending(dev)) {
 		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 			  op);
-		err = -ETIMEDOUT;
-		goto out;
+		err = -EIO;
+		goto out_reset;
 	}
 
 	if (out_is_imm)
@@ -515,10 +593,17 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	stat = be32_to_cpu((__force __be32)
 			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
 	err = mlx4_status_to_errno(stat);
-	if (err)
+	if (err) {
 		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 			 op, stat);
+		if (mlx4_closing_cmd_fatal_error(op, stat))
+			goto out_reset;
+		goto out;
+	}
 
+out_reset:
+	if (err)
+		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 out:
 	up(&priv->cmd.poll_sem);
 	return err;
@@ -565,17 +650,19 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 		goto out;
 	}
 
-	init_completion(&context->done);
+	reinit_completion(&context->done);
 
-	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
-		      in_modifier, op_modifier, op, context->token, 1);
+	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+			    in_modifier, op_modifier, op, context->token, 1);
+	if (err)
+		goto out_reset;
 
 	if (!wait_for_completion_timeout(&context->done,
 					 msecs_to_jiffies(timeout))) {
 		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 			  op);
-		err = -EBUSY;
-		goto out;
+		err = -EIO;
+		goto out_reset;
 	}
 
 	err = context->result;
@@ -592,12 +679,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 		else
 			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 				 op, context->fw_status);
+		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
+		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
+			goto out_reset;
+
 		goto out;
 	}
 
 	if (out_is_imm)
 		*out_param = context->out_param;
 
+out_reset:
+	if (err)
+		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 out:
 	spin_lock(&cmd->context_lock);
 	context->next = cmd->free_head;
@@ -613,9 +708,12 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	       u16 op, unsigned long timeout, int native)
 {
 	if (pci_channel_offline(dev->persist->pdev))
-		return -EIO;
+		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
 
 	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+			return mlx4_internal_err_ret_value(dev, op,
+							   op_modifier);
 		if (mlx4_priv(dev)->cmd.use_events)
 			return mlx4_cmd_wait(dev, in_param, out_param,
 					     out_is_imm, in_modifier,
@@ -2121,7 +2219,6 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 	int flags = 0;
 
 	if (!priv->cmd.initialized) {
-		mutex_init(&priv->cmd.hcr_mutex);
 		mutex_init(&priv->cmd.slave_cmd_mutex);
 		sema_init(&priv->cmd.poll_sem, 1);
 		priv->cmd.use_events = 0;
@@ -2232,6 +2329,11 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
 		priv->cmd.context[i].token = i;
 		priv->cmd.context[i].next = i + 1;
+		/* To support fatal error flow, initialize all
+		 * cmd contexts to allow simulating completions
+		 * with complete() at any time.
+		 */
+		init_completion(&priv->cmd.context[i].done);
 	}
 
 	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
@@ -2329,6 +2431,25 @@ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
 	return slave - 1;
 }
 
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cmd_context *context;
+	int i;
+
+	spin_lock(&priv->cmd.context_lock);
+	if (priv->cmd.context) {
+		for (i = 0; i < priv->cmd.max_cmds; ++i) {
+			context = &priv->cmd.context[i];
+			context->fw_status = CMD_STAT_INTERNAL_ERR;
+			context->result =
+				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+			complete(&context->done);
+		}
+	}
+	spin_unlock(&priv->cmd.context_lock);
+}
+
 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_active_ports actv_ports;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index a3867e7ef885..d22d9283d2cd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1318,6 +1318,9 @@ out:
 	mutex_unlock(&priv->mcg_table.mutex);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
+	if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+		/* In case device is under an error, return success as a closing command */
+		err = 0;
 	return err;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index aa1ecbc5a606..5c772ea4473b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -235,6 +235,7 @@ do { \
 
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
+extern int mlx4_internal_err_reset;
 
 #define MLX4_MAX_NUM_SLAVES	(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
 #define ALL_SLAVES 0xff
@@ -607,7 +608,6 @@ struct mlx4_mgm {
 struct mlx4_cmd {
 	struct pci_pool *pool;
 	void __iomem *hcr;
-	struct mutex hcr_mutex;
 	struct mutex slave_cmd_mutex;
 	struct semaphore poll_sem;
 	struct semaphore event_sem;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 64d25941b329..e7543844cc7a 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -279,6 +279,7 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
 			      struct mlx4_config_dev_params *params);
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
 /*
  * mlx4_get_slave_default_vlan -
  * return true if VST ( default vlan)