diff options
Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_mcp.c | 187
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_mcp.h | 27
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 2
-rw-r--r-- | drivers/net/ethernet/qlogic/qede/qede_filter.c | 6
-rw-r--r-- | drivers/net/ethernet/qlogic/qlge/qlge_main.c | 23
6 files changed, 189 insertions, 58 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d9ab5add27a8..34193c2f1699 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c | |||
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, | |||
407 | 407 | ||
408 | if (i == QED_INIT_MAX_POLL_COUNT) { | 408 | if (i == QED_INIT_MAX_POLL_COUNT) { |
409 | DP_ERR(p_hwfn, | 409 | DP_ERR(p_hwfn, |
410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", | 410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", |
411 | addr, le32_to_cpu(cmd->expected_val), | 411 | addr, le32_to_cpu(cmd->expected_val), |
412 | val, le32_to_cpu(cmd->op_data)); | 412 | val, le32_to_cpu(cmd->op_data)); |
413 | } | 413 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index d89a0e22f6e4..5d37ec7e9b0b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
@@ -48,7 +48,7 @@ | |||
48 | #include "qed_reg_addr.h" | 48 | #include "qed_reg_addr.h" |
49 | #include "qed_sriov.h" | 49 | #include "qed_sriov.h" |
50 | 50 | ||
51 | #define CHIP_MCP_RESP_ITER_US 10 | 51 | #define QED_MCP_RESP_ITER_US 10 |
52 | 52 | ||
53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ | 53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ |
54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ | 54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ |
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) | |||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | /* Maximum of 1 sec to wait for the SHMEM ready indication */ | ||
187 | #define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 | ||
188 | #define QED_MCP_SHMEM_RDY_ITER_MS 50 | ||
189 | |||
186 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 190 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
187 | { | 191 | { |
188 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; | 192 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; |
193 | u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; | ||
194 | u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; | ||
189 | u32 drv_mb_offsize, mfw_mb_offsize; | 195 | u32 drv_mb_offsize, mfw_mb_offsize; |
190 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); | 196 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); |
191 | 197 | ||
192 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); | 198 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); |
193 | if (!p_info->public_base) | 199 | if (!p_info->public_base) { |
194 | return 0; | 200 | DP_NOTICE(p_hwfn, |
201 | "The address of the MCP scratch-pad is not configured\n"); | ||
202 | return -EINVAL; | ||
203 | } | ||
195 | 204 | ||
196 | p_info->public_base |= GRCBASE_MCP; | 205 | p_info->public_base |= GRCBASE_MCP; |
197 | 206 | ||
207 | /* Get the MFW MB address and number of supported messages */ | ||
208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
210 | PUBLIC_MFW_MB)); | ||
211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, | ||
213 | p_info->mfw_mb_addr + | ||
214 | offsetof(struct public_mfw_mb, | ||
215 | sup_msgs)); | ||
216 | |||
217 | /* The driver can notify that there was an MCP reset, and might read the | ||
218 | * SHMEM values before the MFW has completed initializing them. | ||
219 | * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a | ||
220 | * data ready indication. | ||
221 | */ | ||
222 | while (!p_info->mfw_mb_length && --cnt) { | ||
223 | msleep(msec); | ||
224 | p_info->mfw_mb_length = | ||
225 | (u16)qed_rd(p_hwfn, p_ptt, | ||
226 | p_info->mfw_mb_addr + | ||
227 | offsetof(struct public_mfw_mb, sup_msgs)); | ||
228 | } | ||
229 | |||
230 | if (!cnt) { | ||
231 | DP_NOTICE(p_hwfn, | ||
232 | "Failed to get the SHMEM ready notification after %d msec\n", | ||
233 | QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); | ||
234 | return -EBUSY; | ||
235 | } | ||
236 | |||
198 | /* Calculate the driver and MFW mailbox address */ | 237 | /* Calculate the driver and MFW mailbox address */ |
199 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, | 238 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, |
200 | SECTION_OFFSIZE_ADDR(p_info->public_base, | 239 | SECTION_OFFSIZE_ADDR(p_info->public_base, |
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
204 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", | 243 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", |
205 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); | 244 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); |
206 | 245 | ||
207 | /* Set the MFW MB address */ | ||
208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
210 | PUBLIC_MFW_MB)); | ||
211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); | ||
213 | |||
214 | /* Get the current driver mailbox sequence before sending | 246 | /* Get the current driver mailbox sequence before sending |
215 | * the first command | 247 | * the first command |
216 | */ | 248 | */ |
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, | |||
285 | 317 | ||
286 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 318 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
287 | { | 319 | { |
288 | u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; | 320 | u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; |
289 | int rc = 0; | 321 | int rc = 0; |
290 | 322 | ||
323 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
324 | DP_NOTICE(p_hwfn, | ||
325 | "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n"); | ||
326 | return -EBUSY; | ||
327 | } | ||
328 | |||
291 | /* Ensure that only a single thread is accessing the mailbox */ | 329 | /* Ensure that only a single thread is accessing the mailbox */ |
292 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 330 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
293 | 331 | ||
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
413 | (p_mb_params->cmd | seq_num), p_mb_params->param); | 451 | (p_mb_params->cmd | seq_num), p_mb_params->param); |
414 | } | 452 | } |
415 | 453 | ||
454 | static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) | ||
455 | { | ||
456 | p_hwfn->mcp_info->b_block_cmd = block_cmd; | ||
457 | |||
458 | DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", | ||
459 | block_cmd ? "Block" : "Unblock"); | ||
460 | } | ||
461 | |||
462 | static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, | ||
463 | struct qed_ptt *p_ptt) | ||
464 | { | ||
465 | u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; | ||
466 | u32 delay = QED_MCP_RESP_ITER_US; | ||
467 | |||
468 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
469 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
470 | cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
471 | udelay(delay); | ||
472 | cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
473 | udelay(delay); | ||
474 | cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
475 | |||
476 | DP_NOTICE(p_hwfn, | ||
477 | "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", | ||
478 | cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); | ||
479 | } | ||
480 | |||
416 | static int | 481 | static int |
417 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | 482 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, |
418 | struct qed_ptt *p_ptt, | 483 | struct qed_ptt *p_ptt, |
419 | struct qed_mcp_mb_params *p_mb_params, | 484 | struct qed_mcp_mb_params *p_mb_params, |
420 | u32 max_retries, u32 delay) | 485 | u32 max_retries, u32 usecs) |
421 | { | 486 | { |
487 | u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); | ||
422 | struct qed_mcp_cmd_elem *p_cmd_elem; | 488 | struct qed_mcp_cmd_elem *p_cmd_elem; |
423 | u32 cnt = 0; | ||
424 | u16 seq_num; | 489 | u16 seq_num; |
425 | int rc = 0; | 490 | int rc = 0; |
426 | 491 | ||
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
443 | goto err; | 508 | goto err; |
444 | 509 | ||
445 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 510 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
446 | udelay(delay); | 511 | |
512 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) | ||
513 | msleep(msecs); | ||
514 | else | ||
515 | udelay(usecs); | ||
447 | } while (++cnt < max_retries); | 516 | } while (++cnt < max_retries); |
448 | 517 | ||
449 | if (cnt >= max_retries) { | 518 | if (cnt >= max_retries) { |
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
472 | * The spinlock stays locked until the list element is removed. | 541 | * The spinlock stays locked until the list element is removed. |
473 | */ | 542 | */ |
474 | 543 | ||
475 | udelay(delay); | 544 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) |
545 | msleep(msecs); | ||
546 | else | ||
547 | udelay(usecs); | ||
548 | |||
476 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 549 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
477 | 550 | ||
478 | if (p_cmd_elem->b_is_completed) | 551 | if (p_cmd_elem->b_is_completed) |
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
491 | DP_NOTICE(p_hwfn, | 564 | DP_NOTICE(p_hwfn, |
492 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", | 565 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", |
493 | p_mb_params->cmd, p_mb_params->param); | 566 | p_mb_params->cmd, p_mb_params->param); |
567 | qed_mcp_print_cpu_info(p_hwfn, p_ptt); | ||
494 | 568 | ||
495 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 569 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
496 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); | 570 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); |
497 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 571 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
498 | 572 | ||
573 | if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) | ||
574 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
575 | |||
499 | return -EAGAIN; | 576 | return -EAGAIN; |
500 | } | 577 | } |
501 | 578 | ||
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
507 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", | 584 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", |
508 | p_mb_params->mcp_resp, | 585 | p_mb_params->mcp_resp, |
509 | p_mb_params->mcp_param, | 586 | p_mb_params->mcp_param, |
510 | (cnt * delay) / 1000, (cnt * delay) % 1000); | 587 | (cnt * usecs) / 1000, (cnt * usecs) % 1000); |
511 | 588 | ||
512 | /* Clear the sequence number from the MFW response */ | 589 | /* Clear the sequence number from the MFW response */ |
513 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; | 590 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; |
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
525 | { | 602 | { |
526 | size_t union_data_size = sizeof(union drv_union_data); | 603 | size_t union_data_size = sizeof(union drv_union_data); |
527 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; | 604 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; |
528 | u32 delay = CHIP_MCP_RESP_ITER_US; | 605 | u32 usecs = QED_MCP_RESP_ITER_US; |
529 | 606 | ||
530 | /* MCP not initialized */ | 607 | /* MCP not initialized */ |
531 | if (!qed_mcp_is_init(p_hwfn)) { | 608 | if (!qed_mcp_is_init(p_hwfn)) { |
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
533 | return -EBUSY; | 610 | return -EBUSY; |
534 | } | 611 | } |
535 | 612 | ||
613 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
614 | DP_NOTICE(p_hwfn, | ||
615 | "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n", | ||
616 | p_mb_params->cmd, p_mb_params->param); | ||
617 | return -EBUSY; | ||
618 | } | ||
619 | |||
536 | if (p_mb_params->data_src_size > union_data_size || | 620 | if (p_mb_params->data_src_size > union_data_size || |
537 | p_mb_params->data_dst_size > union_data_size) { | 621 | p_mb_params->data_dst_size > union_data_size) { |
538 | DP_ERR(p_hwfn, | 622 | DP_ERR(p_hwfn, |
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
542 | return -EINVAL; | 626 | return -EINVAL; |
543 | } | 627 | } |
544 | 628 | ||
629 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { | ||
630 | max_retries = DIV_ROUND_UP(max_retries, 1000); | ||
631 | usecs *= 1000; | ||
632 | } | ||
633 | |||
545 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, | 634 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, |
546 | delay); | 635 | usecs); |
547 | } | 636 | } |
548 | 637 | ||
549 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, | 638 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, |
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
761 | mb_params.data_src_size = sizeof(load_req); | 850 | mb_params.data_src_size = sizeof(load_req); |
762 | mb_params.p_data_dst = &load_rsp; | 851 | mb_params.p_data_dst = &load_rsp; |
763 | mb_params.data_dst_size = sizeof(load_rsp); | 852 | mb_params.data_dst_size = sizeof(load_rsp); |
853 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
764 | 854 | ||
765 | DP_VERBOSE(p_hwfn, QED_MSG_SP, | 855 | DP_VERBOSE(p_hwfn, QED_MSG_SP, |
766 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", | 856 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", |
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
982 | 1072 | ||
983 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1073 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
984 | { | 1074 | { |
985 | u32 wol_param, mcp_resp, mcp_param; | 1075 | struct qed_mcp_mb_params mb_params; |
1076 | u32 wol_param; | ||
986 | 1077 | ||
987 | switch (p_hwfn->cdev->wol_config) { | 1078 | switch (p_hwfn->cdev->wol_config) { |
988 | case QED_OV_WOL_DISABLED: | 1079 | case QED_OV_WOL_DISABLED: |
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
1000 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; | 1091 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; |
1001 | } | 1092 | } |
1002 | 1093 | ||
1003 | return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, | 1094 | memset(&mb_params, 0, sizeof(mb_params)); |
1004 | &mcp_resp, &mcp_param); | 1095 | mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; |
1096 | mb_params.param = wol_param; | ||
1097 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
1098 | |||
1099 | return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); | ||
1005 | } | 1100 | } |
1006 | 1101 | ||
1007 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1102 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, | |||
2077 | return rc; | 2172 | return rc; |
2078 | } | 2173 | } |
2079 | 2174 | ||
2175 | /* A maximal 100 msec waiting time for the MCP to halt */ | ||
2176 | #define QED_MCP_HALT_SLEEP_MS 10 | ||
2177 | #define QED_MCP_HALT_MAX_RETRIES 10 | ||
2178 | |||
2080 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2179 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
2081 | { | 2180 | { |
2082 | u32 resp = 0, param = 0; | 2181 | u32 resp = 0, param = 0, cpu_state, cnt = 0; |
2083 | int rc; | 2182 | int rc; |
2084 | 2183 | ||
2085 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, | 2184 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, |
2086 | ¶m); | 2185 | ¶m); |
2087 | if (rc) | 2186 | if (rc) { |
2088 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); | 2187 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); |
2188 | return rc; | ||
2189 | } | ||
2089 | 2190 | ||
2090 | return rc; | 2191 | do { |
2192 | msleep(QED_MCP_HALT_SLEEP_MS); | ||
2193 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
2194 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) | ||
2195 | break; | ||
2196 | } while (++cnt < QED_MCP_HALT_MAX_RETRIES); | ||
2197 | |||
2198 | if (cnt == QED_MCP_HALT_MAX_RETRIES) { | ||
2199 | DP_NOTICE(p_hwfn, | ||
2200 | "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
2201 | qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); | ||
2202 | return -EBUSY; | ||
2203 | } | ||
2204 | |||
2205 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
2206 | |||
2207 | return 0; | ||
2091 | } | 2208 | } |
2092 | 2209 | ||
2210 | #define QED_MCP_RESUME_SLEEP_MS 10 | ||
2211 | |||
2093 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2212 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
2094 | { | 2213 | { |
2095 | u32 value, cpu_mode; | 2214 | u32 cpu_mode, cpu_state; |
2096 | 2215 | ||
2097 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); | 2216 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); |
2098 | 2217 | ||
2099 | value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
2100 | value &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
2101 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); | ||
2102 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | 2218 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); |
2219 | cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
2220 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); | ||
2221 | msleep(QED_MCP_RESUME_SLEEP_MS); | ||
2222 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
2103 | 2223 | ||
2104 | return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; | 2224 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { |
2225 | DP_NOTICE(p_hwfn, | ||
2226 | "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
2227 | cpu_mode, cpu_state); | ||
2228 | return -EBUSY; | ||
2229 | } | ||
2230 | |||
2231 | qed_mcp_cmd_set_blocking(p_hwfn, false); | ||
2232 | |||
2233 | return 0; | ||
2105 | } | 2234 | } |
2106 | 2235 | ||
2107 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, | 2236 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 047976d5c6e9..85e6b3989e7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h | |||
@@ -635,11 +635,14 @@ struct qed_mcp_info { | |||
635 | */ | 635 | */ |
636 | spinlock_t cmd_lock; | 636 | spinlock_t cmd_lock; |
637 | 637 | ||
638 | /* Flag to indicate whether sending a MFW mailbox command is blocked */ | ||
639 | bool b_block_cmd; | ||
640 | |||
638 | /* Spinlock used for syncing SW link-changes and link-changes | 641 | /* Spinlock used for syncing SW link-changes and link-changes |
639 | * originating from attention context. | 642 | * originating from attention context. |
640 | */ | 643 | */ |
641 | spinlock_t link_lock; | 644 | spinlock_t link_lock; |
642 | bool block_mb_sending; | 645 | |
643 | u32 public_base; | 646 | u32 public_base; |
644 | u32 drv_mb_addr; | 647 | u32 drv_mb_addr; |
645 | u32 mfw_mb_addr; | 648 | u32 mfw_mb_addr; |
@@ -660,14 +663,20 @@ struct qed_mcp_info { | |||
660 | }; | 663 | }; |
661 | 664 | ||
662 | struct qed_mcp_mb_params { | 665 | struct qed_mcp_mb_params { |
663 | u32 cmd; | 666 | u32 cmd; |
664 | u32 param; | 667 | u32 param; |
665 | void *p_data_src; | 668 | void *p_data_src; |
666 | u8 data_src_size; | 669 | void *p_data_dst; |
667 | void *p_data_dst; | 670 | u8 data_src_size; |
668 | u8 data_dst_size; | 671 | u8 data_dst_size; |
669 | u32 mcp_resp; | 672 | u32 mcp_resp; |
670 | u32 mcp_param; | 673 | u32 mcp_param; |
674 | u32 flags; | ||
675 | #define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) | ||
676 | #define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1) | ||
677 | #define QED_MB_FLAGS_IS_SET(params, flag) \ | ||
678 | ({ typeof(params) __params = (params); \ | ||
679 | (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) | ||
671 | }; | 680 | }; |
672 | 681 | ||
673 | struct qed_drv_tlv_hdr { | 682 | struct qed_drv_tlv_hdr { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index d8ad2dcad8d5..f736f70956fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
@@ -562,8 +562,10 @@ | |||
562 | 0 | 562 | 0 |
563 | #define MCP_REG_CPU_STATE \ | 563 | #define MCP_REG_CPU_STATE \ |
564 | 0xe05004UL | 564 | 0xe05004UL |
565 | #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) | ||
565 | #define MCP_REG_CPU_EVENT_MASK \ | 566 | #define MCP_REG_CPU_EVENT_MASK \ |
566 | 0xe05008UL | 567 | 0xe05008UL |
568 | #define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL | ||
567 | #define PGLUE_B_REG_PF_BAR0_SIZE \ | 569 | #define PGLUE_B_REG_PF_BAR0_SIZE \ |
568 | 0x2aae60UL | 570 | 0x2aae60UL |
569 | #define PGLUE_B_REG_PF_BAR1_SIZE \ | 571 | #define PGLUE_B_REG_PF_BAR1_SIZE \ |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9673d19308e6..b16ce7d93caf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c | |||
@@ -2006,18 +2006,16 @@ unlock: | |||
2006 | static int qede_parse_actions(struct qede_dev *edev, | 2006 | static int qede_parse_actions(struct qede_dev *edev, |
2007 | struct tcf_exts *exts) | 2007 | struct tcf_exts *exts) |
2008 | { | 2008 | { |
2009 | int rc = -EINVAL, num_act = 0; | 2009 | int rc = -EINVAL, num_act = 0, i; |
2010 | const struct tc_action *a; | 2010 | const struct tc_action *a; |
2011 | bool is_drop = false; | 2011 | bool is_drop = false; |
2012 | LIST_HEAD(actions); | ||
2013 | 2012 | ||
2014 | if (!tcf_exts_has_actions(exts)) { | 2013 | if (!tcf_exts_has_actions(exts)) { |
2015 | DP_NOTICE(edev, "No tc actions received\n"); | 2014 | DP_NOTICE(edev, "No tc actions received\n"); |
2016 | return rc; | 2015 | return rc; |
2017 | } | 2016 | } |
2018 | 2017 | ||
2019 | tcf_exts_to_list(exts, &actions); | 2018 | tcf_exts_for_each_action(i, a, exts) { |
2020 | list_for_each_entry(a, &actions, list) { | ||
2021 | num_act++; | 2019 | num_act++; |
2022 | 2020 | ||
2023 | if (is_tcf_gact_shot(a)) | 2021 | if (is_tcf_gact_shot(a)) |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 353f1c129af1..059ba9429e51 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, | |||
2384 | return status; | 2384 | return status; |
2385 | } | 2385 | } |
2386 | 2386 | ||
2387 | static netdev_features_t qlge_fix_features(struct net_device *ndev, | ||
2388 | netdev_features_t features) | ||
2389 | { | ||
2390 | int err; | ||
2391 | |||
2392 | /* Update the behavior of vlan accel in the adapter */ | ||
2393 | err = qlge_update_hw_vlan_features(ndev, features); | ||
2394 | if (err) | ||
2395 | return err; | ||
2396 | |||
2397 | return features; | ||
2398 | } | ||
2399 | |||
2400 | static int qlge_set_features(struct net_device *ndev, | 2387 | static int qlge_set_features(struct net_device *ndev, |
2401 | netdev_features_t features) | 2388 | netdev_features_t features) |
2402 | { | 2389 | { |
2403 | netdev_features_t changed = ndev->features ^ features; | 2390 | netdev_features_t changed = ndev->features ^ features; |
2391 | int err; | ||
2392 | |||
2393 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) { | ||
2394 | /* Update the behavior of vlan accel in the adapter */ | ||
2395 | err = qlge_update_hw_vlan_features(ndev, features); | ||
2396 | if (err) | ||
2397 | return err; | ||
2404 | 2398 | ||
2405 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) | ||
2406 | qlge_vlan_mode(ndev, features); | 2399 | qlge_vlan_mode(ndev, features); |
2400 | } | ||
2407 | 2401 | ||
2408 | return 0; | 2402 | return 0; |
2409 | } | 2403 | } |
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = { | |||
4719 | .ndo_set_mac_address = qlge_set_mac_address, | 4713 | .ndo_set_mac_address = qlge_set_mac_address, |
4720 | .ndo_validate_addr = eth_validate_addr, | 4714 | .ndo_validate_addr = eth_validate_addr, |
4721 | .ndo_tx_timeout = qlge_tx_timeout, | 4715 | .ndo_tx_timeout = qlge_tx_timeout, |
4722 | .ndo_fix_features = qlge_fix_features, | ||
4723 | .ndo_set_features = qlge_set_features, | 4716 | .ndo_set_features = qlge_set_features, |
4724 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, | 4717 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, |
4725 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, | 4718 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, |