author    James Smart <james.smart@emulex.com>    2012-08-03 12:36:13 -0400
committer James Bottomley <JBottomley@Parallels.com>    2012-09-14 09:41:19 -0400
commit    67d1273385d454a3f1b083b807f2cdda95e995ec (patch)
tree      93f6ff11c119b3e9460e6f302e09eaedf1844259 /drivers/scsi/lpfc
parent    aa6fbb757ab6fce4647bafd28f9a49e5b0fa07db (diff)
[SCSI] lpfc 8.3.33: Tie parallel I/O queues into separate MSIX vectors
Add the fcp_io_channel module attribute to control the number of parallel I/O queues.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
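With this change each FCP I/O channel owns one EQ/CQ/WQ set and one MSI-X vector; loading the driver with, e.g., lpfc_fcp_io_channel=4 requests four such channels. The fragment below is a sketch of the per-channel vector wiring implied by the new fcp_eq_hdl array and the consolidated lpfc_sli4_hba_intr_handler; the actual hookup lives in the MSI-X enable path, which is outside this excerpt, and the handler-name macro is hypothetical.

	/* Sketch only: one MSI-X vector per I/O channel, every vector routed
	 * to the single consolidated fast-path handler.  The per-channel
	 * fcp_eq_hdl entry is passed as the dev_id cookie so the handler can
	 * tell which channel (and hence which EQ) fired. */
	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
		phba->sli4_hba.fcp_eq_hdl[idx].idx = idx;
		phba->sli4_hba.fcp_eq_hdl[idx].phba = phba;
		rc = request_irq(phba->sli4_hba.msix_entries[idx].vector,
				 lpfc_sli4_hba_intr_handler, 0,
				 LPFC_FP_HANDLER_NAME,	/* hypothetical name string */
				 &phba->sli4_hba.fcp_eq_hdl[idx]);
		if (rc)
			goto cfg_fail_out;	/* free vectors requested so far */
	}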
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      26
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h       3
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   78
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h   69
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     752
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      184
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h      28
8 files changed, 454 insertions(+), 687 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a870af1b5478..d9f21fbc4099 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -695,6 +695,7 @@ struct lpfc_hba {
695 uint32_t cfg_fcp_imax; 695 uint32_t cfg_fcp_imax;
696 uint32_t cfg_fcp_wq_count; 696 uint32_t cfg_fcp_wq_count;
697 uint32_t cfg_fcp_eq_count; 697 uint32_t cfg_fcp_eq_count;
698 uint32_t cfg_fcp_io_channel;
698 uint32_t cfg_sg_seg_cnt; 699 uint32_t cfg_sg_seg_cnt;
699 uint32_t cfg_prot_sg_seg_cnt; 700 uint32_t cfg_prot_sg_seg_cnt;
700 uint32_t cfg_sg_dma_buf_size; 701 uint32_t cfg_sg_dma_buf_size;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index adef5bb2100e..2910208b5dfa 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3654,7 +3654,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
3654 return -EINVAL; 3654 return -EINVAL;
3655 3655
3656 phba->cfg_fcp_imax = (uint32_t)val; 3656 phba->cfg_fcp_imax = (uint32_t)val;
3657 for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY) 3657 for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
3658 lpfc_modify_fcp_eq_delay(phba, i); 3658 lpfc_modify_fcp_eq_delay(phba, i);
3659 3659
3660 return strlen(buf); 3660 return strlen(buf);
@@ -3844,21 +3844,33 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
3844 3844
3845/* 3845/*
3846# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues 3846# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
3847# This parameter is ignored and will eventually be deprecated
3847# 3848#
3848# Value range is [1,31]. Default value is 4. 3849# Value range is [1,7]. Default value is 4.
3849*/ 3850*/
3850LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, 3851LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
3852 LPFC_FCP_IO_CHAN_MAX,
3851 "Set the number of fast-path FCP work queues, if possible"); 3853 "Set the number of fast-path FCP work queues, if possible");
3852 3854
3853/* 3855/*
3854# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues 3856# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
3855# 3857#
3856# Value range is [1,7]. Default value is 1. 3858# Value range is [1,7]. Default value is 4.
3857*/ 3859*/
3858LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, 3860LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
3861 LPFC_FCP_IO_CHAN_MAX,
3859 "Set the number of fast-path FCP event queues, if possible"); 3862 "Set the number of fast-path FCP event queues, if possible");
3860 3863
3861/* 3864/*
3865# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
3866#
3867# Value range is [1,7]. Default value is 4.
3868*/
3869LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
3870 LPFC_FCP_IO_CHAN_MAX,
3871 "Set the number of FCP I/O channels");
3872
3873/*
3862# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 3874# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
3863# 0 = HBA resets disabled 3875# 0 = HBA resets disabled
3864# 1 = HBA resets enabled (default) 3876# 1 = HBA resets enabled (default)
@@ -4002,6 +4014,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4002 &dev_attr_lpfc_fcp_imax, 4014 &dev_attr_lpfc_fcp_imax,
4003 &dev_attr_lpfc_fcp_wq_count, 4015 &dev_attr_lpfc_fcp_wq_count,
4004 &dev_attr_lpfc_fcp_eq_count, 4016 &dev_attr_lpfc_fcp_eq_count,
4017 &dev_attr_lpfc_fcp_io_channel,
4005 &dev_attr_lpfc_enable_bg, 4018 &dev_attr_lpfc_enable_bg,
4006 &dev_attr_lpfc_soft_wwnn, 4019 &dev_attr_lpfc_soft_wwnn,
4007 &dev_attr_lpfc_soft_wwpn, 4020 &dev_attr_lpfc_soft_wwpn,
@@ -4980,6 +4993,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4980 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 4993 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4981 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); 4994 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
4982 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); 4995 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
4996 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
4983 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4997 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4984 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4998 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4985 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4999 lpfc_enable_bg_init(phba, lpfc_enable_bg);
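For reference, the LPFC_ATTR_R() invocations above expand to a module parameter plus a per-HBA init helper that clamps the requested value into range. The sketch below shows the rough shape of that expansion for the new attribute, on the assumption that it follows the same lpfc_param_init pattern as the existing attributes (the real macro output also logs the rejected value); it is illustrative, not the literal generated code.

	static int lpfc_fcp_io_channel = LPFC_FCP_IO_CHAN_DEF;
	module_param(lpfc_fcp_io_channel, int, S_IRUGO);
	MODULE_PARM_DESC(lpfc_fcp_io_channel, "Set the number of FCP I/O channels");

	/* Called from lpfc_get_cfgparam(); an out-of-range request falls
	 * back to the default rather than failing the load. */
	static int
	lpfc_fcp_io_channel_init(struct lpfc_hba *phba, int val)
	{
		if (val >= LPFC_FCP_IO_CHAN_MIN && val <= LPFC_FCP_IO_CHAN_MAX) {
			phba->cfg_fcp_io_channel = val;
			return 0;
		}
		phba->cfg_fcp_io_channel = LPFC_FCP_IO_CHAN_DEF;
		return -EINVAL;
	}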
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8a2a514a2553..08e3a9b60e45 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *);
196irqreturn_t lpfc_sli_sp_intr_handler(int, void *); 196irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
197irqreturn_t lpfc_sli_fp_intr_handler(int, void *); 197irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
198irqreturn_t lpfc_sli4_intr_handler(int, void *); 198irqreturn_t lpfc_sli4_intr_handler(int, void *);
199irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); 199irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
200irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
201 200
202void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 201void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
203void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *); 202void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
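The separate slow-path and fast-path SLI4 handlers collapse into the single lpfc_sli4_hba_intr_handler() declared above. A sketch of how such a consolidated handler recovers its channel from the dev_id cookie (the fcp_eq_hdl field names are assumed from this patch's usage, and the body is elided):

	irqreturn_t
	lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
	{
		struct lpfc_fcp_eq_hdl *fcp_eq_hdl = dev_id;
		struct lpfc_hba *phba = fcp_eq_hdl->phba;
		int idx = fcp_eq_hdl->idx;	/* which I/O channel fired */

		/* ... service phba->sli4_hba.hba_eq[idx]; per the setup hunks
		 * below, channel 0's EQ also carries the slow-path (mailbox
		 * and ELS) completions ... */
		return IRQ_HANDLED;
	}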
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5eac0942f13d..6e5e565094f2 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2013,38 +2013,23 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2013 if (*ppos) 2013 if (*ppos)
2014 return 0; 2014 return 0;
2015 2015
2016 /* Get slow-path event queue information */
2017 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2018 "Slow-path EQ information:\n");
2019 if (phba->sli4_hba.sp_eq) {
2020 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2021 "\tEQID[%02d], "
2022 "QE-COUNT[%04d], QE-SIZE[%04d], "
2023 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
2024 phba->sli4_hba.sp_eq->queue_id,
2025 phba->sli4_hba.sp_eq->entry_count,
2026 phba->sli4_hba.sp_eq->entry_size,
2027 phba->sli4_hba.sp_eq->host_index,
2028 phba->sli4_hba.sp_eq->hba_index);
2029 }
2030
2031 /* Get fast-path event queue information */ 2016 /* Get fast-path event queue information */
2032 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2017 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2033 "Fast-path EQ information:\n"); 2018 "HBA EQ information:\n");
2034 if (phba->sli4_hba.fp_eq) { 2019 if (phba->sli4_hba.hba_eq) {
2035 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 2020 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
2036 fcp_qidx++) { 2021 fcp_qidx++) {
2037 if (phba->sli4_hba.fp_eq[fcp_qidx]) { 2022 if (phba->sli4_hba.hba_eq[fcp_qidx]) {
2038 len += snprintf(pbuffer+len, 2023 len += snprintf(pbuffer+len,
2039 LPFC_QUE_INFO_GET_BUF_SIZE-len, 2024 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2040 "\tEQID[%02d], " 2025 "\tEQID[%02d], "
2041 "QE-COUNT[%04d], QE-SIZE[%04d], " 2026 "QE-COUNT[%04d], QE-SIZE[%04d], "
2042 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2027 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
2043 phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, 2028 phba->sli4_hba.hba_eq[fcp_qidx]->queue_id,
2044 phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, 2029 phba->sli4_hba.hba_eq[fcp_qidx]->entry_count,
2045 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, 2030 phba->sli4_hba.hba_eq[fcp_qidx]->entry_size,
2046 phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 2031 phba->sli4_hba.hba_eq[fcp_qidx]->host_index,
2047 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); 2032 phba->sli4_hba.hba_eq[fcp_qidx]->hba_index);
2048 } 2033 }
2049 } 2034 }
2050 } 2035 }
@@ -2108,7 +2093,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2108 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 2093 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
2109 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 2094 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
2110 } 2095 }
2111 } while (++fcp_qidx < phba->cfg_fcp_eq_count); 2096 } while (++fcp_qidx < phba->cfg_fcp_io_channel);
2112 len += snprintf(pbuffer+len, 2097 len += snprintf(pbuffer+len,
2113 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2098 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2114 } 2099 }
@@ -2153,7 +2138,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2153 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2138 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2154 "Fast-path FCP WQ information:\n"); 2139 "Fast-path FCP WQ information:\n");
2155 if (phba->sli4_hba.fcp_wq) { 2140 if (phba->sli4_hba.fcp_wq) {
2156 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; 2141 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
2157 fcp_qidx++) { 2142 fcp_qidx++) {
2158 if (!phba->sli4_hba.fcp_wq[fcp_qidx]) 2143 if (!phba->sli4_hba.fcp_wq[fcp_qidx])
2159 continue; 2144 continue;
@@ -2410,31 +2395,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2410 2395
2411 switch (quetp) { 2396 switch (quetp) {
2412 case LPFC_IDIAG_EQ: 2397 case LPFC_IDIAG_EQ:
2413 /* Slow-path event queue */ 2398 /* HBA event queue */
2414 if (phba->sli4_hba.sp_eq && 2399 if (phba->sli4_hba.hba_eq) {
2415 phba->sli4_hba.sp_eq->queue_id == queid) { 2400 for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
2416 /* Sanity check */ 2401 qidx++) {
2417 rc = lpfc_idiag_que_param_check( 2402 if (phba->sli4_hba.hba_eq[qidx] &&
2418 phba->sli4_hba.sp_eq, index, count); 2403 phba->sli4_hba.hba_eq[qidx]->queue_id ==
2419 if (rc)
2420 goto error_out;
2421 idiag.ptr_private = phba->sli4_hba.sp_eq;
2422 goto pass_check;
2423 }
2424 /* Fast-path event queue */
2425 if (phba->sli4_hba.fp_eq) {
2426 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
2427 if (phba->sli4_hba.fp_eq[qidx] &&
2428 phba->sli4_hba.fp_eq[qidx]->queue_id ==
2429 queid) { 2404 queid) {
2430 /* Sanity check */ 2405 /* Sanity check */
2431 rc = lpfc_idiag_que_param_check( 2406 rc = lpfc_idiag_que_param_check(
2432 phba->sli4_hba.fp_eq[qidx], 2407 phba->sli4_hba.hba_eq[qidx],
2433 index, count); 2408 index, count);
2434 if (rc) 2409 if (rc)
2435 goto error_out; 2410 goto error_out;
2436 idiag.ptr_private = 2411 idiag.ptr_private =
2437 phba->sli4_hba.fp_eq[qidx]; 2412 phba->sli4_hba.hba_eq[qidx];
2438 goto pass_check; 2413 goto pass_check;
2439 } 2414 }
2440 } 2415 }
@@ -2481,7 +2456,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2481 phba->sli4_hba.fcp_cq[qidx]; 2456 phba->sli4_hba.fcp_cq[qidx];
2482 goto pass_check; 2457 goto pass_check;
2483 } 2458 }
2484 } while (++qidx < phba->cfg_fcp_eq_count); 2459 } while (++qidx < phba->cfg_fcp_io_channel);
2485 } 2460 }
2486 goto error_out; 2461 goto error_out;
2487 break; 2462 break;
@@ -2513,7 +2488,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2513 } 2488 }
2514 /* FCP work queue */ 2489 /* FCP work queue */
2515 if (phba->sli4_hba.fcp_wq) { 2490 if (phba->sli4_hba.fcp_wq) {
2516 for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { 2491 for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
2492 qidx++) {
2517 if (!phba->sli4_hba.fcp_wq[qidx]) 2493 if (!phba->sli4_hba.fcp_wq[qidx])
2518 continue; 2494 continue;
2519 if (phba->sli4_hba.fcp_wq[qidx]->queue_id == 2495 if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
@@ -4492,7 +4468,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
4492 lpfc_debug_dump_mbx_wq(phba); 4468 lpfc_debug_dump_mbx_wq(phba);
4493 lpfc_debug_dump_els_wq(phba); 4469 lpfc_debug_dump_els_wq(phba);
4494 4470
4495 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) 4471 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
4496 lpfc_debug_dump_fcp_wq(phba, fcp_wqidx); 4472 lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
4497 4473
4498 lpfc_debug_dump_hdr_rq(phba); 4474 lpfc_debug_dump_hdr_rq(phba);
@@ -4503,14 +4479,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
4503 lpfc_debug_dump_mbx_cq(phba); 4479 lpfc_debug_dump_mbx_cq(phba);
4504 lpfc_debug_dump_els_cq(phba); 4480 lpfc_debug_dump_els_cq(phba);
4505 4481
4506 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) 4482 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
4507 lpfc_debug_dump_fcp_cq(phba, fcp_wqidx); 4483 lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
4508 4484
4509 /* 4485 /*
4510 * Dump Event Queues (EQs) 4486 * Dump Event Queues (EQs)
4511 */ 4487 */
4512 lpfc_debug_dump_sp_eq(phba); 4488 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
4513 4489 lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
4514 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
4515 lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
4516} 4490}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index a9593ac9c134..8b2b6a3bfc25 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -369,7 +369,7 @@ static inline void
369lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx) 369lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
370{ 370{
371 /* sanity check */ 371 /* sanity check */
372 if (fcp_wqidx >= phba->cfg_fcp_wq_count) 372 if (fcp_wqidx >= phba->cfg_fcp_io_channel)
373 return; 373 return;
374 374
375 printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n", 375 printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
@@ -391,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
391 int fcp_cqidx, fcp_cqid; 391 int fcp_cqidx, fcp_cqid;
392 392
393 /* sanity check */ 393 /* sanity check */
394 if (fcp_wqidx >= phba->cfg_fcp_wq_count) 394 if (fcp_wqidx >= phba->cfg_fcp_io_channel)
395 return; 395 return;
396 396
397 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; 397 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
398 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) 398 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
399 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) 399 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
400 break; 400 break;
401 if (phba->intr_type == MSIX) { 401 if (phba->intr_type == MSIX) {
402 if (fcp_cqidx >= phba->cfg_fcp_eq_count) 402 if (fcp_cqidx >= phba->cfg_fcp_io_channel)
403 return; 403 return;
404 } else { 404 } else {
405 if (fcp_cqidx > 0) 405 if (fcp_cqidx > 0)
@@ -413,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
413} 413}
414 414
415/** 415/**
416 * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue 416 * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
417 * @phba: Pointer to HBA context object. 417 * @phba: Pointer to HBA context object.
418 * @fcp_wqidx: Index to a FCP work queue. 418 * @fcp_wqidx: Index to a FCP work queue.
419 * 419 *
@@ -421,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
421 * associated to the FCP work queue specified by the @fcp_wqidx. 421 * associated to the FCP work queue specified by the @fcp_wqidx.
422 **/ 422 **/
423static inline void 423static inline void
424lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx) 424lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
425{ 425{
426 struct lpfc_queue *qdesc; 426 struct lpfc_queue *qdesc;
427 int fcp_eqidx, fcp_eqid; 427 int fcp_eqidx, fcp_eqid;
428 int fcp_cqidx, fcp_cqid; 428 int fcp_cqidx, fcp_cqid;
429 429
430 /* sanity check */ 430 /* sanity check */
431 if (fcp_wqidx >= phba->cfg_fcp_wq_count) 431 if (fcp_wqidx >= phba->cfg_fcp_io_channel)
432 return; 432 return;
433 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; 433 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
434 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) 434 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
435 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) 435 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
436 break; 436 break;
437 if (phba->intr_type == MSIX) { 437 if (phba->intr_type == MSIX) {
438 if (fcp_cqidx >= phba->cfg_fcp_eq_count) 438 if (fcp_cqidx >= phba->cfg_fcp_io_channel)
439 return; 439 return;
440 } else { 440 } else {
441 if (fcp_cqidx > 0) 441 if (fcp_cqidx > 0)
442 return; 442 return;
443 } 443 }
444 444
445 if (phba->cfg_fcp_eq_count == 0) { 445 fcp_eqidx = fcp_cqidx;
446 fcp_eqidx = -1; 446 fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
447 fcp_eqid = phba->sli4_hba.sp_eq->queue_id; 447 qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
448 qdesc = phba->sli4_hba.sp_eq;
449 } else {
450 fcp_eqidx = fcp_cqidx;
451 fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
452 qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
453 }
454 448
455 printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->" 449 printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
456 "EQ[Idx:%d|Qid:%d]\n", 450 "EQ[Idx:%d|Qid:%d]\n",
@@ -546,25 +540,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
546} 540}
547 541
548/** 542/**
549 * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
550 * @phba: Pointer to HBA context object.
551 *
552 * This function dumps all entries from the slow-path event queue.
553 **/
554static inline void
555lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
556{
557 printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
558 "EQ[Qid:%d]:\n",
559 phba->sli4_hba.mbx_wq->queue_id,
560 phba->sli4_hba.els_wq->queue_id,
561 phba->sli4_hba.mbx_cq->queue_id,
562 phba->sli4_hba.els_cq->queue_id,
563 phba->sli4_hba.sp_eq->queue_id);
564 lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
565}
566
567/**
568 * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id 543 * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
569 * @phba: Pointer to HBA context object. 544 * @phba: Pointer to HBA context object.
570 * @qid: Work queue identifier. 545 * @qid: Work queue identifier.
@@ -577,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
577{ 552{
578 int wq_idx; 553 int wq_idx;
579 554
580 for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++) 555 for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
581 if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) 556 if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
582 break; 557 break;
583 if (wq_idx < phba->cfg_fcp_wq_count) { 558 if (wq_idx < phba->cfg_fcp_io_channel) {
584 printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); 559 printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
585 lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); 560 lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
586 return; 561 return;
@@ -647,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
647 do { 622 do {
648 if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) 623 if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
649 break; 624 break;
650 } while (++cq_idx < phba->cfg_fcp_eq_count); 625 } while (++cq_idx < phba->cfg_fcp_io_channel);
651 626
652 if (cq_idx < phba->cfg_fcp_eq_count) { 627 if (cq_idx < phba->cfg_fcp_io_channel) {
653 printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); 628 printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
654 lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); 629 lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
655 return; 630 return;
@@ -680,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
680{ 655{
681 int eq_idx; 656 int eq_idx;
682 657
683 for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) { 658 for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
684 if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid) 659 if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
685 break; 660 break;
686 } 661 }
687 662
688 if (eq_idx < phba->cfg_fcp_eq_count) { 663 if (eq_idx < phba->cfg_fcp_io_channel) {
689 printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); 664 printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
690 lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]); 665 lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
691 return; 666 return;
692 } 667 }
693 668
694 if (phba->sli4_hba.sp_eq->queue_id == qid) {
695 printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid);
696 lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
697 }
698} 669}
699 670
700void lpfc_debug_dump_all_queues(struct lpfc_hba *); 671void lpfc_debug_dump_all_queues(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 176302f0e02c..164aa87734b8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4702,6 +4702,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4702 /* Get all the module params for configuring this host */ 4702 /* Get all the module params for configuring this host */
4703 lpfc_get_cfgparam(phba); 4703 lpfc_get_cfgparam(phba);
4704 phba->max_vpi = LPFC_MAX_VPI; 4704 phba->max_vpi = LPFC_MAX_VPI;
4705
4706 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
4707 phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
4708
4705 /* This will be set to correct value after the read_config mbox */ 4709 /* This will be set to correct value after the read_config mbox */
4706 phba->max_vports = 0; 4710 phba->max_vports = 0;
4707 4711
@@ -4722,7 +4726,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4722 */ 4726 */
4723 if (!phba->sli.ring) 4727 if (!phba->sli.ring)
4724 phba->sli.ring = kzalloc( 4728 phba->sli.ring = kzalloc(
4725 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_eq_count) * 4729 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
4726 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 4730 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4727 if (!phba->sli.ring) 4731 if (!phba->sli.ring)
4728 return -ENOMEM; 4732 return -ENOMEM;
@@ -4931,21 +4935,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4931 goto out_remove_rpi_hdrs; 4935 goto out_remove_rpi_hdrs;
4932 } 4936 }
4933 4937
4934 /* 4938 phba->sli4_hba.fcp_eq_hdl =
4935 * The cfg_fcp_eq_count can be zero whenever there is exactly one 4939 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4936 * interrupt vector. This is not an error 4940 phba->cfg_fcp_io_channel), GFP_KERNEL);
4937 */ 4941 if (!phba->sli4_hba.fcp_eq_hdl) {
4938 if (phba->cfg_fcp_eq_count) { 4942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4939 phba->sli4_hba.fcp_eq_hdl = 4943 "2572 Failed allocate memory for "
4940 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4944 "fast-path per-EQ handle array\n");
4941 phba->cfg_fcp_eq_count), GFP_KERNEL); 4945 rc = -ENOMEM;
4942 if (!phba->sli4_hba.fcp_eq_hdl) { 4946 goto out_free_fcf_rr_bmask;
4943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4944 "2572 Failed allocate memory for "
4945 "fast-path per-EQ handle array\n");
4946 rc = -ENOMEM;
4947 goto out_free_fcf_rr_bmask;
4948 }
4949 } 4947 }
4950 4948
4951 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4949 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -6538,53 +6536,26 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
6538static int 6536static int
6539lpfc_sli4_queue_verify(struct lpfc_hba *phba) 6537lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6540{ 6538{
6541 int cfg_fcp_wq_count; 6539 int cfg_fcp_io_channel;
6542 int cfg_fcp_eq_count;
6543 6540
6544 /* 6541 /*
6545 * Sanity check for confiugred queue parameters against the run-time 6542 * Sanity check for configured queue parameters against the run-time
6546 * device parameters 6543 * device parameters
6547 */ 6544 */
6548 6545
6549 /* Sanity check on FCP fast-path WQ parameters */ 6546 /* Sanity check on HBA EQ parameters */
6550 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 6547 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6551 if (cfg_fcp_wq_count > 6548
6552 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 6549 if (cfg_fcp_io_channel >
6553 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 6550 phba->sli4_hba.max_cfg_param.max_eq) {
6554 LPFC_SP_WQN_DEF; 6551 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6555 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 6552 if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
6556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6557 "2581 Not enough WQs (%d) from "
6558 "the pci function for supporting "
6559 "FCP WQs (%d)\n",
6560 phba->sli4_hba.max_cfg_param.max_wq,
6561 phba->cfg_fcp_wq_count);
6562 goto out_error;
6563 }
6564 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6565 "2582 Not enough WQs (%d) from the pci "
6566 "function for supporting the requested "
6567 "FCP WQs (%d), the actual FCP WQs can "
6568 "be supported: %d\n",
6569 phba->sli4_hba.max_cfg_param.max_wq,
6570 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6571 }
6572 /* The actual number of FCP work queues adopted */
6573 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6574
6575 /* Sanity check on FCP fast-path EQ parameters */
6576 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6577 if (cfg_fcp_eq_count >
6578 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6579 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6580 LPFC_SP_EQN_DEF;
6581 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6583 "2574 Not enough EQs (%d) from the " 6554 "2574 Not enough EQs (%d) from the "
6584 "pci function for supporting FCP " 6555 "pci function for supporting FCP "
6585 "EQs (%d)\n", 6556 "EQs (%d)\n",
6586 phba->sli4_hba.max_cfg_param.max_eq, 6557 phba->sli4_hba.max_cfg_param.max_eq,
6587 phba->cfg_fcp_eq_count); 6558 phba->cfg_fcp_io_channel);
6588 goto out_error; 6559 goto out_error;
6589 } 6560 }
6590 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6561 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -6593,22 +6564,16 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6593 "FCP EQs (%d), the actual FCP EQs can " 6564 "FCP EQs (%d), the actual FCP EQs can "
6594 "be supported: %d\n", 6565 "be supported: %d\n",
6595 phba->sli4_hba.max_cfg_param.max_eq, 6566 phba->sli4_hba.max_cfg_param.max_eq,
6596 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6567 phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
6597 }
6598 /* It does not make sense to have more EQs than WQs */
6599 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6600 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6601 "2593 The FCP EQ count(%d) cannot be greater "
6602 "than the FCP WQ count(%d), limiting the "
6603 "FCP EQ count to %d\n", cfg_fcp_eq_count,
6604 phba->cfg_fcp_wq_count,
6605 phba->cfg_fcp_wq_count);
6606 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6607 } 6568 }
6569
6570 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
6571
6608 /* The actual number of FCP event queues adopted */ 6572 /* The actual number of FCP event queues adopted */
6609 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6573 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6610 /* The overall number of event queues used */ 6574 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6611 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6575 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6576 phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
6612 6577
6613 /* Get EQ depth from module parameter, fake the default for now */ 6578 /* Get EQ depth from module parameter, fake the default for now */
6614 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6579 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
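Stripped of the log messages, the verification that survives this hunk is a single clamp: with one EQ per I/O channel, the only hard limit is the number of EQs the PCI function exposes. A distilled sketch of the resulting lpfc_sli4_queue_verify() logic, with the legacy counters mirrored until they are removed:

	cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
	if (cfg_fcp_io_channel > phba->sli4_hba.max_cfg_param.max_eq) {
		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
		if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN)
			goto out_error;	/* not even one usable EQ */
	}

	phba->cfg_fcp_eq_count = cfg_fcp_io_channel;	/* legacy mirror */
	phba->cfg_fcp_wq_count = cfg_fcp_io_channel;	/* legacy mirror */
	phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
	phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;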
@@ -6641,50 +6606,104 @@ int
6641lpfc_sli4_queue_create(struct lpfc_hba *phba) 6606lpfc_sli4_queue_create(struct lpfc_hba *phba)
6642{ 6607{
6643 struct lpfc_queue *qdesc; 6608 struct lpfc_queue *qdesc;
6644 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6609 int idx;
6645 6610
6646 /* 6611 /*
6647 * Create Event Queues (EQs) 6612 * Create HBA Record arrays.
6648 */ 6613 */
6614 if (!phba->cfg_fcp_io_channel)
6615 return -ERANGE;
6649 6616
6650 /* Create slow path event queue */ 6617 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6651 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6618 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6652 phba->sli4_hba.eq_ecount); 6619 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6653 if (!qdesc) { 6620 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6621 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6622 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6623
6624 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
6625 phba->cfg_fcp_io_channel), GFP_KERNEL);
6626 if (!phba->sli4_hba.hba_eq) {
6627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6628 "2576 Failed allocate memory for "
6629 "fast-path EQ record array\n");
6630 goto out_error;
6631 }
6632
6633 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6634 phba->cfg_fcp_io_channel), GFP_KERNEL);
6635 if (!phba->sli4_hba.fcp_cq) {
6654 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6655 "0496 Failed allocate slow-path EQ\n"); 6637 "2577 Failed allocate memory for fast-path "
6638 "CQ record array\n");
6639 goto out_error;
6640 }
6641
6642 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6643 phba->cfg_fcp_io_channel), GFP_KERNEL);
6644 if (!phba->sli4_hba.fcp_wq) {
6645 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6646 "2578 Failed allocate memory for fast-path "
6647 "WQ record array\n");
6656 goto out_error; 6648 goto out_error;
6657 } 6649 }
6658 phba->sli4_hba.sp_eq = qdesc;
6659 6650
6660 /* 6651 /*
6661 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be 6652 * Since the first EQ can have multiple CQs associated with it,
6662 * zero whenever there is exactly one interrupt vector. This is not 6653 * this array is used to quickly see if we have a FCP fast-path
6663 * an error. 6654 * CQ match.
6664 */ 6655 */
6665 if (phba->cfg_fcp_eq_count) { 6656 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
6666 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 6657 phba->cfg_fcp_io_channel), GFP_KERNEL);
6667 phba->cfg_fcp_eq_count), GFP_KERNEL); 6658 if (!phba->sli4_hba.fcp_cq_map) {
6668 if (!phba->sli4_hba.fp_eq) { 6659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6660 "2545 Failed allocate memory for fast-path "
6670 "2576 Failed allocate memory for " 6661 "CQ map\n");
6671 "fast-path EQ record array\n"); 6662 goto out_error;
6672 goto out_free_sp_eq;
6673 }
6674 } 6663 }
6675 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6664
6665 /*
6666 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
6667 * how many EQs to create.
6668 */
6669 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6670
6671 /* Create EQs */
6676 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6672 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6677 phba->sli4_hba.eq_ecount); 6673 phba->sli4_hba.eq_ecount);
6678 if (!qdesc) { 6674 if (!qdesc) {
6679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6680 "0497 Failed allocate fast-path EQ\n"); 6676 "0497 Failed allocate EQ (%d)\n", idx);
6681 goto out_free_fp_eq; 6677 goto out_error;
6682 } 6678 }
6683 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 6679 phba->sli4_hba.hba_eq[idx] = qdesc;
6680
6681 /* Create Fast Path FCP CQs */
6682 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6683 phba->sli4_hba.cq_ecount);
6684 if (!qdesc) {
6685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6686 "0499 Failed allocate fast-path FCP "
6687 "CQ (%d)\n", idx);
6688 goto out_error;
6689 }
6690 phba->sli4_hba.fcp_cq[idx] = qdesc;
6691
6692 /* Create Fast Path FCP WQs */
6693 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6694 phba->sli4_hba.wq_ecount);
6695 if (!qdesc) {
6696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6697 "0503 Failed allocate fast-path FCP "
6698 "WQ (%d)\n", idx);
6699 goto out_error;
6700 }
6701 phba->sli4_hba.fcp_wq[idx] = qdesc;
6684 } 6702 }
6685 6703
6704
6686 /* 6705 /*
6687 * Create Complete Queues (CQs) 6706 * Create Slow Path Completion Queues (CQs)
6688 */ 6707 */
6689 6708
6690 /* Create slow-path Mailbox Command Complete Queue */ 6709 /* Create slow-path Mailbox Command Complete Queue */
@@ -6693,7 +6712,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6693 if (!qdesc) { 6712 if (!qdesc) {
6694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6695 "0500 Failed allocate slow-path mailbox CQ\n"); 6714 "0500 Failed allocate slow-path mailbox CQ\n");
6696 goto out_free_fp_eq; 6715 goto out_error;
6697 } 6716 }
6698 phba->sli4_hba.mbx_cq = qdesc; 6717 phba->sli4_hba.mbx_cq = qdesc;
6699 6718
@@ -6703,59 +6722,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6703 if (!qdesc) { 6722 if (!qdesc) {
6704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6705 "0501 Failed allocate slow-path ELS CQ\n"); 6724 "0501 Failed allocate slow-path ELS CQ\n");
6706 goto out_free_mbx_cq; 6725 goto out_error;
6707 } 6726 }
6708 phba->sli4_hba.els_cq = qdesc; 6727 phba->sli4_hba.els_cq = qdesc;
6709 6728
6710 6729
6711 /* 6730 /*
6712 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. 6731 * Create Slow Path Work Queues (WQs)
6713 * If there are no FCP EQs then create exactly one FCP CQ.
6714 */ 6732 */
6715 if (phba->cfg_fcp_eq_count)
6716 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6717 phba->cfg_fcp_eq_count),
6718 GFP_KERNEL);
6719 else
6720 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6721 GFP_KERNEL);
6722 if (!phba->sli4_hba.fcp_cq) {
6723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6724 "2577 Failed allocate memory for fast-path "
6725 "CQ record array\n");
6726 goto out_free_els_cq;
6727 }
6728 fcp_cqidx = 0;
6729 do {
6730 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6731 phba->sli4_hba.cq_ecount);
6732 if (!qdesc) {
6733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6734 "0499 Failed allocate fast-path FCP "
6735 "CQ (%d)\n", fcp_cqidx);
6736 goto out_free_fcp_cq;
6737 }
6738 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6739 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6740 6733
6741 /* Create Mailbox Command Queue */ 6734 /* Create Mailbox Command Queue */
6742 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6743 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6744 6735
6745 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6736 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6746 phba->sli4_hba.mq_ecount); 6737 phba->sli4_hba.mq_ecount);
6747 if (!qdesc) { 6738 if (!qdesc) {
6748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6749 "0505 Failed allocate slow-path MQ\n"); 6740 "0505 Failed allocate slow-path MQ\n");
6750 goto out_free_fcp_cq; 6741 goto out_error;
6751 } 6742 }
6752 phba->sli4_hba.mbx_wq = qdesc; 6743 phba->sli4_hba.mbx_wq = qdesc;
6753 6744
6754 /* 6745 /*
6755 * Create all the Work Queues (WQs) 6746 * Create ELS Work Queues
6756 */ 6747 */
6757 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6758 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6759 6748
6760 /* Create slow-path ELS Work Queue */ 6749 /* Create slow-path ELS Work Queue */
6761 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6750 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
@@ -6763,36 +6752,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6763 if (!qdesc) { 6752 if (!qdesc) {
6764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6765 "0504 Failed allocate slow-path ELS WQ\n"); 6754 "0504 Failed allocate slow-path ELS WQ\n");
6766 goto out_free_mbx_wq; 6755 goto out_error;
6767 } 6756 }
6768 phba->sli4_hba.els_wq = qdesc; 6757 phba->sli4_hba.els_wq = qdesc;
6769 6758
6770 /* Create fast-path FCP Work Queue(s) */
6771 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6772 phba->cfg_fcp_wq_count), GFP_KERNEL);
6773 if (!phba->sli4_hba.fcp_wq) {
6774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6775 "2578 Failed allocate memory for fast-path "
6776 "WQ record array\n");
6777 goto out_free_els_wq;
6778 }
6779 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6780 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6781 phba->sli4_hba.wq_ecount);
6782 if (!qdesc) {
6783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6784 "0503 Failed allocate fast-path FCP "
6785 "WQ (%d)\n", fcp_wqidx);
6786 goto out_free_fcp_wq;
6787 }
6788 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6789 }
6790
6791 /* 6759 /*
6792 * Create Receive Queue (RQ) 6760 * Create Receive Queue (RQ)
6793 */ 6761 */
6794 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6795 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6796 6762
6797 /* Create Receive Queue for header */ 6763 /* Create Receive Queue for header */
6798 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6764 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
@@ -6800,7 +6766,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6800 if (!qdesc) { 6766 if (!qdesc) {
6801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6802 "0506 Failed allocate receive HRQ\n"); 6768 "0506 Failed allocate receive HRQ\n");
6803 goto out_free_fcp_wq; 6769 goto out_error;
6804 } 6770 }
6805 phba->sli4_hba.hdr_rq = qdesc; 6771 phba->sli4_hba.hdr_rq = qdesc;
6806 6772
@@ -6810,52 +6776,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6810 if (!qdesc) { 6776 if (!qdesc) {
6811 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6812 "0507 Failed allocate receive DRQ\n"); 6778 "0507 Failed allocate receive DRQ\n");
6813 goto out_free_hdr_rq; 6779 goto out_error;
6814 } 6780 }
6815 phba->sli4_hba.dat_rq = qdesc; 6781 phba->sli4_hba.dat_rq = qdesc;
6816 6782
6817 return 0; 6783 return 0;
6818 6784
6819out_free_hdr_rq:
6820 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6821 phba->sli4_hba.hdr_rq = NULL;
6822out_free_fcp_wq:
6823 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6824 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6825 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6826 }
6827 kfree(phba->sli4_hba.fcp_wq);
6828 phba->sli4_hba.fcp_wq = NULL;
6829out_free_els_wq:
6830 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6831 phba->sli4_hba.els_wq = NULL;
6832out_free_mbx_wq:
6833 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6834 phba->sli4_hba.mbx_wq = NULL;
6835out_free_fcp_cq:
6836 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6837 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6838 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6839 }
6840 kfree(phba->sli4_hba.fcp_cq);
6841 phba->sli4_hba.fcp_cq = NULL;
6842out_free_els_cq:
6843 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6844 phba->sli4_hba.els_cq = NULL;
6845out_free_mbx_cq:
6846 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6847 phba->sli4_hba.mbx_cq = NULL;
6848out_free_fp_eq:
6849 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6850 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6851 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6852 }
6853 kfree(phba->sli4_hba.fp_eq);
6854 phba->sli4_hba.fp_eq = NULL;
6855out_free_sp_eq:
6856 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6857 phba->sli4_hba.sp_eq = NULL;
6858out_error: 6785out_error:
6786 lpfc_sli4_queue_destroy(phba);
6859 return -ENOMEM; 6787 return -ENOMEM;
6860} 6788}
6861 6789
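The rewritten lpfc_sli4_queue_create() drops the cascade of out_free_* labels: every failure now funnels through a single out_error that calls lpfc_sli4_queue_destroy(), which NULL-checks (and re-NULLs) each pointer so it is safe to invoke on a partially built set. A self-contained analogy of that idiom, with plain malloc/free standing in for the queue allocators:

	#include <stdlib.h>

	struct chan { void *eq, *cq, *wq; };	/* one EQ/CQ/WQ set per channel */

	/* NULL-safe teardown, analogous to lpfc_sli4_queue_destroy(): frees
	 * whatever exists and clears the pointers so a re-run is harmless. */
	static void destroy_channels(struct chan *ch, int n)
	{
		for (int i = 0; i < n; i++) {
			free(ch[i].eq); ch[i].eq = NULL;
			free(ch[i].cq); ch[i].cq = NULL;
			free(ch[i].wq); ch[i].wq = NULL;
		}
	}

	static int create_channels(struct chan *ch, int n)
	{
		for (int i = 0; i < n; i++) {
			if (!(ch[i].eq = malloc(64)) ||
			    !(ch[i].cq = malloc(64)) ||
			    !(ch[i].wq = malloc(64))) {
				/* single exit path: tear down whatever was built */
				destroy_channels(ch, n);
				return -1;
			}
		}
		return 0;
	}

	int main(void)
	{
		struct chan ch[4] = { 0 };	/* zeroed, like the kzalloc'd arrays */
		if (create_channels(ch, 4) == 0)
			destroy_channels(ch, 4);
		return 0;
	}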
@@ -6874,58 +6802,86 @@ out_error:
6874void 6802void
6875lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6803lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6876{ 6804{
6877 int fcp_qidx; 6805 int idx;
6806
6807 if (phba->sli4_hba.hba_eq != NULL) {
6808 /* Release HBA event queue */
6809 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6810 if (phba->sli4_hba.hba_eq[idx] != NULL) {
6811 lpfc_sli4_queue_free(
6812 phba->sli4_hba.hba_eq[idx]);
6813 phba->sli4_hba.hba_eq[idx] = NULL;
6814 }
6815 }
6816 kfree(phba->sli4_hba.hba_eq);
6817 phba->sli4_hba.hba_eq = NULL;
6818 }
6819
6820 if (phba->sli4_hba.fcp_cq != NULL) {
6821 /* Release FCP completion queue */
6822 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6823 if (phba->sli4_hba.fcp_cq[idx] != NULL) {
6824 lpfc_sli4_queue_free(
6825 phba->sli4_hba.fcp_cq[idx]);
6826 phba->sli4_hba.fcp_cq[idx] = NULL;
6827 }
6828 }
6829 kfree(phba->sli4_hba.fcp_cq);
6830 phba->sli4_hba.fcp_cq = NULL;
6831 }
6832
6833 if (phba->sli4_hba.fcp_wq != NULL) {
6834 /* Release FCP work queue */
6835 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6836 if (phba->sli4_hba.fcp_wq[idx] != NULL) {
6837 lpfc_sli4_queue_free(
6838 phba->sli4_hba.fcp_wq[idx]);
6839 phba->sli4_hba.fcp_wq[idx] = NULL;
6840 }
6841 }
6842 kfree(phba->sli4_hba.fcp_wq);
6843 phba->sli4_hba.fcp_wq = NULL;
6844 }
6845
6846 /* Release FCP CQ mapping array */
6847 if (phba->sli4_hba.fcp_cq_map != NULL) {
6848 kfree(phba->sli4_hba.fcp_cq_map);
6849 phba->sli4_hba.fcp_cq_map = NULL;
6850 }
6878 6851
6879 /* Release mailbox command work queue */ 6852 /* Release mailbox command work queue */
6880 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6853 if (phba->sli4_hba.mbx_wq != NULL) {
6881 phba->sli4_hba.mbx_wq = NULL; 6854 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6855 phba->sli4_hba.mbx_wq = NULL;
6856 }
6882 6857
6883 /* Release ELS work queue */ 6858 /* Release ELS work queue */
6884 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6859 if (phba->sli4_hba.els_wq != NULL) {
6885 phba->sli4_hba.els_wq = NULL; 6860 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6886 6861 phba->sli4_hba.els_wq = NULL;
6887 /* Release FCP work queue */ 6862 }
6888 if (phba->sli4_hba.fcp_wq != NULL)
6889 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
6890 fcp_qidx++)
6891 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6892 kfree(phba->sli4_hba.fcp_wq);
6893 phba->sli4_hba.fcp_wq = NULL;
6894 6863
6895 /* Release unsolicited receive queue */ 6864 /* Release unsolicited receive queue */
6896 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6865 if (phba->sli4_hba.hdr_rq != NULL) {
6897 phba->sli4_hba.hdr_rq = NULL; 6866 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6898 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6867 phba->sli4_hba.hdr_rq = NULL;
6899 phba->sli4_hba.dat_rq = NULL; 6868 }
6869 if (phba->sli4_hba.dat_rq != NULL) {
6870 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6871 phba->sli4_hba.dat_rq = NULL;
6872 }
6900 6873
6901 /* Release ELS complete queue */ 6874 /* Release ELS complete queue */
6902 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6875 if (phba->sli4_hba.els_cq != NULL) {
6903 phba->sli4_hba.els_cq = NULL; 6876 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6877 phba->sli4_hba.els_cq = NULL;
6878 }
6904 6879
6905 /* Release mailbox command complete queue */ 6880 /* Release mailbox command complete queue */
6906 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6881 if (phba->sli4_hba.mbx_cq != NULL) {
6907 phba->sli4_hba.mbx_cq = NULL; 6882 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6908 6883 phba->sli4_hba.mbx_cq = NULL;
6909 /* Release FCP response complete queue */ 6884 }
6910 fcp_qidx = 0;
6911 if (phba->sli4_hba.fcp_cq != NULL)
6912 do
6913 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6914 while (++fcp_qidx < phba->cfg_fcp_eq_count);
6915 kfree(phba->sli4_hba.fcp_cq);
6916 phba->sli4_hba.fcp_cq = NULL;
6917
6918 /* Release fast-path event queue */
6919 if (phba->sli4_hba.fp_eq != NULL)
6920 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6921 fcp_qidx++)
6922 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6923 kfree(phba->sli4_hba.fp_eq);
6924 phba->sli4_hba.fp_eq = NULL;
6925
6926 /* Release slow-path event queue */
6927 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6928 phba->sli4_hba.sp_eq = NULL;
6929 6885
6930 return; 6886 return;
6931} 6887}
@@ -6952,56 +6908,117 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6952 int fcp_cq_index = 0; 6908 int fcp_cq_index = 0;
6953 6909
6954 /* 6910 /*
6955 * Set up Event Queues (EQs) 6911 * Set up HBA Event Queues (EQs)
6956 */ 6912 */
6957 6913
6958 /* Set up slow-path event queue */ 6914 /* Set up HBA event queue */
6959 if (!phba->sli4_hba.sp_eq) { 6915 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
6960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6961 "0520 Slow-path EQ not allocated\n");
6962 goto out_error;
6963 }
6964 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6965 LPFC_SP_DEF_IMAX);
6966 if (rc) {
6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6968 "0521 Failed setup of slow-path EQ: "
6969 "rc = 0x%x\n", rc);
6970 goto out_error;
6971 }
6972 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6973 "2583 Slow-path EQ setup: queue-id=%d\n",
6974 phba->sli4_hba.sp_eq->queue_id);
6975
6976 /* Set up fast-path event queue */
6977 if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
6978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6979 "3147 Fast-path EQs not allocated\n"); 6917 "3147 Fast-path EQs not allocated\n");
6980 rc = -ENOMEM; 6918 rc = -ENOMEM;
6981 goto out_destroy_sp_eq; 6919 goto out_error;
6982 } 6920 }
6983 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6921 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
6984 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6922 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
6985 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6986 "0522 Fast-path EQ (%d) not " 6924 "0522 Fast-path EQ (%d) not "
6987 "allocated\n", fcp_eqidx); 6925 "allocated\n", fcp_eqidx);
6988 rc = -ENOMEM; 6926 rc = -ENOMEM;
6989 goto out_destroy_fp_eq; 6927 goto out_destroy_hba_eq;
6990 } 6928 }
6991 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6929 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
6992 phba->cfg_fcp_imax); 6930 phba->cfg_fcp_imax);
6993 if (rc) { 6931 if (rc) {
6994 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6995 "0523 Failed setup of fast-path EQ " 6933 "0523 Failed setup of fast-path EQ "
6996 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6934 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
6997 goto out_destroy_fp_eq; 6935 goto out_destroy_hba_eq;
6998 } 6936 }
6999 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7000 "2584 Fast-path EQ setup: " 6938 "2584 HBA EQ setup: "
7001 "queue[%d]-id=%d\n", fcp_eqidx, 6939 "queue[%d]-id=%d\n", fcp_eqidx,
7002 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6940 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7003 } 6941 }
7004 6942
6943 /* Set up fast-path FCP Response Complete Queue */
6944 if (!phba->sli4_hba.fcp_cq) {
6945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6946 "3148 Fast-path FCP CQ array not "
6947 "allocated\n");
6948 rc = -ENOMEM;
6949 goto out_destroy_hba_eq;
6950 }
6951
6952 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
6953 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6955 "0526 Fast-path FCP CQ (%d) not "
6956 "allocated\n", fcp_cqidx);
6957 rc = -ENOMEM;
6958 goto out_destroy_fcp_cq;
6959 }
6960 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6961 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
6962 if (rc) {
6963 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6964 "0527 Failed setup of fast-path FCP "
6965 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6966 goto out_destroy_fcp_cq;
6967 }
6968
6969 /* Setup fcp_cq_map for fast lookup */
6970 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
6971 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
6972
6973 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6974 "2588 FCP CQ setup: cq[%d]-id=%d, "
6975 "parent seq[%d]-id=%d\n",
6976 fcp_cqidx,
6977 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6978 fcp_cqidx,
6979 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
6980 }
6981
6982 /* Set up fast-path FCP Work Queue */
6983 if (!phba->sli4_hba.fcp_wq) {
6984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6985 "3149 Fast-path FCP WQ array not "
6986 "allocated\n");
6987 rc = -ENOMEM;
6988 goto out_destroy_fcp_cq;
6989 }
6990
6991 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
6992 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6994 "0534 Fast-path FCP WQ (%d) not "
6995 "allocated\n", fcp_wqidx);
6996 rc = -ENOMEM;
6997 goto out_destroy_fcp_wq;
6998 }
6999 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7000 phba->sli4_hba.fcp_cq[fcp_wqidx],
7001 LPFC_FCP);
7002 if (rc) {
7003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7004 "0535 Failed setup of fast-path FCP "
7005 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7006 goto out_destroy_fcp_wq;
7007 }
7008
7009 /* Bind this WQ to the next FCP ring */
7010 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7011 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7012 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7013
7014 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7015 "2591 FCP WQ setup: wq[%d]-id=%d, "
7016 "parent cq[%d]-id=%d\n",
7017 fcp_wqidx,
7018 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7019 fcp_cq_index,
7020 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7021 }
7005 /* 7022 /*
7006 * Set up Complete Queues (CQs) 7023 * Set up Complete Queues (CQs)
7007 */ 7024 */
@@ -7011,20 +7028,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7012 "0528 Mailbox CQ not allocated\n"); 7029 "0528 Mailbox CQ not allocated\n");
7013 rc = -ENOMEM; 7030 rc = -ENOMEM;
7014 goto out_destroy_fp_eq; 7031 goto out_destroy_fcp_wq;
7015 } 7032 }
7016 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 7033 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7017 LPFC_MCQ, LPFC_MBOX); 7034 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7018 if (rc) { 7035 if (rc) {
7019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7036 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7020 "0529 Failed setup of slow-path mailbox CQ: " 7037 "0529 Failed setup of slow-path mailbox CQ: "
7021 "rc = 0x%x\n", rc); 7038 "rc = 0x%x\n", rc);
7022 goto out_destroy_fp_eq; 7039 goto out_destroy_fcp_wq;
7023 } 7040 }
7024 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7041 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7025 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7042 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7026 phba->sli4_hba.mbx_cq->queue_id, 7043 phba->sli4_hba.mbx_cq->queue_id,
7027 phba->sli4_hba.sp_eq->queue_id); 7044 phba->sli4_hba.hba_eq[0]->queue_id);
7028 7045
7029 /* Set up slow-path ELS Complete Queue */ 7046 /* Set up slow-path ELS Complete Queue */
7030 if (!phba->sli4_hba.els_cq) { 7047 if (!phba->sli4_hba.els_cq) {
@@ -7033,8 +7050,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7033 rc = -ENOMEM; 7050 rc = -ENOMEM;
7034 goto out_destroy_mbx_cq; 7051 goto out_destroy_mbx_cq;
7035 } 7052 }
7036 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 7053 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7037 LPFC_WCQ, LPFC_ELS); 7054 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7038 if (rc) { 7055 if (rc) {
7039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7040 "0531 Failed setup of slow-path ELS CQ: " 7057 "0531 Failed setup of slow-path ELS CQ: "
@@ -7044,52 +7061,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7044 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7061 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7045 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7062 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7046 phba->sli4_hba.els_cq->queue_id, 7063 phba->sli4_hba.els_cq->queue_id,
7047 phba->sli4_hba.sp_eq->queue_id); 7064 phba->sli4_hba.hba_eq[0]->queue_id);
7048
7049 /* Set up fast-path FCP Response Complete Queue */
7050 if (!phba->sli4_hba.fcp_cq) {
7051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7052 "3148 Fast-path FCP CQ array not "
7053 "allocated\n");
7054 rc = -ENOMEM;
7055 goto out_destroy_els_cq;
7056 }
7057 fcp_cqidx = 0;
7058 do {
7059 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7061 "0526 Fast-path FCP CQ (%d) not "
7062 "allocated\n", fcp_cqidx);
7063 rc = -ENOMEM;
7064 goto out_destroy_fcp_cq;
7065 }
7066 if (phba->cfg_fcp_eq_count)
7067 rc = lpfc_cq_create(phba,
7068 phba->sli4_hba.fcp_cq[fcp_cqidx],
7069 phba->sli4_hba.fp_eq[fcp_cqidx],
7070 LPFC_WCQ, LPFC_FCP);
7071 else
7072 rc = lpfc_cq_create(phba,
7073 phba->sli4_hba.fcp_cq[fcp_cqidx],
7074 phba->sli4_hba.sp_eq,
7075 LPFC_WCQ, LPFC_FCP);
7076 if (rc) {
7077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7078 "0527 Failed setup of fast-path FCP "
7079 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
7080 goto out_destroy_fcp_cq;
7081 }
7082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7083 "2588 FCP CQ setup: cq[%d]-id=%d, "
7084 "parent %seq[%d]-id=%d\n",
7085 fcp_cqidx,
7086 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7087 (phba->cfg_fcp_eq_count) ? "" : "sp_",
7088 fcp_cqidx,
7089 (phba->cfg_fcp_eq_count) ?
7090 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
7091 phba->sli4_hba.sp_eq->queue_id);
7092 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
7093 7065
7094 /* 7066 /*
7095 * Set up all the Work Queues (WQs) 7067 * Set up all the Work Queues (WQs)
@@ -7100,7 +7072,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7100 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7101 "0538 Slow-path MQ not allocated\n"); 7073 "0538 Slow-path MQ not allocated\n");
7102 rc = -ENOMEM; 7074 rc = -ENOMEM;
7103 goto out_destroy_fcp_cq; 7075 goto out_destroy_els_cq;
7104 } 7076 }
7105 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7077 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7106 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7078 phba->sli4_hba.mbx_cq, LPFC_MBOX);
@@ -7108,7 +7080,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7109 "0539 Failed setup of slow-path MQ: " 7081 "0539 Failed setup of slow-path MQ: "
7110 "rc = 0x%x\n", rc); 7082 "rc = 0x%x\n", rc);
7111 goto out_destroy_fcp_cq; 7083 goto out_destroy_els_cq;
7112 } 7084 }
7113 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7085 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7114 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7086 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
@@ -7141,50 +7113,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7141 phba->sli4_hba.els_wq->queue_id, 7113 phba->sli4_hba.els_wq->queue_id,
7142 phba->sli4_hba.els_cq->queue_id); 7114 phba->sli4_hba.els_cq->queue_id);
7143 7115
7144 /* Set up fast-path FCP Work Queue */
7145 if (!phba->sli4_hba.fcp_wq) {
7146 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7147 "3149 Fast-path FCP WQ array not "
7148 "allocated\n");
7149 rc = -ENOMEM;
7150 goto out_destroy_els_wq;
7151 }
7152 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
7153 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7155 "0534 Fast-path FCP WQ (%d) not "
7156 "allocated\n", fcp_wqidx);
7157 rc = -ENOMEM;
7158 goto out_destroy_fcp_wq;
7159 }
7160 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7161 phba->sli4_hba.fcp_cq[fcp_cq_index],
7162 LPFC_FCP);
7163 if (rc) {
7164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7165 "0535 Failed setup of fast-path FCP "
7166 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7167 goto out_destroy_fcp_wq;
7168 }
7169
7170 /* Bind this WQ to the next FCP ring */
7171 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7172 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7173 phba->sli4_hba.fcp_cq[fcp_cq_index]->pring = pring;
7174
7175 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7176 "2591 FCP WQ setup: wq[%d]-id=%d, "
7177 "parent cq[%d]-id=%d\n",
7178 fcp_wqidx,
7179 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7180 fcp_cq_index,
7181 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
7182 /* Round robin FCP Work Queue's Completion Queue assignment */
7183 if (phba->cfg_fcp_eq_count)
7184 fcp_cq_index = ((fcp_cq_index + 1) %
7185 phba->cfg_fcp_eq_count);
7186 }
7187
7188 /* 7116 /*
7189 * Create Receive Queue (RQ) 7117 * Create Receive Queue (RQ)
7190 */ 7118 */
@@ -7192,7 +7120,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7192 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7193 "0540 Receive Queue not allocated\n"); 7121 "0540 Receive Queue not allocated\n");
7194 rc = -ENOMEM; 7122 rc = -ENOMEM;
7195 goto out_destroy_fcp_wq; 7123 goto out_destroy_els_wq;
7196 } 7124 }
7197 7125
7198 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7126 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7215,25 +7143,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7215 phba->sli4_hba.els_cq->queue_id); 7143 phba->sli4_hba.els_cq->queue_id);
7216 return 0; 7144 return 0;
7217 7145
7218out_destroy_fcp_wq:
7219 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7220 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7221out_destroy_els_wq: 7146out_destroy_els_wq:
7222 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7147 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7223out_destroy_mbx_wq: 7148out_destroy_mbx_wq:
7224 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7149 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7225out_destroy_fcp_cq:
7226 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7227 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7228out_destroy_els_cq: 7150out_destroy_els_cq:
7229 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7151 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7230out_destroy_mbx_cq: 7152out_destroy_mbx_cq:
7231 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7153 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7232out_destroy_fp_eq: 7154out_destroy_fcp_wq:
7155 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7156 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7157out_destroy_fcp_cq:
7158 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7159 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7160out_destroy_hba_eq:
7233 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7161 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7234 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 7162 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7235out_destroy_sp_eq:
7236 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7237out_error: 7163out_error:
7238 return rc; 7164 return rc;
7239} 7165}
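
The reordered labels above unwind the queues strictly in reverse order of creation, so a failure at any stage destroys only what was actually brought up. A minimal standalone sketch of that goto-ladder idiom, with hypothetical create()/destroy() helpers standing in for the lpfc_*_create()/lpfc_*_destroy() calls:

#include <stdio.h>

/* Hypothetical stand-ins for lpfc_eq_create()/lpfc_cq_create() etc. */
static int create(const char *name) { printf("create %s\n", name); return 0; }
static void destroy(const char *name) { printf("destroy %s\n", name); }

static int setup_queues(void)
{
	int rc;

	rc = create("eq");
	if (rc)
		goto out_error;
	rc = create("cq");
	if (rc)
		goto out_destroy_eq;
	rc = create("wq");
	if (rc)
		goto out_destroy_cq;
	return 0;

	/* Unwind strictly in reverse order of creation. */
out_destroy_cq:
	destroy("cq");
out_destroy_eq:
	destroy("eq");
out_error:
	return rc;
}

int main(void) { return setup_queues(); }
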
@@ -7262,27 +7188,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7262 /* Unset unsolicited receive queue */ 7188 /* Unset unsolicited receive queue */
7263 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7189 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7264 /* Unset FCP work queue */ 7190 /* Unset FCP work queue */
7265 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 7191 if (phba->sli4_hba.fcp_wq) {
7266 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7192 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7193 fcp_qidx++)
7194 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7195 }
7267 /* Unset mailbox command complete queue */ 7196 /* Unset mailbox command complete queue */
7268 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7197 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7269 /* Unset ELS complete queue */ 7198 /* Unset ELS complete queue */
7270 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7199 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7271 /* Unset FCP response complete queue */ 7200 /* Unset FCP response complete queue */
7272 if (phba->sli4_hba.fcp_cq) { 7201 if (phba->sli4_hba.fcp_cq) {
7273 fcp_qidx = 0; 7202 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7274 do { 7203 fcp_qidx++)
7275 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7204 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7276 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
7277 } 7205 }
7278 /* Unset fast-path event queue */ 7206 /* Unset fast-path event queue */
7279 if (phba->sli4_hba.fp_eq) { 7207 if (phba->sli4_hba.hba_eq) {
7280 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 7208 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7281 fcp_qidx++) 7209 fcp_qidx++)
7282 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 7210 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7283 } 7211 }
7284 /* Unset slow-path event queue */
7285 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7286} 7212}
7287 7213
7288/** 7214/**
@@ -8174,33 +8100,17 @@ enable_msix_vectors:
8174 "message=%d\n", index, 8100 "message=%d\n", index,
8175 phba->sli4_hba.msix_entries[index].vector, 8101 phba->sli4_hba.msix_entries[index].vector,
8176 phba->sli4_hba.msix_entries[index].entry); 8102 phba->sli4_hba.msix_entries[index].entry);
8103
8177 /* 8104 /*
8178 * Assign MSI-X vectors to interrupt handlers 8105 * Assign MSI-X vectors to interrupt handlers
8179 */ 8106 */
8180 if (vectors > 1) 8107 for (index = 0; index < vectors; index++) {
8181 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 8108 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8182 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 8109 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8183 LPFC_SP_DRIVER_HANDLER_NAME, phba);
8184 else
8185 /* All Interrupts need to be handled by one EQ */
8186 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
8187 &lpfc_sli4_intr_handler, IRQF_SHARED,
8188 LPFC_DRIVER_NAME, phba);
8189 if (rc) {
8190 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8191 "0485 MSI-X slow-path request_irq failed "
8192 "(%d)\n", rc);
8193 goto msi_fail_out;
8194 }
8195
8196 /* The rest of the vector(s) are associated to fast-path handler(s) */
8197 for (index = 1; index < vectors; index++) {
8198 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
8199 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
8200 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 8110 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
8201 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 8111 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8202 LPFC_FP_DRIVER_HANDLER_NAME, 8112 LPFC_FP_DRIVER_HANDLER_NAME,
8203 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8113 &phba->sli4_hba.fcp_eq_hdl[index]);
8204 if (rc) { 8114 if (rc) {
8205 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8115 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8206 "0486 MSI-X fast-path (%d) " 8116 "0486 MSI-X fast-path (%d) "
@@ -8214,12 +8124,9 @@ enable_msix_vectors:
8214 8124
8215cfg_fail_out: 8125cfg_fail_out:
8216 /* free the irq already requested */ 8126 /* free the irq already requested */
8217 for (--index; index >= 1; index--) 8127 for (--index; index >= 0; index--)
8218 free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 8128 free_irq(phba->sli4_hba.msix_entries[index].vector,
8219 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8129 &phba->sli4_hba.fcp_eq_hdl[index]);
8220
8221 /* free the irq already requested */
8222 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
8223 8130
8224msi_fail_out: 8131msi_fail_out:
8225 /* Unconfigure MSI-X capability structure */ 8132 /* Unconfigure MSI-X capability structure */
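
In the rewritten loop above, every MSI-X vector gets the same handler and its own fcp_eq_hdl, and a failure frees only the vectors already requested. A user-space model of that assign-and-unwind pattern; req() and rel() are hypothetical stand-ins for request_irq() and free_irq():

#include <stdio.h>

#define NVEC 4

/* Per-channel handle, as in struct lpfc_fcp_eq_hdl: idx tells the
 * shared handler which I/O channel this vector services. */
struct eq_hdl { int idx; };

static struct eq_hdl hdl[NVEC];

/* Hypothetical stand-ins for request_irq()/free_irq(). */
static int req(int vec, struct eq_hdl *h)
{
	printf("irq %d -> channel %d\n", vec, h->idx);
	return 0;
}
static void rel(int vec, struct eq_hdl *h)
{
	printf("free irq %d (channel %d)\n", vec, h->idx);
}

int main(void)
{
	int index, rc = 0;

	/* One identical handler per vector: vector N services channel N. */
	for (index = 0; index < NVEC; index++) {
		hdl[index].idx = index;
		rc = req(index, &hdl[index]);
		if (rc)
			goto cfg_fail_out;
	}
	return 0;

cfg_fail_out:
	/* Unwind: free only the vectors already requested. */
	for (--index; index >= 0; index--)
		rel(index, &hdl[index]);
	return rc;
}
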
@@ -8240,11 +8147,9 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8240 int index; 8147 int index;
8241 8148
8242 /* Free up MSI-X multi-message vectors */ 8149 /* Free up MSI-X multi-message vectors */
8243 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 8150 for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
8244
8245 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
8246 free_irq(phba->sli4_hba.msix_entries[index].vector, 8151 free_irq(phba->sli4_hba.msix_entries[index].vector,
8247 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8152 &phba->sli4_hba.fcp_eq_hdl[index]);
8248 8153
8249 /* Disable MSI-X */ 8154 /* Disable MSI-X */
8250 pci_disable_msix(phba->pcidev); 8155 pci_disable_msix(phba->pcidev);
@@ -8290,7 +8195,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8290 return rc; 8195 return rc;
8291 } 8196 }
8292 8197
8293 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 8198 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8294 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8199 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8295 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8200 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8296 } 8201 }
@@ -8370,7 +8275,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8370 /* Indicate initialization to INTx mode */ 8275 /* Indicate initialization to INTx mode */
8371 phba->intr_type = INTx; 8276 phba->intr_type = INTx;
8372 intr_mode = 0; 8277 intr_mode = 0;
8373 for (index = 0; index < phba->cfg_fcp_eq_count; 8278 for (index = 0; index < phba->cfg_fcp_io_channel;
8374 index++) { 8279 index++) {
8375 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8280 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8376 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8281 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
@@ -9490,7 +9395,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9490 int error; 9395 int error;
9491 uint32_t cfg_mode, intr_mode; 9396 uint32_t cfg_mode, intr_mode;
9492 int mcnt; 9397 int mcnt;
9493 int adjusted_fcp_eq_count; 9398 int adjusted_fcp_io_channel;
9494 const struct firmware *fw; 9399 const struct firmware *fw;
9495 uint8_t file_name[16]; 9400 uint8_t file_name[16];
9496 9401
@@ -9593,13 +9498,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9593 } 9498 }
9594 /* Default to single EQ for non-MSI-X */ 9499 /* Default to single EQ for non-MSI-X */
9595 if (phba->intr_type != MSIX) 9500 if (phba->intr_type != MSIX)
9596 adjusted_fcp_eq_count = 0; 9501 adjusted_fcp_io_channel = 0;
9597 else if (phba->sli4_hba.msix_vec_nr < 9502 else if (phba->sli4_hba.msix_vec_nr <
9598 phba->cfg_fcp_eq_count + 1) 9503 phba->cfg_fcp_io_channel)
9599 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9504 adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
9600 else 9505 else
9601 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; 9506 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9602 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; 9507 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
9603 /* Set up SLI-4 HBA */ 9508 /* Set up SLI-4 HBA */
9604 if (lpfc_sli4_hba_setup(phba)) { 9509 if (lpfc_sli4_hba_setup(phba)) {
9605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9735,6 +9640,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9735 * buffers are released to their corresponding pools here. 9640 * buffers are released to their corresponding pools here.
9736 */ 9641 */
9737 lpfc_scsi_free(phba); 9642 lpfc_scsi_free(phba);
9643
9738 lpfc_sli4_driver_resource_unset(phba); 9644 lpfc_sli4_driver_resource_unset(phba);
9739 9645
9740 /* Unmap adapter Control and Doorbell registers */ 9646 /* Unmap adapter Control and Doorbell registers */
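
The probe path above caps the channel count by the interrupt resources actually granted: without MSI-X the driver falls back to the single shared-EQ path, and with MSI-X it never drives more channels than vectors. A sketch of that decision under the intr_type/msix_vec_nr semantics shown in the hunk above:

#include <stdio.h>

enum intr_type { INTx, MSI, MSIX };

/* Sketch of the probe-time adjustment: 0 here means "shared EQ path". */
static int adjust_io_channels(enum intr_type intr, int msix_vec_nr,
			      int cfg_fcp_io_channel)
{
	if (intr != MSIX)
		return 0;			/* single shared EQ */
	if (msix_vec_nr < cfg_fcp_io_channel)
		return msix_vec_nr;		/* one channel per vector */
	return cfg_fcp_io_channel;		/* enough vectors: keep it */
}

int main(void)
{
	printf("%d\n", adjust_io_channels(MSI, 0, 4));	/* 0: shared EQ */
	printf("%d\n", adjust_io_channels(MSIX, 2, 4));	/* 2: vector-limited */
	printf("%d\n", adjust_io_channels(MSIX, 8, 4));	/* 4: as configured */
	return 0;
}

Unlike the old code, no vector is reserved for a slow-path EQ, so the full msix_vec_nr is available to I/O channels.
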
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 83a1c9e4902d..8cbbd815c030 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4921,16 +4921,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4921 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4921 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4922 fcp_eqidx = 0; 4922 fcp_eqidx = 0;
4923 if (phba->sli4_hba.fcp_cq) { 4923 if (phba->sli4_hba.fcp_cq) {
4924 do 4924 do {
4925 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4925 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4926 LPFC_QUEUE_REARM); 4926 LPFC_QUEUE_REARM);
4927 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4927 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4928 } 4928 }
4929 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4929 if (phba->sli4_hba.hba_eq) {
4930 if (phba->sli4_hba.fp_eq) { 4930 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4931 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
4932 fcp_eqidx++) 4931 fcp_eqidx++)
4933 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4932 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4934 LPFC_QUEUE_REARM); 4933 LPFC_QUEUE_REARM);
4935 } 4934 }
4936} 4935}
@@ -7818,7 +7817,7 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7818 int i; 7817 int i;
7819 7818
7820 i = atomic_add_return(1, &phba->fcp_qidx); 7819 i = atomic_add_return(1, &phba->fcp_qidx);
7821 i = (i % phba->cfg_fcp_wq_count); 7820 i = (i % phba->cfg_fcp_io_channel);
7822 return i; 7821 return i;
7823} 7822}
7824 7823
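
lpfc_sli4_scmd_to_wqidx_distr() now spreads SCSI commands across all I/O channels by taking an atomic counter modulo cfg_fcp_io_channel. A self-contained model with C11 atomics; note atomic_fetch_add() returns the prior value where the kernel's atomic_add_return() returns the new one, so the sequence starts one step earlier but the spread is identical:

#include <stdatomic.h>
#include <stdio.h>

#define IO_CHANNELS 4

static atomic_int fcp_qidx;

/* Round-robin channel selection, as in lpfc_sli4_scmd_to_wqidx_distr(). */
static int next_wq_index(void)
{
	return atomic_fetch_add(&fcp_qidx, 1) % IO_CHANNELS;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("cmd %d -> channel %d\n", i, next_wq_index());
	return 0;
}
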
@@ -8727,7 +8726,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8727 8726
8728 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 8727 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8729 if (phba->sli_rev == LPFC_SLI_REV4) 8728 if (phba->sli_rev == LPFC_SLI_REV4)
8730 psli->num_rings += phba->cfg_fcp_eq_count; 8729 psli->num_rings += phba->cfg_fcp_io_channel;
8731 psli->sli_flag = 0; 8730 psli->sli_flag = 0;
8732 psli->fcp_ring = LPFC_FCP_RING; 8731 psli->fcp_ring = LPFC_FCP_RING;
8733 psli->next_ring = LPFC_FCP_NEXT_RING; 8732 psli->next_ring = LPFC_FCP_NEXT_RING;
@@ -11468,31 +11467,18 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11468 * 11467 *
11469 **/ 11468 **/
11470static void 11469static void
11471lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 11470lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11471 struct lpfc_queue *speq)
11472{ 11472{
11473 struct lpfc_queue *cq = NULL, *childq, *speq; 11473 struct lpfc_queue *cq = NULL, *childq;
11474 struct lpfc_cqe *cqe; 11474 struct lpfc_cqe *cqe;
11475 bool workposted = false; 11475 bool workposted = false;
11476 int ecount = 0; 11476 int ecount = 0;
11477 uint16_t cqid; 11477 uint16_t cqid;
11478 11478
11479 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
11480 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11481 "0359 Not a valid slow-path completion "
11482 "event: majorcode=x%x, minorcode=x%x\n",
11483 bf_get_le32(lpfc_eqe_major_code, eqe),
11484 bf_get_le32(lpfc_eqe_minor_code, eqe));
11485 return;
11486 }
11487
11488 /* Get the reference to the corresponding CQ */ 11479 /* Get the reference to the corresponding CQ */
11489 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11480 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11490 11481
11491 /* Search for completion queue pointer matching this cqid */
11492 speq = phba->sli4_hba.sp_eq;
11493 /* sanity check on queue memory */
11494 if (unlikely(!speq))
11495 return;
11496 list_for_each_entry(childq, &speq->child_list, list) { 11482 list_for_each_entry(childq, &speq->child_list, list) {
11497 if (childq->queue_id == cqid) { 11483 if (childq->queue_id == cqid) {
11498 cq = childq; 11484 cq = childq;
@@ -11711,7 +11697,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11711} 11697}
11712 11698
11713/** 11699/**
11714 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 11700 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
11715 * @phba: Pointer to HBA context object. 11701 * @phba: Pointer to HBA context object.
11716 * @eqe: Pointer to fast-path event queue entry. 11702 * @eqe: Pointer to fast-path event queue entry.
11717 * 11703 *
@@ -11723,8 +11709,8 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11723 * completion queue, and then return. 11709 * completion queue, and then return.
11724 **/ 11710 **/
11725static void 11711static void
11726lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11712lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11727 uint32_t fcp_cqidx) 11713 uint32_t qidx)
11728{ 11714{
11729 struct lpfc_queue *cq; 11715 struct lpfc_queue *cq;
11730 struct lpfc_cqe *cqe; 11716 struct lpfc_cqe *cqe;
@@ -11734,30 +11720,38 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11734 11720
11735 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 11721 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11737 "0366 Not a valid fast-path completion " 11723 "0366 Not a valid completion "
11738 "event: majorcode=x%x, minorcode=x%x\n", 11724 "event: majorcode=x%x, minorcode=x%x\n",
11739 bf_get_le32(lpfc_eqe_major_code, eqe), 11725 bf_get_le32(lpfc_eqe_major_code, eqe),
11740 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11726 bf_get_le32(lpfc_eqe_minor_code, eqe));
11741 return; 11727 return;
11742 } 11728 }
11743 11729
11730 /* Get the reference to the corresponding CQ */
11731 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11732
11733 /* Check if this is a Slow path event */
11734 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
11735 lpfc_sli4_sp_handle_eqe(phba, eqe,
11736 phba->sli4_hba.hba_eq[qidx]);
11737 return;
11738 }
11739
11744 if (unlikely(!phba->sli4_hba.fcp_cq)) { 11740 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11745 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11741 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11746 "3146 Fast-path completion queues " 11742 "3146 Fast-path completion queues "
11747 "does not exist\n"); 11743 "does not exist\n");
11748 return; 11744 return;
11749 } 11745 }
11750 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11746 cq = phba->sli4_hba.fcp_cq[qidx];
11751 if (unlikely(!cq)) { 11747 if (unlikely(!cq)) {
11752 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11748 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11753 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11754 "0367 Fast-path completion queue " 11750 "0367 Fast-path completion queue "
11755 "(%d) does not exist\n", fcp_cqidx); 11751 "(%d) does not exist\n", qidx);
11756 return; 11752 return;
11757 } 11753 }
11758 11754
11759 /* Get the reference to the corresponding CQ */
11760 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11761 if (unlikely(cqid != cq->queue_id)) { 11755 if (unlikely(cqid != cq->queue_id)) {
11762 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11763 "0368 Miss-matched fast-path completion " 11757 "0368 Miss-matched fast-path completion "
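
With sp_eq gone, every EQ is an hba_eq and slow-path completions can arrive on any vector. The handler above therefore compares the EQE's CQ id against fcp_cq_map[qidx] and reroutes anything that is not this channel's FCP CQ to the slow-path logic. A compact model of that dispatch; the CQ ids and the handler stubs are hypothetical:

#include <stdio.h>

#define IO_CHANNELS 4

/* Hypothetical model: fcp_cq_map[i] holds channel i's FCP CQ id. */
static unsigned short fcp_cq_map[IO_CHANNELS] = { 100, 101, 102, 103 };

static void handle_fcp_cqe(int qidx)  { printf("fast path, channel %d\n", qidx); }
static void handle_sp_cqe(int cqid)   { printf("slow path, cq %d\n", cqid); }

/* Mirrors the cqid test lpfc_sli4_hba_handle_eqe() now performs. */
static void handle_eqe(int qidx, unsigned short cqid)
{
	if (cqid != fcp_cq_map[qidx]) {
		/* ELS/mailbox completions ride the same EQ now. */
		handle_sp_cqe(cqid);
		return;
	}
	handle_fcp_cqe(qidx);
}

int main(void)
{
	handle_eqe(2, 102);	/* FCP completion on channel 2 */
	handle_eqe(2, 7);	/* e.g. an ELS CQ id: rerouted to slow path */
	return 0;
}
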
@@ -11805,93 +11799,7 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11805} 11799}
11806 11800
11807/** 11801/**
11808 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11802 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
11809 * @irq: Interrupt number.
11810 * @dev_id: The device context pointer.
11811 *
11812 * This function is directly called from the PCI layer as an interrupt
11813 * service routine when device with SLI-4 interface spec is enabled with
11814 * MSI-X multi-message interrupt mode and there are slow-path events in
11815 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11816 * interrupt mode, this function is called as part of the device-level
11817 * interrupt handler. When the PCI slot is in error recovery or the HBA is
11818 * undergoing initialization, the interrupt handler will not process the
11819 * interrupt. The link attention and ELS ring attention events are handled
11820 * by the worker thread. The interrupt handler signals the worker thread
11821 * and returns for these events. This function is called without any lock
11822 * held. It gets the hbalock to access and update SLI data structures.
11823 *
11824 * This function returns IRQ_HANDLED when interrupt is handled else it
11825 * returns IRQ_NONE.
11826 **/
11827irqreturn_t
11828lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11829{
11830 struct lpfc_hba *phba;
11831 struct lpfc_queue *speq;
11832 struct lpfc_eqe *eqe;
11833 unsigned long iflag;
11834 int ecount = 0;
11835
11836 /*
11837 * Get the driver's phba structure from the dev_id
11838 */
11839 phba = (struct lpfc_hba *)dev_id;
11840
11841 if (unlikely(!phba))
11842 return IRQ_NONE;
11843
11844 /* Get to the EQ struct associated with this vector */
11845 speq = phba->sli4_hba.sp_eq;
11846 if (unlikely(!speq))
11847 return IRQ_NONE;
11848
11849 /* Check device state for handling interrupt */
11850 if (unlikely(lpfc_intr_state_check(phba))) {
11851 speq->EQ_badstate++;
11852 /* Check again for link_state with lock held */
11853 spin_lock_irqsave(&phba->hbalock, iflag);
11854 if (phba->link_state < LPFC_LINK_DOWN)
11855 /* Flush, clear interrupt, and rearm the EQ */
11856 lpfc_sli4_eq_flush(phba, speq);
11857 spin_unlock_irqrestore(&phba->hbalock, iflag);
11858 return IRQ_NONE;
11859 }
11860
11861 /*
11862 * Process all the event on FCP slow-path EQ
11863 */
11864 while ((eqe = lpfc_sli4_eq_get(speq))) {
11865 lpfc_sli4_sp_handle_eqe(phba, eqe);
11866 if (!(++ecount % speq->entry_repost))
11867 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11868 speq->EQ_processed++;
11869 }
11870
11871 /* Track the max number of EQEs processed in 1 intr */
11872 if (ecount > speq->EQ_max_eqe)
11873 speq->EQ_max_eqe = ecount;
11874
11875 /* Always clear and re-arm the slow-path EQ */
11876 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11877
11878 /* Catch the no cq entry condition */
11879 if (unlikely(ecount == 0)) {
11880 speq->EQ_no_entry++;
11881 if (phba->intr_type == MSIX)
11882 /* MSI-X treated interrupt served as no EQ share INT */
11883 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11884 "0357 MSI-X interrupt with no EQE\n");
11885 else
11886 /* Non MSI-X treated on interrupt as EQ share INT */
11887 return IRQ_NONE;
11888 }
11889
11890 return IRQ_HANDLED;
11891} /* lpfc_sli4_sp_intr_handler */
11892
11893/**
11894 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
11895 * @irq: Interrupt number. 11803 * @irq: Interrupt number.
11896 * @dev_id: The device context pointer. 11804 * @dev_id: The device context pointer.
11897 * 11805 *
@@ -11908,11 +11816,16 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11908 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 11816 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
11909 * equal to that of FCP CQ index. 11817 * equal to that of FCP CQ index.
11910 * 11818 *
11819 * The link attention and ELS ring attention events are handled
11820 * by the worker thread. The interrupt handler signals the worker thread
11821 * and returns for these events. This function is called without any lock
11822 * held. It gets the hbalock to access and update SLI data structures.
11823 *
11911 * This function returns IRQ_HANDLED when interrupt is handled else it 11824 * This function returns IRQ_HANDLED when interrupt is handled else it
11912 * returns IRQ_NONE. 11825 * returns IRQ_NONE.
11913 **/ 11826 **/
11914irqreturn_t 11827irqreturn_t
11915lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11828lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11916{ 11829{
11917 struct lpfc_hba *phba; 11830 struct lpfc_hba *phba;
11918 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 11831 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
@@ -11929,11 +11842,11 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11929 11842
11930 if (unlikely(!phba)) 11843 if (unlikely(!phba))
11931 return IRQ_NONE; 11844 return IRQ_NONE;
11932 if (unlikely(!phba->sli4_hba.fp_eq)) 11845 if (unlikely(!phba->sli4_hba.hba_eq))
11933 return IRQ_NONE; 11846 return IRQ_NONE;
11934 11847
11935 /* Get to the EQ struct associated with this vector */ 11848 /* Get to the EQ struct associated with this vector */
11936 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11849 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
11937 if (unlikely(!fpeq)) 11850 if (unlikely(!fpeq))
11938 return IRQ_NONE; 11851 return IRQ_NONE;
11939 11852
@@ -11953,7 +11866,7 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11953 * Process all the event on FCP fast-path EQ 11866 * Process all the event on FCP fast-path EQ
11954 */ 11867 */
11955 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11868 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11956 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11869 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
11957 if (!(++ecount % fpeq->entry_repost)) 11870 if (!(++ecount % fpeq->entry_repost))
11958 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11871 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11959 fpeq->EQ_processed++; 11872 fpeq->EQ_processed++;
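
The polling loop above paces its doorbell writes: every entry_repost entries it releases the consumed EQE slots without rearming, and only the final release clears and rearms the queue. A sketch of that pacing with hypothetical eq_get()/eq_release() stubs standing in for lpfc_sli4_eq_get()/lpfc_sli4_eq_release():

#include <stdio.h>

#define ENTRY_REPOST 8
#define BURST 20

enum { NOARM, REARM };

/* eq_get() yields BURST fake entries; eq_release() just reports. */
static int eq_get(int *left) { return (*left)-- > 0; }
static void eq_release(int mode)
{
	printf("release consumed EQEs (%s)\n",
	       mode == REARM ? "rearm" : "no arm");
}

int main(void)
{
	int left = BURST, ecount = 0;

	while (eq_get(&left)) {
		/* ...handle the event queue entry here... */
		if (!(++ecount % ENTRY_REPOST))
			eq_release(NOARM);	/* give slots back mid-burst */
	}
	eq_release(REARM);	/* final: clear pending and rearm the EQ */
	printf("processed %d entries\n", ecount);
	return 0;
}
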
@@ -12001,8 +11914,8 @@ irqreturn_t
12001lpfc_sli4_intr_handler(int irq, void *dev_id) 11914lpfc_sli4_intr_handler(int irq, void *dev_id)
12002{ 11915{
12003 struct lpfc_hba *phba; 11916 struct lpfc_hba *phba;
12004 irqreturn_t sp_irq_rc, fp_irq_rc; 11917 irqreturn_t hba_irq_rc;
12005 bool fp_handled = false; 11918 bool hba_handled = false;
12006 uint32_t fcp_eqidx; 11919 uint32_t fcp_eqidx;
12007 11920
12008 /* Get the driver's phba structure from the dev_id */ 11921 /* Get the driver's phba structure from the dev_id */
@@ -12012,21 +11925,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
12012 return IRQ_NONE; 11925 return IRQ_NONE;
12013 11926
12014 /* 11927 /*
12015 * Invokes slow-path host attention interrupt handling as appropriate.
12016 */
12017 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
12018
12019 /*
12020 * Invoke fast-path host attention interrupt handling as appropriate. 11928 * Invoke fast-path host attention interrupt handling as appropriate.
12021 */ 11929 */
12022 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11930 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12023 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 11931 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12024 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11932 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12025 if (fp_irq_rc == IRQ_HANDLED) 11933 if (hba_irq_rc == IRQ_HANDLED)
12026 fp_handled |= true; 11934 hba_handled |= true;
12027 } 11935 }
12028 11936
12029 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 11937 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12030} /* lpfc_sli4_intr_handler */ 11938} /* lpfc_sli4_intr_handler */
12031 11939
12032/** 11940/**
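
Since there is no separate slow-path handler anymore, the device-level INTx/MSI handler above simply fans out over every I/O channel and reports IRQ_HANDLED if any channel had work. A standalone model of that fan-out; the per-channel handler is a stub that pretends channel 2 had an event:

#include <stdbool.h>
#include <stdio.h>

#define IO_CHANNELS 4

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

/* Hypothetical per-channel handler stub. */
static enum irqreturn channel_intr(int idx)
{
	return idx == 2 ? IRQ_HANDLED : IRQ_NONE;
}

/* Mirrors lpfc_sli4_intr_handler()'s new fan-out over all channels. */
static enum irqreturn device_intr(void)
{
	bool handled = false;

	for (int idx = 0; idx < IO_CHANNELS; idx++)
		if (channel_intr(idx) == IRQ_HANDLED)
			handled = true;

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

int main(void)
{
	printf("device_intr -> %s\n",
	       device_intr() == IRQ_HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}
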
@@ -12157,7 +12065,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12157 union lpfc_sli4_cfg_shdr *shdr; 12065 union lpfc_sli4_cfg_shdr *shdr;
12158 uint16_t dmult; 12066 uint16_t dmult;
12159 12067
12160 if (startq >= phba->cfg_fcp_eq_count) 12068 if (startq >= phba->cfg_fcp_io_channel)
12161 return 0; 12069 return 0;
12162 12070
12163 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12071 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -12174,9 +12082,9 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12174 dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1; 12082 dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;
12175 12083
12176 cnt = 0; 12084 cnt = 0;
12177 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count; 12085 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12178 fcp_eqidx++) { 12086 fcp_eqidx++) {
12179 eq = phba->sli4_hba.fp_eq[fcp_eqidx]; 12087 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12180 if (!eq) 12088 if (!eq)
12181 continue; 12089 continue;
12182 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 12090 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
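
lpfc_modify_fcp_eq_delay() now walks hba_eq[] from startq through cfg_fcp_io_channel, programming every EQ with the same delay multiplier derived from cfg_fcp_imax. A sketch of the multiplier arithmetic; the LPFC_DMULT_CONST value used here is an assumption, the kernel header defines the authoritative one:

#include <stdio.h>

/* Assumed constant for illustration; see lpfc_hw4.h for the real value. */
#define LPFC_DMULT_CONST 651042

/* dmult derivation as in the hunk above: a higher fcp_imax (more
 * interrupts per second allowed) yields a smaller delay multiplier. */
static unsigned int eq_delay_multiplier(unsigned int fcp_imax)
{
	return LPFC_DMULT_CONST / fcp_imax - 1;
}

int main(void)
{
	unsigned int imax;

	for (imax = 10000; imax <= 40000; imax += 10000)
		printf("fcp_imax=%u -> dmult=%u\n", imax,
		       eq_delay_multiplier(imax));
	return 0;
}
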
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index e7d84134beb1..b4829d1d9e83 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -34,18 +34,10 @@
34/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ 34/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
35#define LPFC_NEMBED_MBOX_SGL_CNT 254 35#define LPFC_NEMBED_MBOX_SGL_CNT 254
36 36
37/* Multi-queue arrangement for fast-path FCP work queues */ 37/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
38#define LPFC_FN_EQN_MAX 8 38#define LPFC_FCP_IO_CHAN_DEF 4
39#define LPFC_SP_EQN_DEF 1 39#define LPFC_FCP_IO_CHAN_MIN 1
40#define LPFC_FP_EQN_DEF 4 40#define LPFC_FCP_IO_CHAN_MAX 8
41#define LPFC_FP_EQN_MIN 1
42#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
43
44#define LPFC_FN_WQN_MAX 32
45#define LPFC_SP_WQN_DEF 1
46#define LPFC_FP_WQN_DEF 4
47#define LPFC_FP_WQN_MIN 1
48#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
49 41
50/* 42/*
51 * Provide the default FCF Record attributes used by the driver 43 * Provide the default FCF Record attributes used by the driver
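
The three LPFC_FCP_IO_CHAN_* values above replace the old per-EQ/per-WQ count tables, and the LPFC_ATTR_R() machinery in lpfc_attr.c range-checks a user-supplied value against them, falling back to the default. A hypothetical sketch of that validation step, not the macro's actual expansion:

#include <stdio.h>

#define LPFC_FCP_IO_CHAN_DEF 4
#define LPFC_FCP_IO_CHAN_MIN 1
#define LPFC_FCP_IO_CHAN_MAX 8

/* Sketch of the module-parameter range check: out-of-range input
 * falls back to the default channel count. */
static int fcp_io_channel_init(int val)
{
	if (val >= LPFC_FCP_IO_CHAN_MIN && val <= LPFC_FCP_IO_CHAN_MAX)
		return val;
	fprintf(stderr, "lpfc: fcp_io_channel %d out of range, using %d\n",
		val, LPFC_FCP_IO_CHAN_DEF);
	return LPFC_FCP_IO_CHAN_DEF;
}

int main(void)
{
	printf("%d\n", fcp_io_channel_init(6));		/* kept */
	printf("%d\n", fcp_io_channel_init(31));	/* falls back to 4 */
	return 0;
}
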
@@ -497,17 +489,19 @@ struct lpfc_sli4_hba {
497 uint32_t cfg_eqn; 489 uint32_t cfg_eqn;
498 uint32_t msix_vec_nr; 490 uint32_t msix_vec_nr;
499 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 491 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
492
500 /* Pointers to the constructed SLI4 queues */ 493 /* Pointers to the constructed SLI4 queues */
501 struct lpfc_queue **fp_eq; /* Fast-path event queue */ 494 struct lpfc_queue **hba_eq;/* Event queues for HBA */
502 struct lpfc_queue *sp_eq; /* Slow-path event queue */ 495 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
503 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ 496 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
497 uint16_t *fcp_cq_map;
498
499 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
500 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
504 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ 501 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
505 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ 502 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
506 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ 503 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
507 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ 504 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
508 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
509 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
510 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
511 505
512 /* Setup information for various queue parameters */ 506 /* Setup information for various queue parameters */
513 int eq_esize; 507 int eq_esize;
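
After the struct reshuffle above, the fast-path resources are three parallel arrays plus a CQ-id map, all indexed by I/O channel. A condensed standalone view of that ownership; the container struct and alloc helper are illustrative, not the driver's actual layout:

#include <stdlib.h>

struct lpfc_queue;	/* opaque here */

/* Condensed model of the per-channel members of struct lpfc_sli4_hba
 * after this patch: index i is one EQ/CQ/WQ I/O channel. */
struct io_channels {
	unsigned int nr;			/* cfg_fcp_io_channel */
	struct lpfc_queue **hba_eq;		/* one EQ per channel/vector */
	struct lpfc_queue **fcp_cq;		/* FCP completion queue */
	struct lpfc_queue **fcp_wq;		/* FCP work queue */
	unsigned short *fcp_cq_map;		/* CQ id for EQE dispatch */
};

static int io_channels_alloc(struct io_channels *ch, unsigned int nr)
{
	ch->nr = nr;
	ch->hba_eq = calloc(nr, sizeof(*ch->hba_eq));
	ch->fcp_cq = calloc(nr, sizeof(*ch->fcp_cq));
	ch->fcp_wq = calloc(nr, sizeof(*ch->fcp_wq));
	ch->fcp_cq_map = calloc(nr, sizeof(*ch->fcp_cq_map));
	if (!ch->hba_eq || !ch->fcp_cq || !ch->fcp_wq || !ch->fcp_cq_map)
		return -1;	/* (cleanup omitted in this sketch) */
	return 0;
}

int main(void)
{
	struct io_channels ch;
	return io_channels_alloc(&ch, 4);
}
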