-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |   7
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c     |  96
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h     |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  | 303
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h  |   3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h      |   3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     | 503
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c     |  18
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c     |  28
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      | 148
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h     |  64
11 files changed, 831 insertions, 343 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 0f8964fdfecf..9fd2811ffa8b 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -84,8 +84,6 @@ struct lpfc_sli2_slim;
 #define LPFC_HB_MBOX_INTERVAL   5	/* Heart beat interval in seconds. */
 #define LPFC_HB_MBOX_TIMEOUT    30	/* Heart beat timeout in seconds. */
 
-#define LPFC_LOOK_AHEAD_OFF	0	/* Look ahead logic is turned off */
-
 /* Error Attention event polling interval */
 #define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */
 
@@ -821,6 +819,7 @@ struct lpfc_hba {
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
 	uint32_t cfg_hdw_queue;
+	uint32_t cfg_irq_chann;
 	uint32_t cfg_suppress_rsp;
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_embed_cmd;
@@ -1042,6 +1041,9 @@ struct lpfc_hba {
 	struct dentry *debug_nvmeio_trc;
 	struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
 	struct dentry *debug_hdwqinfo;
+#ifdef LPFC_HDWQ_LOCK_STAT
+	struct dentry *debug_lockstat;
+#endif
 	atomic_t nvmeio_trc_cnt;
 	uint32_t nvmeio_trc_size;
 	uint32_t nvmeio_trc_output_idx;
@@ -1161,6 +1163,7 @@ struct lpfc_hba {
 #define LPFC_CHECK_NVME_IO	1
#define LPFC_CHECK_NVMET_RCV	2
 #define LPFC_CHECK_NVMET_IO	4
+#define LPFC_CHECK_SCSI_IO	8
 	uint16_t ktime_on;
 	uint64_t ktime_data_samples;
 	uint64_t ktime_status_samples;
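These header changes carry the core idea of the patch: cfg_hdw_queue (the number of CQ/WQ pairs) is decoupled from the new cfg_irq_chann (the number of EQs, and therefore MSI-X vectors), so several hardware queues can share one interrupt channel. A minimal sketch of the clamping rules the later lpfc_attr.c hunks enforce (a standalone, hypothetical helper for illustration, not driver code):

/*
 * Sketch only: mirrors the checks added to lpfc_get_cfgparam() and
 * lpfc_nvme_mod_param_dep() below.
 */
static unsigned int clamp_irq_channels(unsigned int irq_chann,
				       unsigned int hdw_queue,
				       unsigned int num_present_cpu)
{
	if (irq_chann == 0)			/* 0 selects one channel per CPU */
		irq_chann = num_present_cpu;
	if (irq_chann > num_present_cpu)	/* never exceed present CPUs */
		irq_chann = num_present_cpu;
	if (irq_chann > hdw_queue)		/* never more EQs than hardware queues */
		irq_chann = hdw_queue;
	return irq_chann;
}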
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 787812dd57a9..fc7f80d68638 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4958,7 +4958,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 	phba->cfg_fcp_imax = (uint32_t)val;
 	phba->initial_imax = phba->cfg_fcp_imax;
 
-	for (i = 0; i < phba->cfg_hdw_queue; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
+	for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
 		lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
 					 val);
 
@@ -5059,13 +5059,6 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 				phba->cfg_fcp_cpu_map,
 				phba->sli4_hba.num_online_cpu);
 		break;
-	case 2:
-		len += snprintf(buf + len, PAGE_SIZE-len,
-				"fcp_cpu_map: Driver centric mapping (%d): "
-				"%d online CPUs\n",
-				phba->cfg_fcp_cpu_map,
-				phba->sli4_hba.num_online_cpu);
-		break;
 	}
 
 	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
@@ -5076,35 +5069,35 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 			len += snprintf(
 				buf + len, PAGE_SIZE - len,
 				"CPU %02d hdwq None "
-				"physid %d coreid %d\n",
+				"physid %d coreid %d ht %d\n",
 				phba->sli4_hba.curr_disp_cpu,
 				cpup->phys_id,
-				cpup->core_id);
+				cpup->core_id, cpup->hyper);
 		else
 			len += snprintf(
 				buf + len, PAGE_SIZE - len,
-				"CPU %02d hdwq %04d "
-				"physid %d coreid %d\n",
+				"CPU %02d EQ %04d hdwq %04d "
+				"physid %d coreid %d ht %d\n",
 				phba->sli4_hba.curr_disp_cpu,
-				cpup->hdwq, cpup->phys_id,
-				cpup->core_id);
+				cpup->eq, cpup->hdwq, cpup->phys_id,
+				cpup->core_id, cpup->hyper);
 	} else {
 		if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
 			len += snprintf(
 				buf + len, PAGE_SIZE - len,
 				"CPU %02d hdwq None "
-				"physid %d coreid %d IRQ %d\n",
+				"physid %d coreid %d ht %d IRQ %d\n",
 				phba->sli4_hba.curr_disp_cpu,
 				cpup->phys_id,
-				cpup->core_id, cpup->irq);
+				cpup->core_id, cpup->hyper, cpup->irq);
 		else
 			len += snprintf(
 				buf + len, PAGE_SIZE - len,
-				"CPU %02d hdwq %04d "
-				"physid %d coreid %d IRQ %d\n",
+				"CPU %02d EQ %04d hdwq %04d "
+				"physid %d coreid %d ht %d IRQ %d\n",
 				phba->sli4_hba.curr_disp_cpu,
-				cpup->hdwq, cpup->phys_id,
-				cpup->core_id, cpup->irq);
+				cpup->eq, cpup->hdwq, cpup->phys_id,
+				cpup->core_id, cpup->hyper, cpup->irq);
 	}
 
 	phba->sli4_hba.curr_disp_cpu++;
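With the extra columns, one line of the fcp_cpu_map sysfs output would now render along the lines of "CPU 05 EQ 0005 hdwq 0005 physid 0 coreid 5 ht 0 IRQ 69" (values illustrative only, derived from the format strings above), exposing both the EQ that services the CPU and whether the CPU is a hyperthread sibling.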
@@ -5146,14 +5139,13 @@ lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
 # for the HBA.
 #
-# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+# Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
 # 0 - Do not affinitze IRQ vectors
 # 1 - Affintize HBA vectors with respect to each HBA
 #     (start with CPU0 for each HBA)
-# 2 - Affintize HBA vectors with respect to the entire driver
-#     (round robin thru all CPUs across all HBAs)
+# This also defines how Hardware Queues are mapped to specific CPUs.
 */
-static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
 		 "Defines how to map CPUs to IRQ vectors per HBA");
@@ -5187,7 +5179,7 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"3326 lpfc_fcp_cpu_map: %d out of range, using "
 			"default\n", val);
-	phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+	phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
 
 	return 0;
 }
@@ -5308,7 +5300,7 @@ LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
 * through WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_HDWQ,
+LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
 	     LPFC_FCP_SCHED_BY_HDWQ,
 	     LPFC_FCP_SCHED_BY_CPU,
 	     "Determine scheduling algorithm for "
@@ -5474,18 +5466,18 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
 	     "Embed NVME Command in WQE");
 
 /*
- * lpfc_hdw_queue: Set the number of IO channels the driver
+ * lpfc_hdw_queue: Set the number of Hardware Queues the driver
  * will advertise it supports to the NVME and  SCSI layers. This also
- * will map to the number of EQ/CQ/WQs the driver will create.
+ * will map to the number of CQ/WQ pairs the driver will create.
  *
  * The NVME Layer will try to create this many, plus 1 administrative
  * hardware queue. The administrative queue will always map to WQ 0
- * A hardware IO queue maps (qidx) to a specific driver WQ.
+ * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
  *
  * 0 = Configure the number of hdw queues to the number of active CPUs.
- * 1,64 = Manually specify how many hdw queues to use.
+ * 1,128 = Manually specify how many hdw queues to use.
 *
- * Value range is [0,64]. Default value is 0.
+ * Value range is [0,128]. Default value is 0.
 */
 LPFC_ATTR_R(hdw_queue,
 	    LPFC_HBA_HDWQ_DEF,
@@ -5493,6 +5485,22 @@ LPFC_ATTR_R(hdw_queue,
 	    "Set the number of I/O Hardware Queues");
 
 /*
+ * lpfc_irq_chann: Set the number of IRQ vectors that are available
+ * for Hardware Queues to utilize.  This also will map to the number
+ * of EQ / MSI-X vectors the driver will create. This should never be
+ * more than the number of Hardware Queues
+ *
+ * 0 = Configure number of IRQ Channels to the number of active CPUs.
+ * 1,128 = Manually specify how many IRQ Channels to use.
+ *
+ * Value range is [0,128]. Default value is 0.
+ */
+LPFC_ATTR_R(irq_chann,
+	    LPFC_HBA_HDWQ_DEF,
+	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
+	    "Set the number of I/O IRQ Channels");
+
+/*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 # 0  = HBA resets disabled
 # 1  = HBA resets enabled (default)
@@ -5533,16 +5541,6 @@ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
 /*
-# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
-# 0  = disabled (default)
-# 1  = enabled
-# Value range is [0,1]. Default value is 0.
-#
-# This feature in under investigation and may be supported in the future.
-*/
-unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
-
-/*
 # lpfc_prot_mask: i
 # - Bit mask of host protection capabilities used to register with the
 #   SCSI mid-layer
@@ -5788,6 +5786,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_cpu_map,
 	&dev_attr_lpfc_hdw_queue,
+	&dev_attr_lpfc_irq_chann,
 	&dev_attr_lpfc_suppress_rsp,
 	&dev_attr_lpfc_nvmet_mrq,
 	&dev_attr_lpfc_nvmet_mrq_post,
@@ -6867,6 +6866,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
 	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
 	lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
+	lpfc_irq_chann_init(phba, lpfc_irq_chann);
 	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
 	lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
 
@@ -6891,6 +6891,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	/* A value of 0 means use the number of CPUs found in the system */
 	if (phba->cfg_hdw_queue == 0)
 		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann == 0)
+		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+		phba->cfg_irq_chann = phba->cfg_hdw_queue;
 
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
@@ -6933,6 +6937,10 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 {
 	if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
 		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
+		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+		phba->cfg_irq_chann = phba->cfg_hdw_queue;
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
 	    phba->nvmet_support) {
@@ -6953,11 +6961,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 	}
 
 	if (!phba->cfg_nvmet_mrq)
-		phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
+		phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
 
 	/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
-	if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
-		phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
+	if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
+		phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
 				"6018 Adjust lpfc_nvmet_mrq to %d\n",
 				phba->cfg_nvmet_mrq);
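Both knobs are plain module parameters, so the split can be exercised at load time, for example modprobe lpfc lpfc_hdw_queue=16 lpfc_irq_chann=4 (illustrative values) for four MSI-X vectors shared across sixteen hardware queues, subject to the clamping in lpfc_get_cfgparam() and lpfc_nvme_mod_param_dep() above. Note too that the default I/O scheduling policy flips from LPFC_FCP_SCHED_BY_HDWQ to LPFC_FCP_SCHED_BY_CPU, which pairs naturally with the per-CPU queue mapping.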
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 726cd6a7c452..982401c31c12 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -440,7 +440,6 @@ extern spinlock_t _dump_buf_lock;
 extern int _dump_buf_done;
 extern spinlock_t pgcnt_lock;
 extern unsigned int pgcnt;
-extern unsigned int lpfc_fcp_look_ahead;
 
 /* Interface exported by fabric iocb scheduler */
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2a2c46766eb6..72076b2cd4ff 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -378,6 +378,67 @@ skipit:
 	return len;
 }
 
+static int lpfc_debugfs_last_xripool;
+
+/**
+ * lpfc_debugfs_common_xri_data - Dump Hardware Queue info to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the Hardware Queue info from the @phba to @buf up to
+ * @size number of bytes. A header that describes the current hdwq state will be
+ * dumped to @buf first and then info on each hdwq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hdwq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured Hardware Queue each
+ * time called.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	int len = 0;
+	int i, out;
+	unsigned long iflag;
+
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80))
+			break;
+		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
+
+		len += snprintf(buf + len, size - len, "HdwQ %d Info ", i);
+		spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
+		spin_lock(&qp->abts_nvme_buf_list_lock);
+		spin_lock(&qp->io_buf_list_get_lock);
+		spin_lock(&qp->io_buf_list_put_lock);
+		out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
+			qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
+		len += snprintf(buf + len, size - len,
+				"tot:%d get:%d put:%d mt:%d "
+				"ABTS scsi:%d nvme:%d Out:%d\n",
+			qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
+			qp->empty_io_bufs, qp->abts_scsi_io_bufs,
+			qp->abts_nvme_io_bufs, out);
+		spin_unlock(&qp->io_buf_list_put_lock);
+		spin_unlock(&qp->io_buf_list_get_lock);
+		spin_unlock(&qp->abts_nvme_buf_list_lock);
+		spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+
+		lpfc_debugfs_last_xripool++;
+		if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
+			lpfc_debugfs_last_xripool = 0;
+	}
+
+	return len;
+}
+
 /**
  * lpfc_debugfs_multixripools_data - Display multi-XRI pools information
  * @phba: The HBA to gather host buffer info from.
@@ -405,6 +466,17 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
 	u32 txcmplq_cnt;
 	char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0};
 
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return 0;
+
+	if (!phba->sli4_hba.hdwq)
+		return 0;
+
+	if (!phba->cfg_xri_rebalancing) {
+		i = lpfc_debugfs_commonxripools_data(phba, buf, size);
+		return i;
+	}
+
 	/*
 	 * Pbl: Current number of free XRIs in public pool
 	 * Pvt: Current number of free XRIs in private pool
@@ -498,10 +570,12 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
 	return strnlen(buf, size);
 }
 
-static int lpfc_debugfs_last_hdwq;
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+static int lpfc_debugfs_last_lock;
 
 /**
- * lpfc_debugfs_hdwqinfo_data - Dump Hardware Queue info to a buffer
+ * lpfc_debugfs_lockstat_data - Dump Hardware Queue info to a buffer
  * @phba: The HBA to gather host buffer info from.
  * @buf: The buffer to dump log into.
  * @size: The maximum amount of data to process.
@@ -521,12 +595,11 @@ static int lpfc_debugfs_last_hdwq;
  * not exceed @size.
  **/
 static int
-lpfc_debugfs_hdwqinfo_data(struct lpfc_hba *phba, char *buf, int size)
+lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
 {
 	struct lpfc_sli4_hdw_queue *qp;
 	int len = 0;
-	int i, out;
-	unsigned long iflag;
+	int i;
 
 	if (phba->sli_rev != LPFC_SLI_REV4)
 		return 0;
@@ -535,35 +608,40 @@ lpfc_debugfs_hdwqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 		return 0;
 
 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
-		if (len > (LPFC_HDWQINFO_SIZE - 80))
+		if (len > (LPFC_HDWQINFO_SIZE - 100))
 			break;
-		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_hdwq];
+		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock];
 
-		len += snprintf(buf + len, size - len, "HdwQ %d Info ", i);
-		spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
-		spin_lock(&qp->abts_nvme_buf_list_lock);
-		spin_lock(&qp->io_buf_list_get_lock);
-		spin_lock(&qp->io_buf_list_put_lock);
-		out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
-			qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
-		len += snprintf(buf + len, size - len,
-				"tot:%d get:%d put:%d mt:%d "
-				"ABTS scsi:%d nvme:%d Out:%d\n",
-			qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
-			qp->empty_io_bufs, qp->abts_scsi_io_bufs,
-			qp->abts_nvme_io_bufs, out);
-		spin_unlock(&qp->io_buf_list_put_lock);
-		spin_unlock(&qp->io_buf_list_get_lock);
-		spin_unlock(&qp->abts_nvme_buf_list_lock);
-		spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+		len += snprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
+		if (phba->cfg_xri_rebalancing) {
+			len += snprintf(buf + len, size - len,
+					"get_pvt:%d mv_pvt:%d "
+					"mv2pub:%d mv2pvt:%d "
+					"put_pvt:%d put_pub:%d wq:%d\n",
+					qp->lock_conflict.alloc_pvt_pool,
+					qp->lock_conflict.mv_from_pvt_pool,
+					qp->lock_conflict.mv_to_pub_pool,
+					qp->lock_conflict.mv_to_pvt_pool,
+					qp->lock_conflict.free_pvt_pool,
+					qp->lock_conflict.free_pub_pool,
+					qp->lock_conflict.wq_access);
+		} else {
+			len += snprintf(buf + len, size - len,
+					"get:%d put:%d free:%d wq:%d\n",
+					qp->lock_conflict.alloc_xri_get,
+					qp->lock_conflict.alloc_xri_put,
+					qp->lock_conflict.free_xri,
+					qp->lock_conflict.wq_access);
+		}
 
-		lpfc_debugfs_last_hdwq++;
-		if (lpfc_debugfs_last_hdwq >= phba->cfg_hdw_queue)
-			lpfc_debugfs_last_hdwq = 0;
+		lpfc_debugfs_last_lock++;
+		if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue)
+			lpfc_debugfs_last_lock = 0;
 	}
 
 	return len;
 }
+#endif
 
 static int lpfc_debugfs_last_hba_slim_off;
 
@@ -964,7 +1042,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 	struct lpfc_nvme_lport *lport;
 	uint64_t data1, data2, data3;
 	uint64_t tot, totin, totout;
-	int cnt, i, maxch;
+	int cnt, i;
 	int len = 0;
 
 	if (phba->nvmet_support) {
@@ -1106,10 +1184,6 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			atomic_read(&lport->fc4NvmeLsRequests),
 			atomic_read(&lport->fc4NvmeLsCmpls));
 
-	if (phba->cfg_hdw_queue < LPFC_HBA_HDWQ_MAX)
-		maxch = phba->cfg_hdw_queue;
-	else
-		maxch = LPFC_HBA_HDWQ_MAX;
 	totin = 0;
 	totout = 0;
 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
@@ -1547,7 +1621,7 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_sli4_hdw_queue *qp;
-	int i, j;
+	int i, j, max_cnt;
 	int len = 0;
 	uint32_t tot_xmt;
 	uint32_t tot_rcv;
@@ -1565,6 +1639,7 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
 	} else {
 		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
 	}
+	max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
 
 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
 		qp = &phba->sli4_hba.hdwq[i];
@@ -1606,6 +1681,11 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
 		}
 		len += snprintf(buf + len, PAGE_SIZE - len,
 				"Total: %x\n", tot_xmt);
+		if (len >= max_cnt) {
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"Truncated ...\n");
+			return len;
+		}
 	}
 	return len;
 }
@@ -1904,11 +1984,8 @@ lpfc_debugfs_multixripools_open(struct inode *inode, struct file *file)
 		goto out;
 	}
 
-	if (phba->cfg_xri_rebalancing)
-		debug->len = lpfc_debugfs_multixripools_data(
-			phba, debug->buffer, LPFC_DUMP_MULTIXRIPOOL_SIZE);
-	else
-		debug->len = 0;
+	debug->len = lpfc_debugfs_multixripools_data(
+		phba, debug->buffer, LPFC_DUMP_MULTIXRIPOOL_SIZE);
 
 	debug->i_private = inode->i_private;
 	file->private_data = debug;
@@ -1918,8 +1995,9 @@ out:
 	return rc;
 }
 
+#ifdef LPFC_HDWQ_LOCK_STAT
 /**
- * lpfc_debugfs_hdwqinfo_open - Open the hdwqinfo debugfs buffer
+ * lpfc_debugfs_lockstat_open - Open the lockstat debugfs buffer
  * @inode: The inode pointer that contains a vport pointer.
  * @file: The file pointer to attach the log output.
  *
@@ -1934,7 +2012,7 @@ out:
  * error value.
  **/
 static int
-lpfc_debugfs_hdwqinfo_open(struct inode *inode, struct file *file)
+lpfc_debugfs_lockstat_open(struct inode *inode, struct file *file)
 {
 	struct lpfc_hba *phba = inode->i_private;
 	struct lpfc_debug *debug;
@@ -1951,7 +2029,7 @@ lpfc_debugfs_hdwqinfo_open(struct inode *inode, struct file *file)
 		goto out;
 	}
 
-	debug->len = lpfc_debugfs_hdwqinfo_data(phba, debug->buffer,
+	debug->len = lpfc_debugfs_lockstat_data(phba, debug->buffer,
 		LPFC_HBQINFO_SIZE);
 	file->private_data = debug;
 
@@ -1960,6 +2038,48 @@ out:
 	return rc;
 }
 
+static ssize_t
+lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	struct lpfc_sli4_hdw_queue *qp;
+	char mybuf[64];
+	char *pbuf;
+	int i;
+
+	/* Protect copy from user */
+	if (!access_ok(buf, nbytes))
+		return -EFAULT;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+	if (copy_from_user(mybuf, buf, nbytes))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+
+	if ((strncmp(pbuf, "reset", strlen("reset")) == 0) ||
+	    (strncmp(pbuf, "zero", strlen("zero")) == 0)) {
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			qp = &phba->sli4_hba.hdwq[i];
+			qp->lock_conflict.alloc_xri_get = 0;
+			qp->lock_conflict.alloc_xri_put = 0;
+			qp->lock_conflict.free_xri = 0;
+			qp->lock_conflict.wq_access = 0;
+			qp->lock_conflict.alloc_pvt_pool = 0;
+			qp->lock_conflict.mv_from_pvt_pool = 0;
+			qp->lock_conflict.mv_to_pub_pool = 0;
+			qp->lock_conflict.mv_to_pvt_pool = 0;
+			qp->lock_conflict.free_pvt_pool = 0;
+			qp->lock_conflict.free_pub_pool = 0;
+			qp->lock_conflict.wq_access = 0;
+		}
+	}
+	return nbytes;
+}
+#endif
+
 /**
  * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
  * @inode: The inode pointer that contains a vport pointer.
@@ -2816,7 +2936,7 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
 	}
 
 	debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
-		LPFC_NVMEKTIME_SIZE);
+		LPFC_CPUCHECK_SIZE);
 
 	debug->i_private = inode->i_private;
 	file->private_data = debug;
@@ -2851,8 +2971,18 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
 		if (phba->nvmet_support)
 			phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
 		else
+			phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
+				LPFC_CHECK_SCSI_IO);
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
+		if (phba->nvmet_support)
+			phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+		else
 			phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
 		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
+		phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
+		return strlen(pbuf);
 	} else if ((strncmp(pbuf, "rcv",
 			sizeof("rcv") - 1) == 0)) {
 		if (phba->nvmet_support)
@@ -3732,46 +3862,38 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
 			      int *len, int max_cnt, int eqidx, int eq_id)
 {
 	struct lpfc_queue *qp;
-	int qidx, rc;
+	int rc;
 
-	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-		qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
-		if (qp->assoc_qid != eq_id)
-			continue;
+	qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
 
-		*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+	*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
 
-		/* Reset max counter */
-		qp->CQ_max_cqe = 0;
+	/* Reset max counter */
+	qp->CQ_max_cqe = 0;
 
-		if (*len >= max_cnt)
-			return 1;
+	if (*len >= max_cnt)
+		return 1;
 
-		rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
-				max_cnt, qp->queue_id);
-		if (rc)
-			return 1;
-	}
+	rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+				   max_cnt, qp->queue_id);
+	if (rc)
+		return 1;
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-			qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
-			if (qp->assoc_qid != eq_id)
-				continue;
+		qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
 
-			*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
+		*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
 
-			/* Reset max counter */
-			qp->CQ_max_cqe = 0;
+		/* Reset max counter */
+		qp->CQ_max_cqe = 0;
 
-			if (*len >= max_cnt)
-				return 1;
+		if (*len >= max_cnt)
+			return 1;
 
-			rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
-						max_cnt, qp->queue_id);
-			if (rc)
-				return 1;
-		}
+		rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
+					   max_cnt, qp->queue_id);
+		if (rc)
+			return 1;
 	}
 
 	if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
@@ -3812,9 +3934,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
 			(unsigned long long)qp->q_cnt_4, qp->q_mode);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
-			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d] AFFIN[%03d]",
 			qp->queue_id, qp->entry_count, qp->entry_size,
-			qp->host_index, qp->hba_index, qp->entry_repost);
+			qp->host_index, qp->hba_index, qp->entry_repost,
+			qp->chann);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 
 	return len;
@@ -3869,7 +3992,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 		phba->lpfc_idiag_last_eq = 0;
 
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-			"EQ %d out of %d HBA EQs\n",
+			"HDWQ %d out of %d HBA HDWQs\n",
 			x, phba->cfg_hdw_queue);
 
 	/* Fast-path EQ */
@@ -5299,14 +5422,17 @@ static const struct file_operations lpfc_debugfs_op_hbqinfo = {
 	.release =      lpfc_debugfs_release,
 };
 
-#undef lpfc_debugfs_op_hdwqinfo
-static const struct file_operations lpfc_debugfs_op_hdwqinfo = {
+#ifdef LPFC_HDWQ_LOCK_STAT
+#undef lpfc_debugfs_op_lockstat
+static const struct file_operations lpfc_debugfs_op_lockstat = {
 	.owner =        THIS_MODULE,
-	.open =         lpfc_debugfs_hdwqinfo_open,
+	.open =         lpfc_debugfs_lockstat_open,
 	.llseek =       lpfc_debugfs_lseek,
 	.read =         lpfc_debugfs_read,
+	.write =        lpfc_debugfs_lockstat_write,
 	.release =      lpfc_debugfs_release,
 };
+#endif
 
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
@@ -5756,17 +5882,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 						 phba->hba_debugfs_root,
 						 phba, &lpfc_debugfs_op_hbqinfo);
 
-	/* Setup hdwqinfo */
-	snprintf(name, sizeof(name), "hdwqinfo");
-	phba->debug_hdwqinfo =
+#ifdef LPFC_HDWQ_LOCK_STAT
+	/* Setup lockstat */
+	snprintf(name, sizeof(name), "lockstat");
+	phba->debug_lockstat =
 		debugfs_create_file(name, S_IFREG | 0644,
 				    phba->hba_debugfs_root,
-				    phba, &lpfc_debugfs_op_hdwqinfo);
-	if (!phba->debug_hdwqinfo) {
+				    phba, &lpfc_debugfs_op_lockstat);
+	if (!phba->debug_lockstat) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0511 Cant create debugfs hdwqinfo\n");
+				 "0913 Cant create debugfs lockstat\n");
 		goto debug_failed;
 	}
+#endif
 
 	/* Setup dumpHBASlim */
 	if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -6006,7 +6134,7 @@ nvmeio_off:
 					    vport, &lpfc_debugfs_op_scsistat);
 	if (!vport->debug_scsistat) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0811 Cannot create debugfs scsistat\n");
+				 "0914 Cannot create debugfs scsistat\n");
 		goto debug_failed;
 	}
 
@@ -6171,9 +6299,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 	debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
 	phba->debug_hbqinfo = NULL;
 
-	debugfs_remove(phba->debug_hdwqinfo); /* hdwqinfo */
-	phba->debug_hdwqinfo = NULL;
-
+#ifdef LPFC_HDWQ_LOCK_STAT
+	debugfs_remove(phba->debug_lockstat); /* lockstat */
+	phba->debug_lockstat = NULL;
+#endif
 	debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
 	phba->debug_dumpHBASlim = NULL;
 
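The debugfs surface thus changes shape: the unconditional hdwqinfo file is gone (its buffer accounting now reaches userspace through multixripools via lpfc_debugfs_commonxripools_data()), and a lockstat file appears only in builds with LPFC_HDWQ_LOCK_STAT defined. Reading lockstat cycles through each hardware queue's lock-contention counters; writing "reset" or "zero" clears them, e.g. echo reset > /sys/kernel/debug/lpfc/fn0/lockstat (path illustrative; the per-instance directory name varies).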
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index cf256a6dca42..1fbee6496f85 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -290,9 +290,6 @@ struct lpfc_idiag {
 /* multixripool output buffer size */
 #define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
 
-/* hdwqinfo output buffer size */
-#define LPFC_HDWQINFO_SIZE 8192
-
 enum {
 	DUMP_FCP,
 	DUMP_NVME,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index cd39845c909f..665852291a4f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -211,9 +211,8 @@ struct lpfc_sli_intf {
 #define LPFC_DEF_IMAX		150000
 
 #define LPFC_MIN_CPU_MAP	0
-#define LPFC_MAX_CPU_MAP	2
+#define LPFC_MAX_CPU_MAP	1
 #define LPFC_HBA_CPU_MAP	1
-#define LPFC_DRIVER_CPU_MAP	2 /* Default */
 
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES	8
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d9db29817f6b..145c08f112a3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -37,6 +37,7 @@
 #include <linux/miscdevice.h>
 #include <linux/percpu.h>
 #include <linux/msi.h>
+#include <linux/irq.h>
 #include <linux/bitops.h>
 
 #include <scsi/scsi.h>
@@ -92,6 +93,8 @@ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t);
+static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1367,13 +1370,13 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	}
 
 	/* Interrupts per sec per EQ */
-	val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;
+	val = phba->cfg_fcp_imax / phba->cfg_irq_chann;
 	tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
 
 	/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
 	max_cqe = time_elapsed * tick_cqe;
 
-	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+	for (i = 0; i < phba->cfg_irq_chann; i++) {
 		/* Fast-path EQ */
 		qp = phba->sli4_hba.hdwq[i].hba_eq;
 		if (!qp)
@@ -1397,7 +1400,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 		if (val) {
 			/* First, interrupts per sec per EQ */
 			val = phba->cfg_fcp_imax /
-				phba->cfg_hdw_queue;
+				phba->cfg_irq_chann;
 
 			/* us delay between each interrupt */
 			val = LPFC_SEC_TO_USEC / val;
@@ -4335,8 +4338,13 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_lun = vport->cfg_max_luns;
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
+
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		shost->nr_hw_queues = phba->cfg_hdw_queue;
+		if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
+			shost->nr_hw_queues = phba->cfg_hdw_queue;
+		else
+			shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+
 		shost->dma_boundary =
 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
 		shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
@@ -6819,7 +6827,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_remove_rpi_hdrs;
 	}
 
-	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_hdw_queue,
+	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
 					    sizeof(struct lpfc_hba_eq_hdl),
 					    GFP_KERNEL);
 	if (!phba->sli4_hba.hba_eq_hdl) {
@@ -8257,7 +8265,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 	struct lpfc_rsrc_desc_fcfcoe *desc;
 	char *pdesc_0;
 	uint16_t forced_link_speed;
-	uint32_t if_type;
+	uint32_t if_type, qmin;
 	int length, i, rc = 0, rc2;
 
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -8362,40 +8370,44 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			phba->sli4_hba.max_cfg_param.max_rq);
 
 		/*
-		 * Calculate NVME queue resources based on how
-		 * many WQ/CQs are available.
+		 * Calculate queue resources based on how
+		 * many WQ/CQ/EQs are available.
 		 */
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			length = phba->sli4_hba.max_cfg_param.max_wq;
-			if (phba->sli4_hba.max_cfg_param.max_cq <
-			    phba->sli4_hba.max_cfg_param.max_wq)
-				length = phba->sli4_hba.max_cfg_param.max_cq;
+		qmin = phba->sli4_hba.max_cfg_param.max_wq;
+		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
+			qmin = phba->sli4_hba.max_cfg_param.max_cq;
+		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
+			qmin = phba->sli4_hba.max_cfg_param.max_eq;
+		/*
+		 * Whats left after this can go toward NVME / FCP.
+		 * The minus 4 accounts for ELS, NVME LS, MBOX
+		 * plus one extra. When configured for
+		 * NVMET, FCP io channel WQs are not created.
+		 */
+		qmin -= 4;
 
-			/*
-			 * Whats left after this can go toward NVME.
-			 * The minus 6 accounts for ELS, NVME LS, MBOX
-			 * plus a couple extra. When configured for
-			 * NVMET, FCP io channel WQs are not created.
-			 */
-			length -= 6;
-
-			/* Take off FCP queues */
-			if (!phba->nvmet_support)
-				length -= phba->cfg_hdw_queue;
-
-			/* Check to see if there is enough for NVME */
-			if (phba->cfg_hdw_queue > length) {
-				lpfc_printf_log(
-					phba, KERN_ERR, LOG_SLI,
-					"2005 Reducing NVME IO channel to %d: "
-					"WQ %d CQ %d CommonIO %d\n",
-					length,
+		/* If NVME is configured, double the number of CQ/WQs needed */
+		if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+		    !phba->nvmet_support)
+			qmin /= 2;
+
+		/* Check to see if there is enough for NVME */
+		if ((phba->cfg_irq_chann > qmin) ||
+		    (phba->cfg_hdw_queue > qmin)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2005 Reducing Queues: "
+					"WQ %d CQ %d EQ %d: min %d: "
+					"IRQ %d HDWQ %d\n",
 					phba->sli4_hba.max_cfg_param.max_wq,
 					phba->sli4_hba.max_cfg_param.max_cq,
+					phba->sli4_hba.max_cfg_param.max_eq,
+					qmin, phba->cfg_irq_chann,
 					phba->cfg_hdw_queue);
 
-			phba->cfg_hdw_queue = length;
-		}
+			if (phba->cfg_irq_chann > qmin)
+				phba->cfg_irq_chann = qmin;
+			if (phba->cfg_hdw_queue > qmin)
+				phba->cfg_hdw_queue = qmin;
 		}
 	}
 
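A worked pass through the new budget: if the port reported max_wq = 160, max_cq = 144 and max_eq = 80 (made-up numbers), then qmin = min(160, 144, 80) = 80; subtracting the four reserved slots (ELS, NVME LS, MBOX, one spare) leaves 76; on a non-NVMET host with NVME enabled that halves to 38, so a request for more than 38 IRQ channels or hardware queues would be trimmed to 38 and reported via message 2005.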
@@ -8612,25 +8624,17 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 	 * device parameters
 	 */
 
-	if (phba->cfg_hdw_queue > phba->sli4_hba.max_cfg_param.max_eq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2575 Reducing IO channels to match number of "
-				"available EQs: from %d to %d\n",
-				phba->cfg_hdw_queue,
-				phba->sli4_hba.max_cfg_param.max_eq);
-		phba->cfg_hdw_queue = phba->sli4_hba.max_cfg_param.max_eq;
-	}
-
 	if (phba->nvmet_support) {
-		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
-			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
+		if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
+			phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
 	}
 	if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
 		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"2574 IO channels: hdwQ %d MRQ: %d\n",
-			phba->cfg_hdw_queue, phba->cfg_nvmet_mrq);
+			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
+			phba->cfg_hdw_queue, phba->cfg_irq_chann,
+			phba->cfg_nvmet_mrq);
 
 	/* Get EQ depth from module parameter, fake the default for now */
 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8658,6 +8662,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
 	}
 	qdesc->qe_valid = 1;
 	qdesc->hdwq = wqidx;
+	qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
 	phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
 
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
@@ -8669,6 +8674,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
 		return 1;
 	}
 	qdesc->hdwq = wqidx;
+	qdesc->chann = wqidx;
 	phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
 	return 0;
@@ -8698,6 +8704,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
 	}
 	qdesc->qe_valid = 1;
 	qdesc->hdwq = wqidx;
+	qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
 	phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
 
 	/* Create Fast Path FCP WQs */
@@ -8720,6 +8727,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
 		return 1;
 	}
 	qdesc->hdwq = wqidx;
+	qdesc->chann = wqidx;
 	phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
 	return 0;
@@ -8743,7 +8751,7 @@ int | |||
8743 | lpfc_sli4_queue_create(struct lpfc_hba *phba) | 8751 | lpfc_sli4_queue_create(struct lpfc_hba *phba) |
8744 | { | 8752 | { |
8745 | struct lpfc_queue *qdesc; | 8753 | struct lpfc_queue *qdesc; |
8746 | int idx; | 8754 | int idx, eqidx; |
8747 | struct lpfc_sli4_hdw_queue *qp; | 8755 | struct lpfc_sli4_hdw_queue *qp; |
8748 | 8756 | ||
8749 | /* | 8757 | /* |
@@ -8829,7 +8837,18 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8829 | 8837 | ||
8830 | /* Create HBA Event Queues (EQs) */ | 8838 | /* Create HBA Event Queues (EQs) */ |
8831 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { | 8839 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
8832 | /* Create EQs */ | 8840 | /* |
8841 | * If there are more Hardware Queues than available | ||
8842 | * EQs, multiple Hardware Queues may share a common EQ. | ||
8843 | */ | ||
8844 | if (idx >= phba->cfg_irq_chann) { | ||
8845 | /* Share an existing EQ */ | ||
8846 | eqidx = lpfc_find_eq_handle(phba, idx); | ||
8847 | phba->sli4_hba.hdwq[idx].hba_eq = | ||
8848 | phba->sli4_hba.hdwq[eqidx].hba_eq; | ||
8849 | continue; | ||
8850 | } | ||
8851 | /* Create an EQ */ | ||
8833 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, | 8852 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
8834 | phba->sli4_hba.eq_esize, | 8853 | phba->sli4_hba.eq_esize, |
8835 | phba->sli4_hba.eq_ecount); | 8854 | phba->sli4_hba.eq_ecount); |
@@ -8840,20 +8859,27 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8840 | } | 8859 | } |
8841 | qdesc->qe_valid = 1; | 8860 | qdesc->qe_valid = 1; |
8842 | qdesc->hdwq = idx; | 8861 | qdesc->hdwq = idx; |
8862 | |||
10863 | /* Save the CPU this EQ is affinitized to */ | ||
8864 | eqidx = lpfc_find_eq_handle(phba, idx); | ||
8865 | qdesc->chann = lpfc_find_cpu_handle(phba, eqidx, | ||
8866 | LPFC_FIND_BY_EQ); | ||
8843 | phba->sli4_hba.hdwq[idx].hba_eq = qdesc; | 8867 | phba->sli4_hba.hdwq[idx].hba_eq = qdesc; |
8844 | } | 8868 | } |
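
Note on the sharing rule introduced above: only the first cfg_irq_chann hardware queues allocate an EQ of their own; later queues borrow the EQ that lpfc_find_eq_handle() resolves for them. A minimal user-space sketch of the idea, assuming a simple round-robin owner (the driver actually derives the owner from its cpu_map table, so the modulo below is purely illustrative):

    #include <stdio.h>

    /* Sketch only: hardware queues beyond the IRQ-channel count reuse an
     * earlier queue's EQ. The modulo owner is an assumption made for
     * illustration; lpfc resolves the owner via lpfc_find_eq_handle(). */
    int main(void)
    {
            int cfg_hdw_queue = 8, cfg_irq_chann = 4, idx;

            for (idx = 0; idx < cfg_hdw_queue; idx++)
                    printf("hdwq %d -> EQ %d%s\n", idx, idx % cfg_irq_chann,
                           idx < cfg_irq_chann ? "" : " (shared)");
            return 0;
    }
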
8845 | 8869 | ||
8846 | 8870 | ||
8847 | /* Allocate SCSI SLI4 CQ/WQs */ | 8871 | /* Allocate SCSI SLI4 CQ/WQs */ |
8848 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) | 8872 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
8849 | if (lpfc_alloc_fcp_wq_cq(phba, idx)) | 8873 | if (lpfc_alloc_fcp_wq_cq(phba, idx)) |
8850 | goto out_error; | 8874 | goto out_error; |
8875 | } | ||
8851 | 8876 | ||
8852 | /* Allocate NVME SLI4 CQ/WQs */ | 8877 | /* Allocate NVME SLI4 CQ/WQs */ |
8853 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { | 8878 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
8854 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) | 8879 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
8855 | if (lpfc_alloc_nvme_wq_cq(phba, idx)) | 8880 | if (lpfc_alloc_nvme_wq_cq(phba, idx)) |
8856 | goto out_error; | 8881 | goto out_error; |
8882 | } | ||
8857 | 8883 | ||
8858 | if (phba->nvmet_support) { | 8884 | if (phba->nvmet_support) { |
8859 | for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { | 8885 | for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { |
@@ -8871,6 +8897,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8871 | } | 8897 | } |
8872 | qdesc->qe_valid = 1; | 8898 | qdesc->qe_valid = 1; |
8873 | qdesc->hdwq = idx; | 8899 | qdesc->hdwq = idx; |
8900 | qdesc->chann = idx; | ||
8874 | phba->sli4_hba.nvmet_cqset[idx] = qdesc; | 8901 | phba->sli4_hba.nvmet_cqset[idx] = qdesc; |
8875 | } | 8902 | } |
8876 | } | 8903 | } |
@@ -8902,6 +8929,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8902 | goto out_error; | 8929 | goto out_error; |
8903 | } | 8930 | } |
8904 | qdesc->qe_valid = 1; | 8931 | qdesc->qe_valid = 1; |
8932 | qdesc->chann = 0; | ||
8905 | phba->sli4_hba.els_cq = qdesc; | 8933 | phba->sli4_hba.els_cq = qdesc; |
8906 | 8934 | ||
8907 | 8935 | ||
@@ -8919,6 +8947,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8919 | "0505 Failed allocate slow-path MQ\n"); | 8947 | "0505 Failed allocate slow-path MQ\n"); |
8920 | goto out_error; | 8948 | goto out_error; |
8921 | } | 8949 | } |
8950 | qdesc->chann = 0; | ||
8922 | phba->sli4_hba.mbx_wq = qdesc; | 8951 | phba->sli4_hba.mbx_wq = qdesc; |
8923 | 8952 | ||
8924 | /* | 8953 | /* |
@@ -8934,6 +8963,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8934 | "0504 Failed allocate slow-path ELS WQ\n"); | 8963 | "0504 Failed allocate slow-path ELS WQ\n"); |
8935 | goto out_error; | 8964 | goto out_error; |
8936 | } | 8965 | } |
8966 | qdesc->chann = 0; | ||
8937 | phba->sli4_hba.els_wq = qdesc; | 8967 | phba->sli4_hba.els_wq = qdesc; |
8938 | list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); | 8968 | list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); |
8939 | 8969 | ||
@@ -8947,6 +8977,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8947 | "6079 Failed allocate NVME LS CQ\n"); | 8977 | "6079 Failed allocate NVME LS CQ\n"); |
8948 | goto out_error; | 8978 | goto out_error; |
8949 | } | 8979 | } |
8980 | qdesc->chann = 0; | ||
8950 | qdesc->qe_valid = 1; | 8981 | qdesc->qe_valid = 1; |
8951 | phba->sli4_hba.nvmels_cq = qdesc; | 8982 | phba->sli4_hba.nvmels_cq = qdesc; |
8952 | 8983 | ||
@@ -8959,6 +8990,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) | |||
8959 | "6080 Failed allocate NVME LS WQ\n"); | 8990 | "6080 Failed allocate NVME LS WQ\n"); |
8960 | goto out_error; | 8991 | goto out_error; |
8961 | } | 8992 | } |
8993 | qdesc->chann = 0; | ||
8962 | phba->sli4_hba.nvmels_wq = qdesc; | 8994 | phba->sli4_hba.nvmels_wq = qdesc; |
8963 | list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); | 8995 | list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); |
8964 | } | 8996 | } |
@@ -9085,17 +9117,21 @@ lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) | |||
9085 | } | 9117 | } |
9086 | 9118 | ||
9087 | static inline void | 9119 | static inline void |
9088 | lpfc_sli4_release_hdwq(struct lpfc_sli4_hdw_queue *hdwq, int max) | 9120 | lpfc_sli4_release_hdwq(struct lpfc_hba *phba) |
9089 | { | 9121 | { |
9122 | struct lpfc_sli4_hdw_queue *hdwq; | ||
9090 | uint32_t idx; | 9123 | uint32_t idx; |
9091 | 9124 | ||
9092 | for (idx = 0; idx < max; idx++) { | 9125 | hdwq = phba->sli4_hba.hdwq; |
9093 | lpfc_sli4_queue_free(hdwq[idx].hba_eq); | 9126 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
9127 | if (idx < phba->cfg_irq_chann) | ||
9128 | lpfc_sli4_queue_free(hdwq[idx].hba_eq); | ||
9129 | hdwq[idx].hba_eq = NULL; | ||
9130 | |||
9094 | lpfc_sli4_queue_free(hdwq[idx].fcp_cq); | 9131 | lpfc_sli4_queue_free(hdwq[idx].fcp_cq); |
9095 | lpfc_sli4_queue_free(hdwq[idx].nvme_cq); | 9132 | lpfc_sli4_queue_free(hdwq[idx].nvme_cq); |
9096 | lpfc_sli4_queue_free(hdwq[idx].fcp_wq); | 9133 | lpfc_sli4_queue_free(hdwq[idx].fcp_wq); |
9097 | lpfc_sli4_queue_free(hdwq[idx].nvme_wq); | 9134 | lpfc_sli4_queue_free(hdwq[idx].nvme_wq); |
9098 | hdwq[idx].hba_eq = NULL; | ||
9099 | hdwq[idx].fcp_cq = NULL; | 9135 | hdwq[idx].fcp_cq = NULL; |
9100 | hdwq[idx].nvme_cq = NULL; | 9136 | hdwq[idx].nvme_cq = NULL; |
9101 | hdwq[idx].fcp_wq = NULL; | 9137 | hdwq[idx].fcp_wq = NULL; |
@@ -9120,8 +9156,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) | |||
9120 | { | 9156 | { |
9121 | /* Release HBA eqs */ | 9157 | /* Release HBA eqs */ |
9122 | if (phba->sli4_hba.hdwq) | 9158 | if (phba->sli4_hba.hdwq) |
9123 | lpfc_sli4_release_hdwq(phba->sli4_hba.hdwq, | 9159 | lpfc_sli4_release_hdwq(phba); |
9124 | phba->cfg_hdw_queue); | ||
9125 | 9160 | ||
9126 | if (phba->nvmet_support) { | 9161 | if (phba->nvmet_support) { |
9127 | lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, | 9162 | lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, |
@@ -9202,7 +9237,6 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, | |||
9202 | qidx, (uint32_t)rc); | 9237 | qidx, (uint32_t)rc); |
9203 | return rc; | 9238 | return rc; |
9204 | } | 9239 | } |
9205 | cq->chann = qidx; | ||
9206 | 9240 | ||
9207 | if (qtype != LPFC_MBOX) { | 9241 | if (qtype != LPFC_MBOX) { |
9208 | /* Setup cq_map for fast lookup */ | 9242 | /* Setup cq_map for fast lookup */ |
@@ -9222,7 +9256,6 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, | |||
9222 | /* no need to tear down cq - caller will do so */ | 9256 | /* no need to tear down cq - caller will do so */ |
9223 | return rc; | 9257 | return rc; |
9224 | } | 9258 | } |
9225 | wq->chann = qidx; | ||
9226 | 9259 | ||
9227 | /* Bind this CQ/WQ to the NVME ring */ | 9260 | /* Bind this CQ/WQ to the NVME ring */ |
9228 | pring = wq->pring; | 9261 | pring = wq->pring; |
@@ -9252,6 +9285,38 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, | |||
9252 | } | 9285 | } |
9253 | 9286 | ||
9254 | /** | 9287 | /** |
9288 | * lpfc_setup_cq_lookup - Setup the CQ lookup table | ||
9289 | * @phba: pointer to lpfc hba data structure. | ||
9290 | * | ||
9291 | * This routine will populate the cq_lookup table with the | ||
9292 | * queue_ids of all available fast-path (FCP/NVME) CQs. | ||
9293 | **/ | ||
9294 | void | ||
9295 | lpfc_setup_cq_lookup(struct lpfc_hba *phba) | ||
9296 | { | ||
9297 | struct lpfc_queue *eq, *childq; | ||
9298 | struct lpfc_sli4_hdw_queue *qp; | ||
9299 | int qidx; | ||
9300 | |||
9301 | qp = phba->sli4_hba.hdwq; | ||
9302 | memset(phba->sli4_hba.cq_lookup, 0, | ||
9303 | (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); | ||
9304 | for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { | ||
9305 | eq = qp[qidx].hba_eq; | ||
9306 | if (!eq) | ||
9307 | continue; | ||
9308 | list_for_each_entry(childq, &eq->child_list, list) { | ||
9309 | if (childq->queue_id > phba->sli4_hba.cq_max) | ||
9310 | continue; | ||
9311 | if ((childq->subtype == LPFC_FCP) || | ||
9312 | (childq->subtype == LPFC_NVME)) | ||
9313 | phba->sli4_hba.cq_lookup[childq->queue_id] = | ||
9314 | childq; | ||
9315 | } | ||
9316 | } | ||
9317 | } | ||
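
This table pays off in the fast-path EQE handler further down (see the lpfc_sli4_hba_handle_eqe hunk): a CQID pulled from an EQE resolves in O(1) instead of being compared against each hdwq's cq_map fields. A sketch of the lookup contract, kernel context assumed, using only fields visible in this diff:

    /* Sketch: resolve an EQE's CQID to its CQ with one array index.
     * cq_lookup holds cq_max + 1 slots, so any queue_id <= cq_max is a
     * safe index; a NULL slot means a slow-path CQ, and the caller falls
     * back to the existing linear NVMET/slow-path checks. */
    static struct lpfc_queue *
    cq_from_cqid_sketch(struct lpfc_hba *phba, uint32_t cqid)
    {
            if (cqid <= phba->sli4_hba.cq_max)
                    return phba->sli4_hba.cq_lookup[cqid];
            return NULL;
    }
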
9318 | |||
9319 | /** | ||
9255 | * lpfc_sli4_queue_setup - Set up all the SLI4 queues | 9320 | * lpfc_sli4_queue_setup - Set up all the SLI4 queues |
9256 | * @phba: pointer to lpfc hba data structure. | 9321 | * @phba: pointer to lpfc hba data structure. |
9257 | * | 9322 | * |
@@ -9331,7 +9396,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) | |||
9331 | rc = -ENOMEM; | 9396 | rc = -ENOMEM; |
9332 | goto out_error; | 9397 | goto out_error; |
9333 | } | 9398 | } |
9334 | for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { | 9399 | for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
9335 | if (!qp[qidx].hba_eq) { | 9400 | if (!qp[qidx].hba_eq) { |
9336 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 9401 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
9337 | "0522 Fast-path EQ (%d) not " | 9402 | "0522 Fast-path EQ (%d) not " |
@@ -9578,11 +9643,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) | |||
9578 | phba->sli4_hba.dat_rq->queue_id, | 9643 | phba->sli4_hba.dat_rq->queue_id, |
9579 | phba->sli4_hba.els_cq->queue_id); | 9644 | phba->sli4_hba.els_cq->queue_id); |
9580 | 9645 | ||
9581 | for (qidx = 0; qidx < phba->cfg_hdw_queue; | 9646 | for (qidx = 0; qidx < phba->cfg_irq_chann; |
9582 | qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) | 9647 | qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) |
9583 | lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, | 9648 | lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, |
9584 | phba->cfg_fcp_imax); | 9649 | phba->cfg_fcp_imax); |
9585 | 9650 | ||
9651 | if (phba->sli4_hba.cq_max) { | ||
9652 | kfree(phba->sli4_hba.cq_lookup); | ||
9653 | phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), | ||
9654 | sizeof(struct lpfc_queue *), GFP_KERNEL); | ||
9655 | if (!phba->sli4_hba.cq_lookup) { | ||
9656 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9657 | "0549 Failed setup of CQ Lookup table: " | ||
9658 | "size 0x%x\n", phba->sli4_hba.cq_max); | ||
9659 | goto out_destroy; | ||
9660 | } | ||
9661 | lpfc_setup_cq_lookup(phba); | ||
9662 | } | ||
9586 | return 0; | 9663 | return 0; |
9587 | 9664 | ||
9588 | out_destroy: | 9665 | out_destroy: |
@@ -9664,9 +9741,14 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) | |||
9664 | lpfc_wq_destroy(phba, qp->nvme_wq); | 9741 | lpfc_wq_destroy(phba, qp->nvme_wq); |
9665 | lpfc_cq_destroy(phba, qp->fcp_cq); | 9742 | lpfc_cq_destroy(phba, qp->fcp_cq); |
9666 | lpfc_cq_destroy(phba, qp->nvme_cq); | 9743 | lpfc_cq_destroy(phba, qp->nvme_cq); |
9667 | lpfc_eq_destroy(phba, qp->hba_eq); | 9744 | if (qidx < phba->cfg_irq_chann) |
9745 | lpfc_eq_destroy(phba, qp->hba_eq); | ||
9668 | } | 9746 | } |
9669 | } | 9747 | } |
9748 | |||
9749 | kfree(phba->sli4_hba.cq_lookup); | ||
9750 | phba->sli4_hba.cq_lookup = NULL; | ||
9751 | phba->sli4_hba.cq_max = 0; | ||
9670 | } | 9752 | } |
9671 | 9753 | ||
9672 | /** | 9754 | /** |
@@ -10446,22 +10528,198 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) | |||
10446 | } | 10528 | } |
10447 | 10529 | ||
10448 | /** | 10530 | /** |
10531 | * lpfc_find_cpu_handle - Find the CPU for the specified EQ or Hardware Queue | ||
10532 | * @phba: pointer to lpfc hba data structure. | ||
10533 | * @id: EQ vector index or Hardware Queue index | ||
10534 | * @match: LPFC_FIND_BY_EQ = match by EQ | ||
10535 | * LPFC_FIND_BY_HDWQ = match by Hardware Queue | ||
10536 | */ | ||
10537 | static uint16_t | ||
10538 | lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) | ||
10539 | { | ||
10540 | struct lpfc_vector_map_info *cpup; | ||
10541 | int cpu; | ||
10542 | |||
10543 | /* Find the CPU that matches the specified EQ or Hardware Queue */ | ||
10544 | cpup = phba->sli4_hba.cpu_map; | ||
10545 | for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { | ||
10546 | if ((match == LPFC_FIND_BY_EQ) && | ||
10547 | (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && | ||
10548 | (cpup->eq == id)) | ||
10549 | return cpu; | ||
10550 | if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) | ||
10551 | return cpu; | ||
10552 | cpup++; | ||
10553 | } | ||
10554 | return 0; | ||
10555 | } | ||
10556 | |||
10557 | /** | ||
10558 | * lpfc_find_eq_handle - Find the EQ that corresponds to the specified | ||
10559 | * Hardware Queue | ||
10560 | * @phba: pointer to lpfc hba data structure. | ||
10561 | * @hdwq: Hardware Queue index | ||
10562 | */ | ||
10563 | static uint16_t | ||
10564 | lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq) | ||
10565 | { | ||
10566 | struct lpfc_vector_map_info *cpup; | ||
10567 | int cpu; | ||
10568 | |||
10569 | /* Find the EQ assigned to the specified Hardware Queue */ | ||
10570 | cpup = phba->sli4_hba.cpu_map; | ||
10571 | for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { | ||
10572 | if (cpup->hdwq == hdwq) | ||
10573 | return cpup->eq; | ||
10574 | cpup++; | ||
10575 | } | ||
10576 | return 0; | ||
10577 | } | ||
10578 | |||
10579 | /** | ||
10580 | * lpfc_find_phys_id_eq - Find the next EQ that corresponds to the specified | ||
10581 | * Physical Id. | ||
10582 | * @phba: pointer to lpfc hba data structure. | ||
10583 | * @eqidx: EQ index | ||
10584 | * @phys_id: CPU package physical id | ||
10585 | */ | ||
10586 | static uint16_t | ||
10587 | lpfc_find_phys_id_eq(struct lpfc_hba *phba, uint16_t eqidx, uint16_t phys_id) | ||
10588 | { | ||
10589 | struct lpfc_vector_map_info *cpup; | ||
10590 | int cpu, desired_phys_id; | ||
10591 | |||
10592 | desired_phys_id = LPFC_VECTOR_MAP_EMPTY; | ||
10593 | |||
10594 | /* Find the desired phys_id for the specified EQ */ | ||
10595 | cpup = phba->sli4_hba.cpu_map; | ||
10596 | for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { | ||
10597 | if ((cpup->irq != LPFC_VECTOR_MAP_EMPTY) && | ||
10598 | (cpup->eq == eqidx)) { | ||
10599 | desired_phys_id = cpup->phys_id; | ||
10600 | break; | ||
10601 | } | ||
10602 | cpup++; | ||
10603 | } | ||
10604 | if (phys_id == desired_phys_id) | ||
10605 | return eqidx; | ||
10606 | |||
10607 | /* Find an EQ that's on the specified phys_id */ | ||
10608 | cpup = phba->sli4_hba.cpu_map; | ||
10609 | for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { | ||
10610 | if ((cpup->irq != LPFC_VECTOR_MAP_EMPTY) && | ||
10611 | (cpup->phys_id == phys_id)) | ||
10612 | return cpup->eq; | ||
10613 | cpup++; | ||
10614 | } | ||
10615 | return 0; | ||
10616 | } | ||
10617 | |||
10618 | /** | ||
10619 | * lpfc_find_cpu_map - Find next available CPU map entry that matches the | ||
10620 | * phys_id and core_id. | ||
10621 | * @phba: pointer to lpfc hba data structure. | ||
10622 | * @phys_id: CPU package physical id | ||
10623 | * @core_id: CPU core id | ||
10624 | * @hdwqidx: Hardware Queue index | ||
10625 | * @eqidx: EQ index | ||
10626 | * @isr_avail: Should an IRQ be associated with this entry | ||
10627 | */ | ||
10628 | static struct lpfc_vector_map_info * | ||
10629 | lpfc_find_cpu_map(struct lpfc_hba *phba, uint16_t phys_id, uint16_t core_id, | ||
10630 | uint16_t hdwqidx, uint16_t eqidx, int isr_avail) | ||
10631 | { | ||
10632 | struct lpfc_vector_map_info *cpup; | ||
10633 | int cpu; | ||
10634 | |||
10635 | cpup = phba->sli4_hba.cpu_map; | ||
10636 | for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { | ||
10637 | /* Does the cpup match the one we are looking for */ | ||
10638 | if ((cpup->phys_id == phys_id) && | ||
10639 | (cpup->core_id == core_id)) { | ||
10640 | /* If it has been already assigned, then skip it */ | ||
10641 | if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) { | ||
10642 | cpup++; | ||
10643 | continue; | ||
10644 | } | ||
10645 | /* No IRQ vector left: pick an EQ on this same phys_id */ | ||
10646 | if (!isr_avail) | ||
10647 | cpup->eq = lpfc_find_phys_id_eq(phba, eqidx, | ||
10648 | phys_id); | ||
10649 | else | ||
10650 | cpup->eq = eqidx; | ||
10651 | |||
10652 | cpup->hdwq = hdwqidx; | ||
10653 | if (isr_avail) { | ||
10654 | cpup->irq = | ||
10655 | pci_irq_vector(phba->pcidev, eqidx); | ||
10656 | |||
10657 | /* Now affinitize to the selected CPU */ | ||
10658 | irq_set_affinity_hint(cpup->irq, | ||
10659 | get_cpu_mask(cpu)); | ||
10660 | irq_set_status_flags(cpup->irq, | ||
10661 | IRQ_NO_BALANCING); | ||
10662 | |||
10663 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
10664 | "3330 Set Affinity: CPU %d " | ||
10665 | "EQ %d irq %d (HDWQ %x)\n", | ||
10666 | cpu, cpup->eq, | ||
10667 | cpup->irq, cpup->hdwq); | ||
10668 | } | ||
10669 | return cpup; | ||
10670 | } | ||
10671 | cpup++; | ||
10672 | } | ||
10673 | return NULL; | ||
10674 | } | ||
10675 | |||
10676 | #ifdef CONFIG_X86 | ||
10677 | /** | ||
10678 | * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded | ||
10679 | * @phba: pointer to lpfc hba data structure. | ||
10680 | * @cpu: CPU map index | ||
10681 | * @phys_id: CPU package physical id | ||
10682 | * @core_id: CPU core id | ||
10683 | */ | ||
10684 | static int | ||
10685 | lpfc_find_hyper(struct lpfc_hba *phba, int cpu, | ||
10686 | uint16_t phys_id, uint16_t core_id) | ||
10687 | { | ||
10688 | struct lpfc_vector_map_info *cpup; | ||
10689 | int idx; | ||
10690 | |||
10691 | cpup = phba->sli4_hba.cpu_map; | ||
10692 | for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { | ||
10693 | /* Does the cpup match the one we are looking for */ | ||
10694 | if ((cpup->phys_id == phys_id) && | ||
10695 | (cpup->core_id == core_id) && | ||
10696 | (cpu != idx)) { | ||
10697 | return 1; | ||
10698 | } | ||
10699 | cpup++; | ||
10700 | } | ||
10701 | return 0; | ||
10702 | } | ||
10703 | #endif | ||
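
lpfc_find_hyper() works purely by collision: a CPU is flagged as hyper-threaded when any other present CPU already reports the same (phys_id, core_id) pair, so both SMT siblings of a core are flagged. A self-contained analogue with illustrative topology values:

    #include <stdio.h>

    /* Analogue of lpfc_find_hyper(): flag a CPU when another CPU shares
     * its (phys_id, core_id) pair. Topology below is illustrative only. */
    struct map { int phys_id, core_id; };

    static int find_hyper(const struct map *m, int n, int cpu)
    {
            int idx;

            for (idx = 0; idx < n; idx++)
                    if (idx != cpu &&
                        m[idx].phys_id == m[cpu].phys_id &&
                        m[idx].core_id == m[cpu].core_id)
                            return 1;       /* another CPU shares this core */
            return 0;
    }

    int main(void)
    {
            struct map m[] = { {0, 0}, {0, 1}, {0, 0}, {0, 2} };
            int cpu;

            for (cpu = 0; cpu < 4; cpu++)   /* cpus 0 and 2 share core 0 */
                    printf("cpu %d hyper=%d\n", cpu, find_hyper(m, 4, cpu));
            return 0;
    }
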
10704 | |||
10705 | /** | ||
10449 | * lpfc_cpu_affinity_check - Check vector CPU affinity mappings | 10706 | * lpfc_cpu_affinity_check - Check vector CPU affinity mappings |
10450 | * @phba: pointer to lpfc hba data structure. | 10707 | * @phba: pointer to lpfc hba data structure. |
10708 | * @vectors: number of msix vectors allocated. | ||
10451 | * | 10709 | * |
10452 | * The routine will figure out the CPU affinity assignment for every | 10710 | * The routine will figure out the CPU affinity assignment for every |
10453 | * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated | 10711 | * MSI-X vector allocated for the HBA. |
10454 | * with a pointer to the CPU mask that defines ALL the CPUs this vector | ||
10455 | * can be associated with. If the vector can be uniquely associated with | ||
10456 | * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. | ||
10457 | * In addition, the CPU to IO channel mapping will be calculated | 10712 | * In addition, the CPU to IO channel mapping will be calculated |
10458 | * and the phba->sli4_hba.cpu_map array will reflect this. | 10713 | * and the phba->sli4_hba.cpu_map array will reflect this. |
10459 | */ | 10714 | */ |
10460 | static void | 10715 | static void |
10461 | lpfc_cpu_affinity_check(struct lpfc_hba *phba) | 10716 | lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) |
10462 | { | 10717 | { |
10718 | int i, j, idx, phys_id; | ||
10719 | int max_phys_id, min_phys_id; | ||
10720 | int max_core_id, min_core_id; | ||
10463 | struct lpfc_vector_map_info *cpup; | 10721 | struct lpfc_vector_map_info *cpup; |
10464 | int cpu, idx; | 10722 | int cpu, eqidx, hdwqidx, isr_avail; |
10465 | #ifdef CONFIG_X86 | 10723 | #ifdef CONFIG_X86 |
10466 | struct cpuinfo_x86 *cpuinfo; | 10724 | struct cpuinfo_x86 *cpuinfo; |
10467 | #endif | 10725 | #endif |
@@ -10471,6 +10729,12 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba) | |||
10471 | (sizeof(struct lpfc_vector_map_info) * | 10729 | (sizeof(struct lpfc_vector_map_info) * |
10472 | phba->sli4_hba.num_present_cpu)); | 10730 | phba->sli4_hba.num_present_cpu)); |
10473 | 10731 | ||
10732 | max_phys_id = 0; | ||
10733 | min_phys_id = 0xffff; | ||
10734 | max_core_id = 0; | ||
10735 | min_core_id = 0xffff; | ||
10736 | phys_id = 0; | ||
10737 | |||
10474 | /* Update CPU map with physical id and core id of each CPU */ | 10738 | /* Update CPU map with physical id and core id of each CPU */ |
10475 | cpup = phba->sli4_hba.cpu_map; | 10739 | cpup = phba->sli4_hba.cpu_map; |
10476 | for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { | 10740 | for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { |
@@ -10478,34 +10742,91 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba) | |||
10478 | cpuinfo = &cpu_data(cpu); | 10742 | cpuinfo = &cpu_data(cpu); |
10479 | cpup->phys_id = cpuinfo->phys_proc_id; | 10743 | cpup->phys_id = cpuinfo->phys_proc_id; |
10480 | cpup->core_id = cpuinfo->cpu_core_id; | 10744 | cpup->core_id = cpuinfo->cpu_core_id; |
10745 | cpup->hyper = lpfc_find_hyper(phba, cpu, | ||
10746 | cpup->phys_id, cpup->core_id); | ||
10481 | #else | 10747 | #else |
10482 | /* No distinction between CPUs for other platforms */ | 10748 | /* No distinction between CPUs for other platforms */ |
10483 | cpup->phys_id = 0; | 10749 | cpup->phys_id = 0; |
10484 | cpup->core_id = 0; | 10750 | cpup->core_id = cpu; |
10751 | cpup->hyper = 0; | ||
10485 | #endif | 10752 | #endif |
10753 | |||
10486 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 10754 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
10487 | "3328 CPU physid %d coreid %d\n", | 10755 | "3328 CPU physid %d coreid %d\n", |
10488 | cpup->phys_id, cpup->core_id); | 10756 | cpup->phys_id, cpup->core_id); |
10757 | |||
10758 | if (cpup->phys_id > max_phys_id) | ||
10759 | max_phys_id = cpup->phys_id; | ||
10760 | if (cpup->phys_id < min_phys_id) | ||
10761 | min_phys_id = cpup->phys_id; | ||
10762 | |||
10763 | if (cpup->core_id > max_core_id) | ||
10764 | max_core_id = cpup->core_id; | ||
10765 | if (cpup->core_id < min_core_id) | ||
10766 | min_core_id = cpup->core_id; | ||
10767 | |||
10489 | cpup++; | 10768 | cpup++; |
10490 | } | 10769 | } |
10491 | 10770 | ||
10492 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { | 10771 | /* |
10493 | cpup = &phba->sli4_hba.cpu_map[idx]; | 10772 | * If the number of IRQ vectors == number of CPUs, |
10494 | cpup->irq = pci_irq_vector(phba->pcidev, idx); | 10773 | * mapping is pretty simple: 1 to 1. |
10774 | * This is the desired path if NVME is enabled. | ||
10775 | */ | ||
10776 | if (vectors == phba->sli4_hba.num_present_cpu) { | ||
10777 | cpup = phba->sli4_hba.cpu_map; | ||
10778 | for (idx = 0; idx < vectors; idx++) { | ||
10779 | cpup->eq = idx; | ||
10780 | cpup->hdwq = idx; | ||
10781 | cpup->irq = pci_irq_vector(phba->pcidev, idx); | ||
10782 | |||
10783 | /* Now affinitize to the selected CPU */ | ||
10784 | irq_set_affinity_hint( | ||
10785 | pci_irq_vector(phba->pcidev, idx), | ||
10786 | get_cpu_mask(idx)); | ||
10787 | irq_set_status_flags(cpup->irq, IRQ_NO_BALANCING); | ||
10495 | 10788 | ||
10496 | /* For now assume vector N maps to CPU N */ | 10789 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
10497 | irq_set_affinity_hint(cpup->irq, get_cpu_mask(idx)); | 10790 | "3336 Set Affinity: CPU %d " |
10498 | cpup->hdwq = idx; | 10791 | "EQ %d irq %d\n", |
10792 | idx, cpup->eq, | ||
10793 | pci_irq_vector(phba->pcidev, idx)); | ||
10794 | cpup++; | ||
10795 | } | ||
10796 | return; | ||
10797 | } | ||
10499 | 10798 | ||
10500 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 10799 | idx = 0; |
10501 | "3336 Set Affinity: CPU %d " | 10800 | isr_avail = 1; |
10502 | "hdwq %d irq %d\n", | 10801 | eqidx = 0; |
10503 | cpu, cpup->hdwq, cpup->irq); | 10802 | hdwqidx = 0; |
10803 | |||
10804 | /* Mapping is more complicated for this case. Hardware Queues are | ||
10805 | * assigned in a "ping pong" fashion, alternating between the | ||
10806 | * available phys_ids. | ||
10807 | */ | ||
10808 | while (idx < phba->sli4_hba.num_present_cpu) { | ||
10809 | for (i = min_core_id; i <= max_core_id; i++) { | ||
10810 | for (j = min_phys_id; j <= max_phys_id; j++) { | ||
10811 | cpup = lpfc_find_cpu_map(phba, j, i, hdwqidx, | ||
10812 | eqidx, isr_avail); | ||
10813 | if (!cpup) | ||
10814 | continue; | ||
10815 | idx++; | ||
10816 | hdwqidx++; | ||
10817 | if (hdwqidx >= phba->cfg_hdw_queue) | ||
10818 | hdwqidx = 0; | ||
10819 | eqidx++; | ||
10820 | if (eqidx >= phba->cfg_irq_chann) { | ||
10821 | isr_avail = 0; | ||
10822 | eqidx = 0; | ||
10823 | } | ||
10824 | } | ||
10825 | } | ||
10504 | } | 10826 | } |
10505 | return; | 10827 | return; |
10506 | } | 10828 | } |
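
Worked through on a small illustrative topology (two packages, two cores each, cfg_hdw_queue = 4, cfg_irq_chann = 2), the nested core/phys loops above produce:

    (phys 0, core 0) -> hdwq 0, EQ 0, owns IRQ vector 0
    (phys 1, core 0) -> hdwq 1, EQ 1, owns IRQ vector 1
    (phys 0, core 1) -> hdwq 2, EQ 0 (same package), no IRQ
    (phys 1, core 1) -> hdwq 3, EQ 1 (same package), no IRQ

Consecutive hardware queues alternate ("ping pong") between packages, and once eqidx wraps past cfg_irq_chann the remaining CPUs are mapped with isr_avail = 0 and share a package-local EQ via lpfc_find_phys_id_eq(). These assignments are inferred from the loop structure, not taken from a captured log.
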
10507 | 10829 | ||
10508 | |||
10509 | /** | 10830 | /** |
10510 | * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device | 10831 | * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device |
10511 | * @phba: pointer to lpfc hba data structure. | 10832 | * @phba: pointer to lpfc hba data structure. |
@@ -10524,7 +10845,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) | |||
10524 | char *name; | 10845 | char *name; |
10525 | 10846 | ||
10526 | /* Set up MSI-X multi-message vectors */ | 10847 | /* Set up MSI-X multi-message vectors */ |
10527 | vectors = phba->cfg_hdw_queue; | 10848 | vectors = phba->cfg_irq_chann; |
10528 | 10849 | ||
10529 | rc = pci_alloc_irq_vectors(phba->pcidev, | 10850 | rc = pci_alloc_irq_vectors(phba->pcidev, |
10530 | (phba->nvmet_support) ? 1 : 2, | 10851 | (phba->nvmet_support) ? 1 : 2, |
@@ -10545,7 +10866,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) | |||
10545 | 10866 | ||
10546 | phba->sli4_hba.hba_eq_hdl[index].idx = index; | 10867 | phba->sli4_hba.hba_eq_hdl[index].idx = index; |
10547 | phba->sli4_hba.hba_eq_hdl[index].phba = phba; | 10868 | phba->sli4_hba.hba_eq_hdl[index].phba = phba; |
10548 | atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); | ||
10549 | rc = request_irq(pci_irq_vector(phba->pcidev, index), | 10869 | rc = request_irq(pci_irq_vector(phba->pcidev, index), |
10550 | &lpfc_sli4_hba_intr_handler, 0, | 10870 | &lpfc_sli4_hba_intr_handler, 0, |
10551 | name, | 10871 | name, |
@@ -10558,17 +10878,16 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) | |||
10558 | } | 10878 | } |
10559 | } | 10879 | } |
10560 | 10880 | ||
10561 | if (vectors != phba->cfg_hdw_queue) { | 10881 | if (vectors != phba->cfg_irq_chann) { |
10562 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 10882 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
10563 | "3238 Reducing IO channels to match number of " | 10883 | "3238 Reducing IO channels to match number of " |
10564 | "MSI-X vectors, requested %d got %d\n", | 10884 | "MSI-X vectors, requested %d got %d\n", |
10565 | phba->cfg_hdw_queue, vectors); | 10885 | phba->cfg_irq_chann, vectors); |
10566 | if (phba->cfg_hdw_queue > vectors) | 10886 | if (phba->cfg_irq_chann > vectors) |
10567 | phba->cfg_hdw_queue = vectors; | 10887 | phba->cfg_irq_chann = vectors; |
10568 | if (phba->cfg_nvmet_mrq > vectors) | 10888 | if (phba->cfg_nvmet_mrq > vectors) |
10569 | phba->cfg_nvmet_mrq = vectors; | 10889 | phba->cfg_nvmet_mrq = vectors; |
10570 | } | 10890 | } |
10571 | lpfc_cpu_affinity_check(phba); | ||
10572 | 10891 | ||
10573 | return rc; | 10892 | return rc; |
10574 | 10893 | ||
@@ -10623,7 +10942,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) | |||
10623 | return rc; | 10942 | return rc; |
10624 | } | 10943 | } |
10625 | 10944 | ||
10626 | for (index = 0; index < phba->cfg_hdw_queue; index++) { | 10945 | for (index = 0; index < phba->cfg_irq_chann; index++) { |
10627 | phba->sli4_hba.hba_eq_hdl[index].idx = index; | 10946 | phba->sli4_hba.hba_eq_hdl[index].idx = index; |
10628 | phba->sli4_hba.hba_eq_hdl[index].phba = phba; | 10947 | phba->sli4_hba.hba_eq_hdl[index].phba = phba; |
10629 | } | 10948 | } |
@@ -10688,11 +11007,10 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) | |||
10688 | phba->intr_type = INTx; | 11007 | phba->intr_type = INTx; |
10689 | intr_mode = 0; | 11008 | intr_mode = 0; |
10690 | 11009 | ||
10691 | for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { | 11010 | for (idx = 0; idx < phba->cfg_irq_chann; idx++) { |
10692 | eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; | 11011 | eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; |
10693 | eqhdl->idx = idx; | 11012 | eqhdl->idx = idx; |
10694 | eqhdl->phba = phba; | 11013 | eqhdl->phba = phba; |
10695 | atomic_set(&eqhdl->hba_eq_in_use, 1); | ||
10696 | } | 11014 | } |
10697 | } | 11015 | } |
10698 | } | 11016 | } |
@@ -10716,7 +11034,7 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba) | |||
10716 | int index; | 11034 | int index; |
10717 | 11035 | ||
10718 | /* Free up MSI-X multi-message vectors */ | 11036 | /* Free up MSI-X multi-message vectors */ |
10719 | for (index = 0; index < phba->cfg_hdw_queue; index++) { | 11037 | for (index = 0; index < phba->cfg_irq_chann; index++) { |
10720 | irq_set_affinity_hint( | 11038 | irq_set_affinity_hint( |
10721 | pci_irq_vector(phba->pcidev, index), | 11039 | pci_irq_vector(phba->pcidev, index), |
10722 | NULL); | 11040 | NULL); |
@@ -12092,12 +12410,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
12092 | } | 12410 | } |
12093 | /* Default to single EQ for non-MSI-X */ | 12411 | /* Default to single EQ for non-MSI-X */ |
12094 | if (phba->intr_type != MSIX) { | 12412 | if (phba->intr_type != MSIX) { |
12095 | phba->cfg_hdw_queue = 1; | 12413 | phba->cfg_irq_chann = 1; |
12096 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { | 12414 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
12097 | if (phba->nvmet_support) | 12415 | if (phba->nvmet_support) |
12098 | phba->cfg_nvmet_mrq = 1; | 12416 | phba->cfg_nvmet_mrq = 1; |
12099 | } | 12417 | } |
12100 | } | 12418 | } |
12419 | lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); | ||
12101 | 12420 | ||
12102 | /* Create SCSI host to the physical port */ | 12421 | /* Create SCSI host to the physical port */ |
12103 | error = lpfc_create_shost(phba); | 12422 | error = lpfc_create_shost(phba); |
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index c9aacd56a449..9480257c5143 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
@@ -239,7 +239,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, | |||
239 | if (qidx) { | 239 | if (qidx) { |
240 | str = "IO "; /* IO queue */ | 240 | str = "IO "; /* IO queue */ |
241 | qhandle->index = ((qidx - 1) % | 241 | qhandle->index = ((qidx - 1) % |
242 | vport->phba->cfg_hdw_queue); | 242 | lpfc_nvme_template.max_hw_queues); |
243 | } else { | 243 | } else { |
244 | str = "ADM"; /* Admin queue */ | 244 | str = "ADM"; /* Admin queue */ |
245 | qhandle->index = qidx; | 245 | qhandle->index = qidx; |
@@ -1546,14 +1546,12 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, | |||
1546 | } | 1546 | } |
1547 | } | 1547 | } |
1548 | 1548 | ||
1549 | /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ | ||
1549 | if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { | 1550 | if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { |
1550 | idx = lpfc_queue_info->index; | 1551 | idx = lpfc_queue_info->index; |
1551 | } else { | 1552 | } else { |
1552 | cpu = smp_processor_id(); | 1553 | cpu = smp_processor_id(); |
1553 | if (cpu < phba->cfg_hdw_queue) | 1554 | idx = phba->sli4_hba.cpu_map[cpu].hdwq; |
1554 | idx = cpu; | ||
1555 | else | ||
1556 | idx = cpu % phba->cfg_hdw_queue; | ||
1557 | } | 1555 | } |
1558 | 1556 | ||
1559 | lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); | 1557 | lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); |
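
The new else-branch drops the old vector-N-equals-CPU-N assumption and instead trusts the mapping recorded by lpfc_cpu_affinity_check(). As a sketch of the resulting selection logic (kernel context, names as in the diff):

    /* Sketch: choose the hardware queue for this submission.
     * LPFC_FCP_SCHED_BY_HDWQ: honor the index the upper layer chose.
     * Otherwise: use the hdwq owned by the submitting CPU. cpu_map[] was
     * populated at attach time, so a migration right after the read only
     * costs locality, not correctness. */
    if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
            idx = lpfc_queue_info->index;
    else
            idx = phba->sli4_hba.cpu_map[smp_processor_id()].hdwq;
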
@@ -2060,7 +2058,13 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) | |||
2060 | * allocate + 3, one for cmd, one for rsp and one for this alignment | 2058 | * allocate + 3, one for cmd, one for rsp and one for this alignment |
2061 | */ | 2059 | */ |
2062 | lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; | 2060 | lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; |
2063 | lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; | 2061 | |
2062 | /* Advertise how many hw queues we support based on fcp_io_sched */ | ||
2063 | if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) | ||
2064 | lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; | ||
2065 | else | ||
2066 | lpfc_nvme_template.max_hw_queues = | ||
2067 | phba->sli4_hba.num_present_cpu; | ||
2064 | 2068 | ||
2065 | /* localport is allocated from the stack, but the registration | 2069 | /* localport is allocated from the stack, but the registration |
2066 | * call allocates heap memory as well as the private area. | 2070 | * call allocates heap memory as well as the private area. |
@@ -2554,6 +2558,8 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) | |||
2554 | * WQEs have been removed from the txcmplqs. | 2558 | * WQEs have been removed from the txcmplqs. |
2555 | */ | 2559 | */ |
2556 | for (i = 0; i < phba->cfg_hdw_queue; i++) { | 2560 | for (i = 0; i < phba->cfg_hdw_queue; i++) { |
2561 | if (!phba->sli4_hba.hdwq[i].nvme_wq) | ||
2562 | continue; | ||
2557 | pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; | 2563 | pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; |
2558 | 2564 | ||
2559 | if (!pring) | 2565 | if (!pring) |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 7b22cc995d7f..a827520789f1 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -692,10 +692,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | |||
692 | tag = blk_mq_unique_tag(cmnd->request); | 692 | tag = blk_mq_unique_tag(cmnd->request); |
693 | idx = blk_mq_unique_tag_to_hwq(tag); | 693 | idx = blk_mq_unique_tag_to_hwq(tag); |
694 | } else { | 694 | } else { |
695 | if (cpu < phba->cfg_hdw_queue) | 695 | idx = phba->sli4_hba.cpu_map[cpu].hdwq; |
696 | idx = cpu; | ||
697 | else | ||
698 | idx = cpu % phba->cfg_hdw_queue; | ||
699 | } | 696 | } |
700 | 697 | ||
701 | lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, | 698 | lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, |
@@ -3650,6 +3647,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
3650 | struct Scsi_Host *shost; | 3647 | struct Scsi_Host *shost; |
3651 | int idx; | 3648 | int idx; |
3652 | uint32_t logit = LOG_FCP; | 3649 | uint32_t logit = LOG_FCP; |
3650 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | ||
3651 | int cpu; | ||
3652 | #endif | ||
3653 | 3653 | ||
3654 | /* Sanity check on return of outstanding command */ | 3654 | /* Sanity check on return of outstanding command */ |
3655 | cmd = lpfc_cmd->pCmd; | 3655 | cmd = lpfc_cmd->pCmd; |
@@ -3660,6 +3660,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
3660 | if (phba->sli4_hba.hdwq) | 3660 | if (phba->sli4_hba.hdwq) |
3661 | phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; | 3661 | phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; |
3662 | 3662 | ||
3663 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | ||
3664 | if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { | ||
3665 | cpu = smp_processor_id(); | ||
3666 | if (cpu < LPFC_CHECK_CPU_CNT) | ||
3667 | phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; | ||
3668 | } | ||
3669 | #endif | ||
3663 | shost = cmd->device->host; | 3670 | shost = cmd->device->host; |
3664 | 3671 | ||
3665 | lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); | 3672 | lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); |
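
These increments follow the driver's existing cpucheck pattern: fixed-size per-hdwq arrays indexed by CPU and capped by LPFC_CHECK_CPU_CNT, so the hot path never allocates and CPUs past the cap are simply not counted. A self-contained sketch of the pattern, with an illustrative cap standing in for the real constant:

    #include <stdint.h>

    #define CHECK_CPU_CNT 32        /* illustrative stand-in for LPFC_CHECK_CPU_CNT */

    struct hdwq_cpucheck {
            uint32_t xmt_io[CHECK_CPU_CNT];
            uint32_t cmpl_io[CHECK_CPU_CNT];
    };

    /* CPUs at or beyond the cap are silently skipped; the counters are
     * diagnostic only, so losing a few samples is acceptable. */
    static inline void count_cmpl(struct hdwq_cpucheck *c, int cpu)
    {
            if (cpu < CHECK_CPU_CNT)
                    c->cmpl_io[cpu]++;
    }
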
@@ -4336,6 +4343,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) | |||
4336 | struct lpfc_io_buf *lpfc_cmd; | 4343 | struct lpfc_io_buf *lpfc_cmd; |
4337 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | 4344 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
4338 | int err, idx; | 4345 | int err, idx; |
4346 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | ||
4347 | int cpu; | ||
4348 | #endif | ||
4339 | 4349 | ||
4340 | rdata = lpfc_rport_data_from_scsi_device(cmnd->device); | 4350 | rdata = lpfc_rport_data_from_scsi_device(cmnd->device); |
4341 | 4351 | ||
@@ -4450,6 +4460,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) | |||
4450 | 4460 | ||
4451 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); | 4461 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); |
4452 | 4462 | ||
4463 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | ||
4464 | if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { | ||
4465 | cpu = smp_processor_id(); | ||
4466 | if (cpu < LPFC_CHECK_CPU_CNT) { | ||
4467 | struct lpfc_sli4_hdw_queue *hdwq = | ||
4468 | &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no]; | ||
4469 | hdwq->cpucheck_xmt_io[cpu]++; | ||
4470 | } | ||
4471 | } | ||
4472 | #endif | ||
4453 | err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, | 4473 | err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, |
4454 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); | 4474 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); |
4455 | if (err) { | 4475 | if (err) { |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c0f0adccdea7..0cc81321643d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -5586,7 +5586,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) | |||
5586 | LPFC_QUEUE_REARM); | 5586 | LPFC_QUEUE_REARM); |
5587 | } | 5587 | } |
5588 | 5588 | ||
5589 | for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) | 5589 | for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) |
5590 | sli4_hba->sli4_eq_release(qp[qidx].hba_eq, | 5590 | sli4_hba->sli4_eq_release(qp[qidx].hba_eq, |
5591 | LPFC_QUEUE_REARM); | 5591 | LPFC_QUEUE_REARM); |
5592 | } | 5592 | } |
@@ -7878,7 +7878,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) | |||
7878 | /* Find the eq associated with the mcq */ | 7878 | /* Find the eq associated with the mcq */ |
7879 | 7879 | ||
7880 | if (sli4_hba->hdwq) | 7880 | if (sli4_hba->hdwq) |
7881 | for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) | 7881 | for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) |
7882 | if (sli4_hba->hdwq[eqidx].hba_eq->queue_id == | 7882 | if (sli4_hba->hdwq[eqidx].hba_eq->queue_id == |
7883 | sli4_hba->mbx_cq->assoc_qid) { | 7883 | sli4_hba->mbx_cq->assoc_qid) { |
7884 | fpeq = sli4_hba->hdwq[eqidx].hba_eq; | 7884 | fpeq = sli4_hba->hdwq[eqidx].hba_eq; |
@@ -10058,12 +10058,9 @@ int | |||
10058 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, | 10058 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, |
10059 | struct lpfc_iocbq *piocb, uint32_t flag) | 10059 | struct lpfc_iocbq *piocb, uint32_t flag) |
10060 | { | 10060 | { |
10061 | struct lpfc_hba_eq_hdl *hba_eq_hdl; | ||
10062 | struct lpfc_sli_ring *pring; | 10061 | struct lpfc_sli_ring *pring; |
10063 | struct lpfc_queue *fpeq; | ||
10064 | struct lpfc_eqe *eqe; | ||
10065 | unsigned long iflags; | 10062 | unsigned long iflags; |
10066 | int rc, idx; | 10063 | int rc; |
10067 | 10064 | ||
10068 | if (phba->sli_rev == LPFC_SLI_REV4) { | 10065 | if (phba->sli_rev == LPFC_SLI_REV4) { |
10069 | pring = lpfc_sli4_calc_ring(phba, piocb); | 10066 | pring = lpfc_sli4_calc_ring(phba, piocb); |
@@ -10073,34 +10070,6 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, | |||
10073 | spin_lock_irqsave(&pring->ring_lock, iflags); | 10070 | spin_lock_irqsave(&pring->ring_lock, iflags); |
10074 | rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); | 10071 | rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); |
10075 | spin_unlock_irqrestore(&pring->ring_lock, iflags); | 10072 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
10076 | |||
10077 | if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { | ||
10078 | idx = piocb->hba_wqidx; | ||
10079 | hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; | ||
10080 | |||
10081 | if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { | ||
10082 | |||
10083 | /* Get associated EQ with this index */ | ||
10084 | fpeq = phba->sli4_hba.hdwq[idx].hba_eq; | ||
10085 | |||
10086 | /* Turn off interrupts from this EQ */ | ||
10087 | phba->sli4_hba.sli4_eq_clr_intr(fpeq); | ||
10088 | |||
10089 | /* | ||
10090 | * Process all the events on FCP EQ | ||
10091 | */ | ||
10092 | while ((eqe = lpfc_sli4_eq_get(fpeq))) { | ||
10093 | lpfc_sli4_hba_handle_eqe(phba, | ||
10094 | eqe, idx); | ||
10095 | fpeq->EQ_processed++; | ||
10096 | } | ||
10097 | |||
10098 | /* Always clear and re-arm the EQ */ | ||
10099 | phba->sli4_hba.sli4_eq_release(fpeq, | ||
10100 | LPFC_QUEUE_REARM); | ||
10101 | } | ||
10102 | atomic_inc(&hba_eq_hdl->hba_eq_in_use); | ||
10103 | } | ||
10104 | } else { | 10073 | } else { |
10105 | /* For now, SLI2/3 will still use hbalock */ | 10074 | /* For now, SLI2/3 will still use hbalock */ |
10106 | spin_lock_irqsave(&phba->hbalock, iflags); | 10075 | spin_lock_irqsave(&phba->hbalock, iflags); |
@@ -13651,7 +13620,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, | |||
13651 | /* Save EQ associated with this CQ */ | 13620 | /* Save EQ associated with this CQ */ |
13652 | cq->assoc_qp = speq; | 13621 | cq->assoc_qp = speq; |
13653 | 13622 | ||
13654 | if (!queue_work(phba->wq, &cq->spwork)) | 13623 | if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) |
13655 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13624 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
13656 | "0390 Cannot schedule soft IRQ " | 13625 | "0390 Cannot schedule soft IRQ " |
13657 | "for CQ eqcqid=%d, cqid=%d on CPU %d\n", | 13626 | "for CQ eqcqid=%d, cqid=%d on CPU %d\n", |
@@ -14057,18 +14026,11 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, | |||
14057 | /* Get the reference to the corresponding CQ */ | 14026 | /* Get the reference to the corresponding CQ */ |
14058 | cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); | 14027 | cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
14059 | 14028 | ||
14060 | /* First check for NVME/SCSI completion */ | 14029 | /* Use the fast lookup method first */ |
14061 | if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && | 14030 | if (cqid <= phba->sli4_hba.cq_max) { |
14062 | (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map)) { | 14031 | cq = phba->sli4_hba.cq_lookup[cqid]; |
14063 | /* Process NVME / NVMET command completion */ | 14032 | if (cq) |
14064 | cq = phba->sli4_hba.hdwq[qidx].nvme_cq; | 14033 | goto work_cq; |
14065 | goto process_cq; | ||
14066 | } | ||
14067 | |||
14068 | if (cqid == phba->sli4_hba.hdwq[qidx].fcp_cq_map) { | ||
14069 | /* Process FCP command completion */ | ||
14070 | cq = phba->sli4_hba.hdwq[qidx].fcp_cq; | ||
14071 | goto process_cq; | ||
14072 | } | 14034 | } |
14073 | 14035 | ||
14074 | /* Next check for NVMET completion */ | 14036 | /* Next check for NVMET completion */ |
@@ -14103,9 +14065,7 @@ process_cq: | |||
14103 | return; | 14065 | return; |
14104 | } | 14066 | } |
14105 | 14067 | ||
14106 | /* Save EQ associated with this CQ */ | 14068 | work_cq: |
14107 | cq->assoc_qp = phba->sli4_hba.hdwq[qidx].hba_eq; | ||
14108 | |||
14109 | if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) | 14069 | if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) |
14110 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 14070 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
14111 | "0363 Cannot schedule soft IRQ " | 14071 | "0363 Cannot schedule soft IRQ " |
@@ -14233,15 +14193,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) | |||
14233 | if (unlikely(!fpeq)) | 14193 | if (unlikely(!fpeq)) |
14234 | return IRQ_NONE; | 14194 | return IRQ_NONE; |
14235 | 14195 | ||
14236 | if (lpfc_fcp_look_ahead) { | ||
14237 | if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) | ||
14238 | phba->sli4_hba.sli4_eq_clr_intr(fpeq); | ||
14239 | else { | ||
14240 | atomic_inc(&hba_eq_hdl->hba_eq_in_use); | ||
14241 | return IRQ_NONE; | ||
14242 | } | ||
14243 | } | ||
14244 | |||
14245 | /* Check device state for handling interrupt */ | 14196 | /* Check device state for handling interrupt */ |
14246 | if (unlikely(lpfc_intr_state_check(phba))) { | 14197 | if (unlikely(lpfc_intr_state_check(phba))) { |
14247 | /* Check again for link_state with lock held */ | 14198 | /* Check again for link_state with lock held */ |
@@ -14250,8 +14201,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) | |||
14250 | /* Flush, clear interrupt, and rearm the EQ */ | 14201 | /* Flush, clear interrupt, and rearm the EQ */ |
14251 | lpfc_sli4_eq_flush(phba, fpeq); | 14202 | lpfc_sli4_eq_flush(phba, fpeq); |
14252 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 14203 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
14253 | if (lpfc_fcp_look_ahead) | ||
14254 | atomic_inc(&hba_eq_hdl->hba_eq_in_use); | ||
14255 | return IRQ_NONE; | 14204 | return IRQ_NONE; |
14256 | } | 14205 | } |
14257 | 14206 | ||
@@ -14274,12 +14223,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) | |||
14274 | 14223 | ||
14275 | if (unlikely(ecount == 0)) { | 14224 | if (unlikely(ecount == 0)) { |
14276 | fpeq->EQ_no_entry++; | 14225 | fpeq->EQ_no_entry++; |
14277 | |||
14278 | if (lpfc_fcp_look_ahead) { | ||
14279 | atomic_inc(&hba_eq_hdl->hba_eq_in_use); | ||
14280 | return IRQ_NONE; | ||
14281 | } | ||
14282 | |||
14283 | if (phba->intr_type == MSIX) | 14226 | if (phba->intr_type == MSIX) |
14284 | /* MSI-X treated interrupt served as no EQ share INT */ | 14227 | /* MSI-X treated interrupt served as no EQ share INT */ |
14285 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | 14228 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
@@ -14289,9 +14232,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) | |||
14289 | return IRQ_NONE; | 14232 | return IRQ_NONE; |
14290 | } | 14233 | } |
14291 | 14234 | ||
14292 | if (lpfc_fcp_look_ahead) | ||
14293 | atomic_inc(&hba_eq_hdl->hba_eq_in_use); | ||
14294 | |||
14295 | return IRQ_HANDLED; | 14235 | return IRQ_HANDLED; |
14296 | } /* lpfc_sli4_fp_intr_handler */ | 14236 | } /* lpfc_sli4_fp_intr_handler */ |
14297 | 14237 | ||
@@ -14329,7 +14269,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) | |||
14329 | /* | 14269 | /* |
14330 | * Invoke fast-path host attention interrupt handling as appropriate. | 14270 | * Invoke fast-path host attention interrupt handling as appropriate. |
14331 | */ | 14271 | */ |
14332 | for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { | 14272 | for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
14333 | hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, | 14273 | hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, |
14334 | &phba->sli4_hba.hba_eq_hdl[qidx]); | 14274 | &phba->sli4_hba.hba_eq_hdl[qidx]); |
14335 | if (hba_irq_rc == IRQ_HANDLED) | 14275 | if (hba_irq_rc == IRQ_HANDLED) |
@@ -14516,7 +14456,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, | |||
14516 | union lpfc_sli4_cfg_shdr *shdr; | 14456 | union lpfc_sli4_cfg_shdr *shdr; |
14517 | uint16_t dmult; | 14457 | uint16_t dmult; |
14518 | 14458 | ||
14519 | if (startq >= phba->cfg_hdw_queue) | 14459 | if (startq >= phba->cfg_irq_chann) |
14520 | return 0; | 14460 | return 0; |
14521 | 14461 | ||
14522 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 14462 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
@@ -14530,7 +14470,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, | |||
14530 | eq_delay = &mbox->u.mqe.un.eq_delay; | 14470 | eq_delay = &mbox->u.mqe.un.eq_delay; |
14531 | 14471 | ||
14532 | /* Calculate delay multiplier from maximum interrupts per second */ | 14472 | /* Calculate delay multiplier from maximum interrupts per second */
14533 | result = imax / phba->cfg_hdw_queue; | 14473 | result = imax / phba->cfg_irq_chann; |
14534 | if (result > LPFC_DMULT_CONST || result == 0) | 14474 | if (result > LPFC_DMULT_CONST || result == 0) |
14535 | dmult = 0; | 14475 | dmult = 0; |
14536 | else | 14476 | else |
@@ -14539,7 +14479,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, | |||
14539 | dmult = LPFC_DMULT_MAX; | 14479 | dmult = LPFC_DMULT_MAX; |
14540 | 14480 | ||
14541 | cnt = 0; | 14481 | cnt = 0; |
14542 | for (qidx = startq; qidx < phba->cfg_hdw_queue; qidx++) { | 14482 | for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { |
14543 | eq = phba->sli4_hba.hdwq[qidx].hba_eq; | 14483 | eq = phba->sli4_hba.hdwq[qidx].hba_eq; |
14544 | if (!eq) | 14484 | if (!eq) |
14545 | continue; | 14485 | continue; |
@@ -14557,7 +14497,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, | |||
14557 | val = phba->cfg_fcp_imax; | 14497 | val = phba->cfg_fcp_imax; |
14558 | if (val) { | 14498 | if (val) { |
14559 | /* First, interrupts per sec per EQ */ | 14499 | /* First, interrupts per sec per EQ */ |
14560 | val = phba->cfg_fcp_imax / phba->cfg_hdw_queue; | 14500 | val = phba->cfg_fcp_imax / phba->cfg_irq_chann; |
14561 | 14501 | ||
14562 | /* us delay between each interrupt */ | 14502 | /* us delay between each interrupt */ |
14563 | val = LPFC_SEC_TO_USEC / val; | 14503 | val = LPFC_SEC_TO_USEC / val; |
@@ -14852,10 +14792,13 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
14852 | cq->subtype = subtype; | 14792 | cq->subtype = subtype; |
14853 | cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); | 14793 | cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); |
14854 | cq->assoc_qid = eq->queue_id; | 14794 | cq->assoc_qid = eq->queue_id; |
14795 | cq->assoc_qp = eq; | ||
14855 | cq->host_index = 0; | 14796 | cq->host_index = 0; |
14856 | cq->hba_index = 0; | 14797 | cq->hba_index = 0; |
14857 | cq->entry_repost = LPFC_CQ_REPOST; | 14798 | cq->entry_repost = LPFC_CQ_REPOST; |
14858 | 14799 | ||
14800 | if (cq->queue_id > phba->sli4_hba.cq_max) | ||
14801 | phba->sli4_hba.cq_max = cq->queue_id; | ||
14859 | out: | 14802 | out: |
14860 | mempool_free(mbox, phba->mbox_mem_pool); | 14803 | mempool_free(mbox, phba->mbox_mem_pool); |
14861 | return status; | 14804 | return status; |
@@ -15061,6 +15004,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
15061 | cq->type = type; | 15004 | cq->type = type; |
15062 | cq->subtype = subtype; | 15005 | cq->subtype = subtype; |
15063 | cq->assoc_qid = eq->queue_id; | 15006 | cq->assoc_qid = eq->queue_id; |
15007 | cq->assoc_qp = eq; | ||
15064 | cq->host_index = 0; | 15008 | cq->host_index = 0; |
15065 | cq->hba_index = 0; | 15009 | cq->hba_index = 0; |
15066 | cq->entry_repost = LPFC_CQ_REPOST; | 15010 | cq->entry_repost = LPFC_CQ_REPOST; |
@@ -15101,6 +15045,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
15101 | for (idx = 0; idx < numcq; idx++) { | 15045 | for (idx = 0; idx < numcq; idx++) { |
15102 | cq = cqp[idx]; | 15046 | cq = cqp[idx]; |
15103 | cq->queue_id = rc + idx; | 15047 | cq->queue_id = rc + idx; |
15048 | if (cq->queue_id > phba->sli4_hba.cq_max) | ||
15049 | phba->sli4_hba.cq_max = cq->queue_id; | ||
15104 | } | 15050 | } |
15105 | 15051 | ||
15106 | out: | 15052 | out: |
@@ -19664,7 +19610,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, | |||
19664 | /* NVME_LS and NVME_LS ABTS requests. */ | 19610 | /* NVME_LS and NVME_LS ABTS requests. */ |
19665 | if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { | 19611 | if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { |
19666 | pring = phba->sli4_hba.nvmels_wq->pring; | 19612 | pring = phba->sli4_hba.nvmels_wq->pring; |
19667 | spin_lock_irqsave(&pring->ring_lock, iflags); | 19613 | lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
19614 | qp, wq_access); | ||
19668 | sglq = __lpfc_sli_get_els_sglq(phba, pwqe); | 19615 | sglq = __lpfc_sli_get_els_sglq(phba, pwqe); |
19669 | if (!sglq) { | 19616 | if (!sglq) { |
19670 | spin_unlock_irqrestore(&pring->ring_lock, iflags); | 19617 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
@@ -19697,7 +19644,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, | |||
19697 | 19644 | ||
19698 | bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); | 19645 | bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); |
19699 | 19646 | ||
19700 | spin_lock_irqsave(&pring->ring_lock, iflags); | 19647 | lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
19648 | qp, wq_access); | ||
19701 | ret = lpfc_sli4_wq_put(wq, wqe); | 19649 | ret = lpfc_sli4_wq_put(wq, wqe); |
19702 | if (ret) { | 19650 | if (ret) { |
19703 | spin_unlock_irqrestore(&pring->ring_lock, iflags); | 19651 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
@@ -19724,7 +19672,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, | |||
19724 | pwqe->sli4_xritag); | 19672 | pwqe->sli4_xritag); |
19725 | bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); | 19673 | bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); |
19726 | 19674 | ||
19727 | spin_lock_irqsave(&pring->ring_lock, iflags); | 19675 | lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
19676 | qp, wq_access); | ||
19728 | ret = lpfc_sli4_wq_put(wq, wqe); | 19677 | ret = lpfc_sli4_wq_put(wq, wqe); |
19729 | if (ret) { | 19678 | if (ret) { |
19730 | spin_unlock_irqrestore(&pring->ring_lock, iflags); | 19679 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
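
lpfc_qp_spin_lock_irqsave() is not a core kernel primitive; per the diffstat it comes from the lpfc_sli4.h changes in this same patch, counting per-hdwq contention when LPFC_HDWQ_LOCK_STAT is defined and degrading to a plain spin_lock_irqsave() otherwise. The header is not shown here, so the following is one plausible shape, with the lock_conflict field name an assumption:

    /* Sketch: count a contention event per hdwq stat bucket when the
     * trylock fails, then take the lock normally; compiles down to the
     * ordinary locked section when lock statistics are disabled. */
    #ifdef LPFC_HDWQ_LOCK_STAT
    #define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat)        \
            do {                                                    \
                    if (!spin_trylock_irqsave(lock, flag)) {        \
                            (qp)->lock_conflict.lstat++;            \
                            spin_lock_irqsave(lock, flag);          \
                    }                                               \
            } while (0)
    #else
    #define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat)        \
            spin_lock_irqsave(lock, flag)
    #endif
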
@@ -19872,18 +19821,20 @@ void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) | |||
19872 | { | 19821 | { |
19873 | struct lpfc_pbl_pool *pbl_pool; | 19822 | struct lpfc_pbl_pool *pbl_pool; |
19874 | struct lpfc_pvt_pool *pvt_pool; | 19823 | struct lpfc_pvt_pool *pvt_pool; |
19824 | struct lpfc_sli4_hdw_queue *qp; | ||
19875 | struct lpfc_io_buf *lpfc_ncmd; | 19825 | struct lpfc_io_buf *lpfc_ncmd; |
19876 | struct lpfc_io_buf *lpfc_ncmd_next; | 19826 | struct lpfc_io_buf *lpfc_ncmd_next; |
19877 | unsigned long iflag; | 19827 | unsigned long iflag; |
19878 | struct list_head tmp_list; | 19828 | struct list_head tmp_list; |
19879 | u32 tmp_count; | 19829 | u32 tmp_count; |
19880 | 19830 | ||
19881 | pbl_pool = &phba->sli4_hba.hdwq[hwqid].p_multixri_pool->pbl_pool; | 19831 | qp = &phba->sli4_hba.hdwq[hwqid]; |
19882 | pvt_pool = &phba->sli4_hba.hdwq[hwqid].p_multixri_pool->pvt_pool; | 19832 | pbl_pool = &qp->p_multixri_pool->pbl_pool; |
19833 | pvt_pool = &qp->p_multixri_pool->pvt_pool; | ||
19883 | tmp_count = 0; | 19834 | tmp_count = 0; |
19884 | 19835 | ||
19885 | spin_lock_irqsave(&pbl_pool->lock, iflag); | 19836 | lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); |
19886 | spin_lock(&pvt_pool->lock); | 19837 | lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); |
19887 | 19838 | ||
19888 | if (pvt_pool->count > pvt_pool->low_watermark) { | 19839 | if (pvt_pool->count > pvt_pool->low_watermark) { |
19889 | /* Step 1: move (all - low_watermark) from pvt_pool | 19840 | /* Step 1: move (all - low_watermark) from pvt_pool |
@@ -19936,7 +19887,8 @@ void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) | |||
19936 | * false - if the specified pbl_pool is empty or locked by someone else | 19887 | * false - if the specified pbl_pool is empty or locked by someone else |
19937 | **/ | 19888 | **/ |
19938 | static bool | 19889 | static bool |
19939 | _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_pbl_pool *pbl_pool, | 19890 | _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, |
19891 | struct lpfc_pbl_pool *pbl_pool, | ||
19940 | struct lpfc_pvt_pool *pvt_pool, u32 count) | 19892 | struct lpfc_pvt_pool *pvt_pool, u32 count) |
19941 | { | 19893 | { |
19942 | struct lpfc_io_buf *lpfc_ncmd; | 19894 | struct lpfc_io_buf *lpfc_ncmd; |
@@ -19948,7 +19900,7 @@ _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_pbl_pool *pbl_pool, | |||
19948 | if (ret) { | 19900 | if (ret) { |
19949 | if (pbl_pool->count) { | 19901 | if (pbl_pool->count) { |
19950 | /* Move a batch of XRIs from public to private pool */ | 19902 | /* Move a batch of XRIs from public to private pool */ |
19951 | spin_lock(&pvt_pool->lock); | 19903 | lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); |
19952 | list_for_each_entry_safe(lpfc_ncmd, | 19904 | list_for_each_entry_safe(lpfc_ncmd, |
19953 | lpfc_ncmd_next, | 19905 | lpfc_ncmd_next, |
19954 | &pbl_pool->list, | 19906 | &pbl_pool->list, |
@@ -19990,16 +19942,18 @@ void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) | |||
19990 | struct lpfc_multixri_pool *next_multixri_pool; | 19942 | struct lpfc_multixri_pool *next_multixri_pool; |
19991 | struct lpfc_pvt_pool *pvt_pool; | 19943 | struct lpfc_pvt_pool *pvt_pool; |
19992 | struct lpfc_pbl_pool *pbl_pool; | 19944 | struct lpfc_pbl_pool *pbl_pool; |
19945 | struct lpfc_sli4_hdw_queue *qp; | ||
19993 | u32 next_hwqid; | 19946 | u32 next_hwqid; |
19994 | u32 hwq_count; | 19947 | u32 hwq_count; |
19995 | int ret; | 19948 | int ret; |
19996 | 19949 | ||
19997 | multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; | 19950 | qp = &phba->sli4_hba.hdwq[hwqid]; |
19951 | multixri_pool = qp->p_multixri_pool; | ||
19998 | pvt_pool = &multixri_pool->pvt_pool; | 19952 | pvt_pool = &multixri_pool->pvt_pool; |
19999 | pbl_pool = &multixri_pool->pbl_pool; | 19953 | pbl_pool = &multixri_pool->pbl_pool; |
20000 | 19954 | ||
20001 | /* Check if local pbl_pool is available */ | 19955 | /* Check if local pbl_pool is available */ |
20002 | ret = _lpfc_move_xri_pbl_to_pvt(phba, pbl_pool, pvt_pool, count); | 19956 | ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); |
20003 | if (ret) { | 19957 | if (ret) { |
20004 | #ifdef LPFC_MXP_STAT | 19958 | #ifdef LPFC_MXP_STAT |
20005 | multixri_pool->local_pbl_hit_count++; | 19959 | multixri_pool->local_pbl_hit_count++; |
@@ -20022,7 +19976,7 @@ void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) | |||
20022 | 19976 | ||
20023 | /* Check if the public free xri pool is available */ | 19977 | /* Check if the public free xri pool is available */ |
20024 | ret = _lpfc_move_xri_pbl_to_pvt( | 19978 | ret = _lpfc_move_xri_pbl_to_pvt( |
20025 | phba, pbl_pool, pvt_pool, count); | 19979 | phba, qp, pbl_pool, pvt_pool, count); |
20026 | 19980 | ||
20027 | /* Exit while-loop if success or all hwqid are checked */ | 19981 | /* Exit while-loop if success or all hwqid are checked */ |
20028 | } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); | 19982 | } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); |
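With the qp pointer threaded through _lpfc_move_xri_pbl_to_pvt, the round-robin fallback behaves as before but charges lock contention to the borrowing hardware queue. When the local public pool comes up empty, the loop probes the other hwqs' public pools starting after rrb_next_hwqid and stops on the first hit or after one full cycle, roughly:

    /* Sketch of the round-robin borrow; rrb_next_hwqid records
     * where the next search should start.
     */
    hwq_count = phba->cfg_hdw_queue;
    next_hwqid = multixri_pool->rrb_next_hwqid;
    do {
            next_hwqid = (next_hwqid + 1) % hwq_count;
            next_multixri_pool =
                    phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
            pbl_pool = &next_multixri_pool->pbl_pool;
            ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool,
                                            pvt_pool, count);
    } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
    multixri_pool->rrb_next_hwqid = next_hwqid;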
@@ -20138,20 +20092,23 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, | |||
20138 | if ((pvt_pool->count < pvt_pool->low_watermark) || | 20092 | if ((pvt_pool->count < pvt_pool->low_watermark) || |
20139 | (xri_owned < xri_limit && | 20093 | (xri_owned < xri_limit && |
20140 | pvt_pool->count < pvt_pool->high_watermark)) { | 20094 | pvt_pool->count < pvt_pool->high_watermark)) { |
20141 | spin_lock_irqsave(&pvt_pool->lock, iflag); | 20095 | lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, |
20096 | qp, free_pvt_pool); | ||
20142 | list_add_tail(&lpfc_ncmd->list, | 20097 | list_add_tail(&lpfc_ncmd->list, |
20143 | &pvt_pool->list); | 20098 | &pvt_pool->list); |
20144 | pvt_pool->count++; | 20099 | pvt_pool->count++; |
20145 | spin_unlock_irqrestore(&pvt_pool->lock, iflag); | 20100 | spin_unlock_irqrestore(&pvt_pool->lock, iflag); |
20146 | } else { | 20101 | } else { |
20147 | spin_lock_irqsave(&pbl_pool->lock, iflag); | 20102 | lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, |
20103 | qp, free_pub_pool); | ||
20148 | list_add_tail(&lpfc_ncmd->list, | 20104 | list_add_tail(&lpfc_ncmd->list, |
20149 | &pbl_pool->list); | 20105 | &pbl_pool->list); |
20150 | pbl_pool->count++; | 20106 | pbl_pool->count++; |
20151 | spin_unlock_irqrestore(&pbl_pool->lock, iflag); | 20107 | spin_unlock_irqrestore(&pbl_pool->lock, iflag); |
20152 | } | 20108 | } |
20153 | } else { | 20109 | } else { |
20154 | spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); | 20110 | lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, |
20111 | qp, free_xri); | ||
20155 | list_add_tail(&lpfc_ncmd->list, | 20112 | list_add_tail(&lpfc_ncmd->list, |
20156 | &qp->lpfc_io_buf_list_put); | 20113 | &qp->lpfc_io_buf_list_put); |
20157 | qp->put_io_bufs++; | 20114 | qp->put_io_bufs++; |
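lpfc_release_io_buf picks the destination by watermark: a buffer refills the private pool while it sits below its low watermark (or below the high watermark while the queue still owns fewer XRIs than its limit), overflows to the public pool otherwise, and with XRI rebalancing disabled simply returns to the per-queue put list. The decision, isolated as a self-contained sketch (the enum and helper names are illustrative, not driver symbols):

    /* Sketch of the placement decision in lpfc_release_io_buf. */
    enum buf_dest { DEST_PVT_POOL, DEST_PBL_POOL, DEST_PUT_LIST };

    static enum buf_dest pick_dest(bool rebalancing, u32 pvt_count,
                                   u32 low_wm, u32 high_wm,
                                   u32 xri_owned, u32 xri_limit)
    {
            if (!rebalancing)
                    return DEST_PUT_LIST;
            if (pvt_count < low_wm ||
                (xri_owned < xri_limit && pvt_count < high_wm))
                    return DEST_PVT_POOL;
            return DEST_PBL_POOL;
    }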
@@ -20174,6 +20131,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, | |||
20174 | **/ | 20131 | **/ |
20175 | static struct lpfc_io_buf * | 20132 | static struct lpfc_io_buf * |
20176 | lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, | 20133 | lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, |
20134 | struct lpfc_sli4_hdw_queue *qp, | ||
20177 | struct lpfc_pvt_pool *pvt_pool, | 20135 | struct lpfc_pvt_pool *pvt_pool, |
20178 | struct lpfc_nodelist *ndlp) | 20136 | struct lpfc_nodelist *ndlp) |
20179 | { | 20137 | { |
@@ -20181,7 +20139,7 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, | |||
20181 | struct lpfc_io_buf *lpfc_ncmd_next; | 20139 | struct lpfc_io_buf *lpfc_ncmd_next; |
20182 | unsigned long iflag; | 20140 | unsigned long iflag; |
20183 | 20141 | ||
20184 | spin_lock_irqsave(&pvt_pool->lock, iflag); | 20142 | lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); |
20185 | list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, | 20143 | list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
20186 | &pvt_pool->list, list) { | 20144 | &pvt_pool->list, list) { |
20187 | if (lpfc_test_rrq_active( | 20145 | if (lpfc_test_rrq_active( |
@@ -20276,7 +20234,7 @@ lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, | |||
20276 | lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); | 20234 | lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); |
20277 | 20235 | ||
20278 | /* Get one XRI from private free xri pool */ | 20236 | /* Get one XRI from private free xri pool */ |
20279 | lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, pvt_pool, ndlp); | 20237 | lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); |
20280 | 20238 | ||
20281 | if (lpfc_ncmd) { | 20239 | if (lpfc_ncmd) { |
20282 | lpfc_ncmd->hdwq = qp; | 20240 | lpfc_ncmd->hdwq = qp; |
@@ -20349,11 +20307,13 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, | |||
20349 | lpfc_cmd = lpfc_get_io_buf_from_multixri_pools( | 20307 | lpfc_cmd = lpfc_get_io_buf_from_multixri_pools( |
20350 | phba, ndlp, hwqid, expedite); | 20308 | phba, ndlp, hwqid, expedite); |
20351 | else { | 20309 | else { |
20352 | spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag); | 20310 | lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag, |
20311 | qp, alloc_xri_get); | ||
20353 | if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) | 20312 | if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) |
20354 | lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); | 20313 | lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); |
20355 | if (!lpfc_cmd) { | 20314 | if (!lpfc_cmd) { |
20356 | spin_lock(&qp->io_buf_list_put_lock); | 20315 | lpfc_qp_spin_lock(&qp->io_buf_list_put_lock, |
20316 | qp, alloc_xri_put); | ||
20357 | list_splice(&qp->lpfc_io_buf_list_put, | 20317 | list_splice(&qp->lpfc_io_buf_list_put, |
20358 | &qp->lpfc_io_buf_list_get); | 20318 | &qp->lpfc_io_buf_list_get); |
20359 | qp->get_io_bufs += qp->put_io_bufs; | 20319 | qp->get_io_bufs += qp->put_io_bufs; |
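The slow path in lpfc_get_io_buf keeps the two-list scheme: allocate from the get list and, when it runs dry, pull everything over from the put list while holding both locks, put nested inside get. With the wrappers, a first-try failure on either lock is charged to alloc_xri_get or alloc_xri_put respectively. The refill step, sketched:

    /* Sketch: refill the get list from the put list under nested
     * locks (get outer, put inner).
     */
    lpfc_qp_spin_lock(&qp->io_buf_list_put_lock, qp, alloc_xri_put);
    list_splice(&qp->lpfc_io_buf_list_put, &qp->lpfc_io_buf_list_get);
    qp->get_io_bufs += qp->put_io_bufs;
    qp->put_io_bufs = 0;
    INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
    spin_unlock(&qp->io_buf_list_put_lock);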
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index f5e58cd4c6ac..c381f2cb4909 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ | 42 | /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ |
43 | #define LPFC_HBA_HDWQ_MIN 0 | 43 | #define LPFC_HBA_HDWQ_MIN 0 |
44 | #define LPFC_HBA_HDWQ_MAX 64 | 44 | #define LPFC_HBA_HDWQ_MAX 128 |
45 | #define LPFC_HBA_HDWQ_DEF 0 | 45 | #define LPFC_HBA_HDWQ_DEF 0 |
46 | 46 | ||
47 | /* Common buffer size to accommodate SCSI and NVME IO buffers */ | 47 | /* Common buffer size to accommodate SCSI and NVME IO buffers */ |
@@ -166,16 +166,19 @@ struct lpfc_queue { | |||
166 | uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ | 166 | uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ |
167 | uint32_t host_index; /* The host's index for putting or getting */ | 167 | uint32_t host_index; /* The host's index for putting or getting */ |
168 | uint32_t hba_index; /* The last known hba index for get or put */ | 168 | uint32_t hba_index; /* The last known hba index for get or put */ |
169 | uint32_t q_mode; | ||
169 | 170 | ||
170 | struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ | 171 | struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ |
171 | struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ | 172 | struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ |
172 | 173 | ||
173 | uint32_t q_mode; | ||
174 | uint16_t page_count; /* Number of pages allocated for this queue */ | 174 | uint16_t page_count; /* Number of pages allocated for this queue */ |
175 | uint16_t page_size; /* size of page allocated for this queue */ | 175 | uint16_t page_size; /* size of page allocated for this queue */ |
176 | #define LPFC_EXPANDED_PAGE_SIZE 16384 | 176 | #define LPFC_EXPANDED_PAGE_SIZE 16384 |
177 | #define LPFC_DEFAULT_PAGE_SIZE 4096 | 177 | #define LPFC_DEFAULT_PAGE_SIZE 4096 |
178 | uint16_t chann; /* IO channel this queue is associated with */ | 178 | uint16_t chann; /* Hardware Queue association WQ/CQ */ |
179 | /* CPU affinity for EQ */ | ||
180 | #define LPFC_FIND_BY_EQ 0 | ||
181 | #define LPFC_FIND_BY_HDWQ 1 | ||
179 | uint8_t db_format; | 182 | uint8_t db_format; |
180 | #define LPFC_DB_RING_FORMAT 0x01 | 183 | #define LPFC_DB_RING_FORMAT 0x01 |
181 | #define LPFC_DB_LIST_FORMAT 0x02 | 184 | #define LPFC_DB_LIST_FORMAT 0x02 |
@@ -431,11 +434,6 @@ struct lpfc_hba_eq_hdl { | |||
431 | uint32_t idx; | 434 | uint32_t idx; |
432 | char handler_name[LPFC_SLI4_HANDLER_NAME_SZ]; | 435 | char handler_name[LPFC_SLI4_HANDLER_NAME_SZ]; |
433 | struct lpfc_hba *phba; | 436 | struct lpfc_hba *phba; |
434 | atomic_t hba_eq_in_use; | ||
435 | struct cpumask *cpumask; | ||
436 | /* CPU affinitized to or 0xffffffff if multiple */ | ||
437 | uint32_t cpu; | ||
438 | #define LPFC_MULTI_CPU_AFFINITY 0xffffffff | ||
439 | }; | 437 | }; |
440 | 438 | ||
441 | /*BB Credit recovery value*/ | 439 | /*BB Credit recovery value*/ |
@@ -529,7 +527,9 @@ struct lpfc_vector_map_info { | |||
529 | uint16_t phys_id; | 527 | uint16_t phys_id; |
530 | uint16_t core_id; | 528 | uint16_t core_id; |
531 | uint16_t irq; | 529 | uint16_t irq; |
530 | uint16_t eq; | ||
532 | uint16_t hdwq; | 531 | uint16_t hdwq; |
532 | uint16_t hyper; | ||
533 | }; | 533 | }; |
534 | #define LPFC_VECTOR_MAP_EMPTY 0xffff | 534 | #define LPFC_VECTOR_MAP_EMPTY 0xffff |
535 | 535 | ||
@@ -593,6 +593,21 @@ struct lpfc_fc4_ctrl_stat { | |||
593 | u32 io_cmpls; | 593 | u32 io_cmpls; |
594 | }; | 594 | }; |
595 | 595 | ||
596 | #ifdef LPFC_HDWQ_LOCK_STAT | ||
597 | struct lpfc_lock_stat { | ||
598 | uint32_t alloc_xri_get; | ||
599 | uint32_t alloc_xri_put; | ||
600 | uint32_t free_xri; | ||
601 | uint32_t wq_access; | ||
602 | uint32_t alloc_pvt_pool; | ||
603 | uint32_t mv_from_pvt_pool; | ||
604 | uint32_t mv_to_pub_pool; | ||
605 | uint32_t mv_to_pvt_pool; | ||
606 | uint32_t free_pub_pool; | ||
607 | uint32_t free_pvt_pool; | ||
608 | }; | ||
609 | #endif | ||
610 | |||
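Each lpfc_lock_stat member maps to exactly one instrumented lock site, so a nonzero counter points at the specific hot path that is contending. The per-hdwq totals surface through the new debug_lockstat debugfs entry declared in lpfc.h above; a dump loop might look like this (the function name and buffer handling are illustrative only, and assume LPFC_HDWQ_LOCK_STAT is defined):

    /* Sketch: summing a few per-queue conflict counters for a
     * debugfs-style dump; lockstat_dump is hypothetical.
     */
    static int lockstat_dump(struct lpfc_hba *phba, char *buf, int size)
    {
            struct lpfc_sli4_hdw_queue *qp;
            int i, len = 0;

            for (i = 0; i < phba->cfg_hdw_queue; i++) {
                    qp = &phba->sli4_hba.hdwq[i];
                    len += scnprintf(buf + len, size - len,
                                     "HDWQ %d: wq %u pvt %u pub %u\n", i,
                                     qp->lock_conflict.wq_access,
                                     qp->lock_conflict.alloc_pvt_pool,
                                     qp->lock_conflict.free_pub_pool);
            }
            return len;
    }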
596 | /* SLI4 HBA data structure entries */ | 611 | /* SLI4 HBA data structure entries */ |
597 | struct lpfc_sli4_hdw_queue { | 612 | struct lpfc_sli4_hdw_queue { |
598 | /* Pointers to the constructed SLI4 queues */ | 613 | /* Pointers to the constructed SLI4 queues */ |
@@ -626,6 +641,9 @@ struct lpfc_sli4_hdw_queue { | |||
626 | /* FC-4 Stats counters */ | 641 | /* FC-4 Stats counters */ |
627 | struct lpfc_fc4_ctrl_stat nvme_cstat; | 642 | struct lpfc_fc4_ctrl_stat nvme_cstat; |
628 | struct lpfc_fc4_ctrl_stat scsi_cstat; | 643 | struct lpfc_fc4_ctrl_stat scsi_cstat; |
644 | #ifdef LPFC_HDWQ_LOCK_STAT | ||
645 | struct lpfc_lock_stat lock_conflict; | ||
646 | #endif | ||
629 | 647 | ||
630 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 648 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
631 | #define LPFC_CHECK_CPU_CNT 128 | 649 | #define LPFC_CHECK_CPU_CNT 128 |
@@ -635,6 +653,34 @@ struct lpfc_sli4_hdw_queue { | |||
635 | #endif | 653 | #endif |
636 | }; | 654 | }; |
637 | 655 | ||
656 | #ifdef LPFC_HDWQ_LOCK_STAT | ||
657 | /* compile-time trylock stats */ | ||
658 | #define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \ | ||
659 | { \ | ||
660 | int only_once = 1; \ | ||
661 | while (spin_trylock_irqsave(lock, flag) == 0) { \ | ||
662 | if (only_once) { \ | ||
663 | only_once = 0; \ | ||
664 | qp->lock_conflict.lstat++; \ | ||
665 | } \ | ||
666 | } \ | ||
667 | } | ||
668 | #define lpfc_qp_spin_lock(lock, qp, lstat) \ | ||
669 | { \ | ||
670 | int only_once = 1; \ | ||
671 | while (spin_trylock(lock) == 0) { \ | ||
672 | if (only_once) { \ | ||
673 | only_once = 0; \ | ||
674 | qp->lock_conflict.lstat++; \ | ||
675 | } \ | ||
676 | } \ | ||
677 | } | ||
678 | #else | ||
679 | #define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \ | ||
680 | spin_lock_irqsave(lock, flag) | ||
681 | #define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock) | ||
682 | #endif | ||
683 | |||
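The wrappers are drop-in replacements: with LPFC_HDWQ_LOCK_STAT undefined they compile straight to spin_lock_irqsave/spin_lock, so production builds pay nothing. The lstat argument is substituted directly as a member name of struct lpfc_lock_stat, which is why every call site passes a bare field identifier. Note that the instrumented variant spin-waits in a trylock loop and counts only the first failed attempt per acquisition, so the counters measure how often a lock was contended, not for how long. Usage sketch:

    /* lstat must name a struct lpfc_lock_stat member; release is
     * the normal unlock primitive either way.
     */
    unsigned long iflag;

    lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
    /* ... critical section ... */
    spin_unlock_irqrestore(&pvt_pool->lock, iflag);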
638 | struct lpfc_sli4_hba { | 684 | struct lpfc_sli4_hba { |
639 | void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for | 685 | void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for |
640 | * config space registers | 686 | * config space registers |
@@ -764,6 +810,8 @@ struct lpfc_sli4_hba { | |||
764 | uint16_t nvmet_xri_cnt; | 810 | uint16_t nvmet_xri_cnt; |
765 | uint16_t nvmet_io_wait_cnt; | 811 | uint16_t nvmet_io_wait_cnt; |
766 | uint16_t nvmet_io_wait_total; | 812 | uint16_t nvmet_io_wait_total; |
813 | uint16_t cq_max; | ||
814 | struct lpfc_queue **cq_lookup; | ||
767 | struct list_head lpfc_els_sgl_list; | 815 | struct list_head lpfc_els_sgl_list; |
768 | struct list_head lpfc_abts_els_sgl_list; | 816 | struct list_head lpfc_abts_els_sgl_list; |
769 | spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ | 817 | spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ |
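Finally, cq_max and cq_lookup give the interrupt path a direct CQID-to-queue table: the array is sized by the highest CQID seen at queue setup and indexed with the cqid parsed from each event queue entry, replacing a list walk with an O(1) lookup. A sketch under that assumption (lookup_cq is a hypothetical helper):

    /* Sketch: resolve a CQ from an EQE's cqid via the lookup
     * table; assumes cq_lookup was populated at queue setup.
     */
    static struct lpfc_queue *
    lookup_cq(struct lpfc_sli4_hba *sli4_hba, uint16_t cqid)
    {
            if (!sli4_hba->cq_lookup || cqid > sli4_hba->cq_max)
                    return NULL;
            return sli4_hba->cq_lookup[cqid];
    }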