author	James Smart <jsmart2021@gmail.com>	2019-01-28 14:14:35 -0500
committer	Martin K. Petersen <martin.petersen@oracle.com>	2019-02-05 22:29:50 -0500
commit	222e9239c60888b7c9331f4b3d0a99d2f27dca6b (patch)
tree	e0aa27118876bd0b6858b9993c5fdd4b22c5b1c5 /drivers/scsi/lpfc
parent	75508a8b8b2de1b39b72e7dc31505cddc8354a01 (diff)
scsi: lpfc: Resize cpu maps structures based on possible cpus
The work done to date utilized the number of present cpus when sizing
per-cpu structures. Structures should have been sized based on the max
possible cpu count. Convert the driver over to possible cpu count for
sizing allocation.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
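The whole patch is one pattern applied repeatedly: size per-cpu arrays by the possible CPU count (which can grow at runtime via hotplug), and only walk CPUs that are actually present when filling them in. Below is a minimal userspace sketch of that split, not the driver's code: sysconf(3)'s _SC_NPROCESSORS_CONF and _SC_NPROCESSORS_ONLN stand in for the kernel's num_possible_cpus() and the online count, and vector_map_info is a one-field stub of the driver's lpfc_vector_map_info.

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	/* One-field stub of the driver's lpfc_vector_map_info. */
	struct vector_map_info {
		unsigned short phys_id;
	};

	int main(void)
	{
		/* _SC_NPROCESSORS_CONF ~ possible CPUs; _SC_NPROCESSORS_ONLN ~ online. */
		long possible = sysconf(_SC_NPROCESSORS_CONF);
		long online = sysconf(_SC_NPROCESSORS_ONLN);
		struct vector_map_info *cpu_map;

		/* Size the map for every CPU that could ever appear ... */
		cpu_map = calloc(possible, sizeof(*cpu_map));
		if (!cpu_map)
			return 1;

		/* ... but only populate the slots backed by a real CPU today. */
		for (long cpu = 0; cpu < online; cpu++)
			cpu_map[cpu].phys_id = (unsigned short)cpu;

		printf("map sized for %ld CPUs, %ld populated\n", possible, online);
		free(cpu_map);
		return 0;
	}

Sizing by the present count instead, as the driver did before this patch, risks out-of-bounds accesses once a CPU beyond that count is hotplugged in.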
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--	drivers/scsi/lpfc/lpfc_attr.c	23
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	32
-rw-r--r--	drivers/scsi/lpfc/lpfc_nvmet.c	35
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli4.h	2
4 files changed, 51 insertions(+), 41 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2864cb53b1e8..a114965a376c 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5176,16 +5176,22 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 	case 1:
 		len += snprintf(buf + len, PAGE_SIZE-len,
 				"fcp_cpu_map: HBA centric mapping (%d): "
-				"%d online CPUs\n",
-				phba->cfg_fcp_cpu_map,
-				phba->sli4_hba.num_online_cpu);
+				"%d of %d CPUs online from %d possible CPUs\n",
+				phba->cfg_fcp_cpu_map, num_online_cpus(),
+				num_present_cpus(),
+				phba->sli4_hba.num_possible_cpu);
 		break;
 	}
 
-	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+	while (phba->sli4_hba.curr_disp_cpu <
+	       phba->sli4_hba.num_possible_cpu) {
 		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
 
-		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+		if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"CPU %02d not present\n",
+					phba->sli4_hba.curr_disp_cpu);
+		else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
 			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
 				len += snprintf(
 					buf + len, PAGE_SIZE - len,
@@ -5225,14 +5231,15 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 
 		/* display max number of CPUs keeping some margin */
 		if (phba->sli4_hba.curr_disp_cpu <
-		    phba->sli4_hba.num_present_cpu &&
+		    phba->sli4_hba.num_possible_cpu &&
 		    (len >= (PAGE_SIZE - 64))) {
-			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			len += snprintf(buf + len,
+					PAGE_SIZE - len, "more...\n");
 			break;
 		}
 	}
 
-	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
 		phba->sli4_hba.curr_disp_cpu = 0;
 
 	return len;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 05919480e430..8ba2861db7b6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6373,8 +6373,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	u32 if_type;
 	u32 if_fam;
 
-	phba->sli4_hba.num_online_cpu = num_online_cpus();
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+	phba->sli4_hba.num_possible_cpu = num_possible_cpus();
 	phba->sli4_hba.curr_disp_cpu = 0;
 
 	/* Get all the module params for configuring this host */
@@ -6796,7 +6796,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_fcf_rr_bmask;
 	}
 
-	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
 					sizeof(struct lpfc_vector_map_info),
 					GFP_KERNEL);
 	if (!phba->sli4_hba.cpu_map) {
@@ -6868,8 +6868,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
 	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
 	kfree(phba->sli4_hba.cpu_map);
+	phba->sli4_hba.num_possible_cpu = 0;
 	phba->sli4_hba.num_present_cpu = 0;
-	phba->sli4_hba.num_online_cpu = 0;
 	phba->sli4_hba.curr_disp_cpu = 0;
 
 	/* Free memory allocated for fast-path work queue handles */
@@ -10519,15 +10519,14 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
 	int cpu;
 
 	/* Find the desired phys_id for the specified EQ */
-	cpup = phba->sli4_hba.cpu_map;
-	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 		if ((match == LPFC_FIND_BY_EQ) &&
 		    (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
 		    (cpup->eq == id))
 			return cpu;
 		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
 			return cpu;
-		cpup++;
 	}
 	return 0;
 }
@@ -10545,11 +10544,10 @@ lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq)
 	int cpu;
 
 	/* Find the desired phys_id for the specified EQ */
-	cpup = phba->sli4_hba.cpu_map;
-	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 		if (cpup->hdwq == hdwq)
 			return cpup->eq;
-		cpup++;
 	}
 	return 0;
 }
@@ -10569,15 +10567,13 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
 	struct lpfc_vector_map_info *cpup;
 	int idx;
 
-	cpup = phba->sli4_hba.cpu_map;
-	for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+	for_each_present_cpu(idx) {
+		cpup = &phba->sli4_hba.cpu_map[idx];
 		/* Does the cpup match the one we are looking for */
 		if ((cpup->phys_id == phys_id) &&
 		    (cpup->core_id == core_id) &&
-		    (cpu != idx)) {
+		    (cpu != idx))
 			return 1;
-		}
-		cpup++;
 	}
 	return 0;
 }
@@ -10608,7 +10604,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	/* Init cpu_map array */
 	memset(phba->sli4_hba.cpu_map, 0xff,
 	       (sizeof(struct lpfc_vector_map_info) *
-	       phba->sli4_hba.num_present_cpu));
+	       phba->sli4_hba.num_possible_cpu));
 
 	max_phys_id = 0;
 	min_phys_id = 0xffff;
@@ -10617,8 +10613,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	phys_id = 0;
 
 	/* Update CPU map with physical id and core id of each CPU */
-	cpup = phba->sli4_hba.cpu_map;
-	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 #ifdef CONFIG_X86
 		cpuinfo = &cpu_data(cpu);
 		cpup->phys_id = cpuinfo->phys_proc_id;
@@ -10645,8 +10641,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 			max_core_id = cpup->core_id;
 		if (cpup->core_id < min_core_id)
 			min_core_id = cpup->core_id;
-
-		cpup++;
 	}
 
 	for_each_possible_cpu(i) {
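The lpfc_init.c conversions above share a second detail worth noting: the old loops advanced a raw cpup pointer once per iteration, which is only safe when CPU ids are dense, while for_each_present_cpu() may skip holes in the id space, so each body now re-derives cpup from the CPU id. A small self-contained sketch of why the indexed lookup matters, with a hypothetical for_each_present_cpu_sim() macro and a bool array standing in for the kernel's cpu_present_mask:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 8

	/* Hypothetical stand-ins for the kernel's present mask and iterator;
	 * CPUs 2, 3, 6 and 7 are holes in the id space. */
	static const bool cpu_present_sim[NR_CPUS] = {
		true, true, false, false, true, true, false, false,
	};
	#define for_each_present_cpu_sim(cpu) \
		for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
			if (cpu_present_sim[cpu])

	struct vector_map_info {
		int hdwq;
	};

	/* Indexing the map by CPU id stays correct across the holes; a
	 * cpup++ walk bounded by the present count would drift onto the
	 * wrong slots as soon as an id is skipped. */
	static int find_cpu_by_hdwq(const struct vector_map_info *map, int hdwq)
	{
		int cpu;

		for_each_present_cpu_sim(cpu)
			if (map[cpu].hdwq == hdwq)
				return cpu;
		return 0;
	}

	int main(void)
	{
		struct vector_map_info map[NR_CPUS] = {
			[0] = {0}, [1] = {1}, [4] = {0}, [5] = {1},
		};

		printf("hdwq 1 first maps to CPU %d\n", find_cpu_by_hdwq(map, 1));
		return 0;
	}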
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 0d296aee2d82..0b27e8c5ae32 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1194,9 +1194,9 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
 
 	/* Cycle the the entire CPU context list for every MRQ */
 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
-		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+		for_each_present_cpu(j) {
+			infop = lpfc_get_ctx_list(phba, j, i);
 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
-			infop++; /* next */
 		}
 	}
 	kfree(phba->sli4_hba.nvmet_ctx_info);
@@ -1211,14 +1211,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	union lpfc_wqe128 *wqe;
 	struct lpfc_nvmet_ctx_info *last_infop;
 	struct lpfc_nvmet_ctx_info *infop;
-	int i, j, idx;
+	int i, j, idx, cpu;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
 			"6403 Allocate NVMET resources for %d XRIs\n",
 			phba->sli4_hba.nvmet_xri_cnt);
 
 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
-		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
 	if (!phba->sli4_hba.nvmet_ctx_info) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1246,13 +1246,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	 * of the IO completion. Thus a context that was allocated for MRQ A
 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
 	 */
-	infop = phba->sli4_hba.nvmet_ctx_info;
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for_each_possible_cpu(i) {
 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			infop = lpfc_get_ctx_list(phba, i, j);
 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
 			spin_lock_init(&infop->nvmet_ctx_list_lock);
 			infop->nvmet_ctx_list_cnt = 0;
-			infop++;
 		}
 	}
 
@@ -1262,8 +1261,10 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
 	 */
 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
-		last_infop = lpfc_get_ctx_list(phba, 0, j);
-		for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
+		last_infop = lpfc_get_ctx_list(phba,
+					       cpumask_first(cpu_present_mask),
+					       j);
+		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
 			infop = lpfc_get_ctx_list(phba, i, j);
 			infop->nvmet_ctx_next_cpu = last_infop;
 			last_infop = infop;
@@ -1274,6 +1275,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 	 * received command on a per xri basis.
 	 */
 	idx = 0;
+	cpu = cpumask_first(cpu_present_mask);
 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
 		if (!ctx_buf) {
@@ -1327,7 +1329,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 		 * is MRQidx will be associated with CPUidx. This association
 		 * can change on the fly.
 		 */
-		infop = lpfc_get_ctx_list(phba, idx, idx);
+		infop = lpfc_get_ctx_list(phba, cpu, idx);
 		spin_lock(&infop->nvmet_ctx_list_lock);
 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
 		infop->nvmet_ctx_list_cnt++;
@@ -1335,11 +1337,18 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 
 		/* Spread ctx structures evenly across all MRQs */
 		idx++;
-		if (idx >= phba->cfg_nvmet_mrq)
+		if (idx >= phba->cfg_nvmet_mrq) {
 			idx = 0;
+			cpu = cpumask_first(cpu_present_mask);
+			continue;
+		}
+		cpu = cpumask_next(cpu, cpu_present_mask);
+		if (cpu == nr_cpu_ids)
+			cpu = cpumask_first(cpu_present_mask);
+
 	}
 
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for_each_present_cpu(i) {
 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
 			infop = lpfc_get_ctx_list(phba, i, j);
 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
@@ -1839,7 +1848,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
 	else
 		get_infop = current_infop->nvmet_ctx_next_cpu;
 
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
 		if (get_infop == current_infop) {
 			get_infop = get_infop->nvmet_ctx_next_cpu;
 			continue;
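The XRI distribution loop in the lpfc_nvmet.c changes above round-robins contexts over the present CPUs, restarting from cpumask_first() whenever cpumask_next() runs off the end of the mask (the kernel reports that by returning nr_cpu_ids). A compact userspace model of just that wraparound, with first_present()/next_present() as hypothetical stand-ins for cpumask_first()/cpumask_next() and the MRQ bookkeeping dropped for brevity:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 8 /* plays the role of nr_cpu_ids */

	/* Hypothetical present mask: only CPUs 1, 3 and 6 are present. */
	static const bool present[NR_CPUS] = {
		false, true, false, true, false, false, true, false,
	};

	/* next_present() returns NR_CPUS when the mask is exhausted, just as
	 * cpumask_next() returns nr_cpu_ids. */
	static int first_present(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (present[cpu])
				return cpu;
		return NR_CPUS;
	}

	static int next_present(int cpu)
	{
		for (cpu++; cpu < NR_CPUS; cpu++)
			if (present[cpu])
				return cpu;
		return NR_CPUS;
	}

	int main(void)
	{
		/* Deal 8 contexts across the present CPUs, wrapping like the patch. */
		int cpu = first_present();

		for (int i = 0; i < 8; i++) {
			printf("ctx %d -> cpu %d\n", i, cpu);
			cpu = next_present(cpu);
			if (cpu == NR_CPUS) /* walked past the last present CPU */
				cpu = first_present();
		}
		return 0;
	}

Dealing only onto present CPUs keeps every context list reachable today, while the possible-sized backing array means the scheme still indexes safely if more CPUs appear later.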
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 20566c506e5f..1e3d7f534eaa 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -890,7 +890,7 @@ struct lpfc_sli4_hba {
 
 	/* CPU to vector mapping information */
 	struct lpfc_vector_map_info *cpu_map;
-	uint16_t num_online_cpu;
+	uint16_t num_possible_cpu;
 	uint16_t num_present_cpu;
 	uint16_t curr_disp_cpu;
 	struct lpfc_eq_intr_info __percpu *eq_info;