Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_init.c | 92
1 file changed, 50 insertions(+), 42 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 501147c4a147..647f5bfb3bd3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 			phba->sli4_hba.scsi_xri_max);
 
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
 	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	return 0;
@@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	struct lpfc_mqe *mqe;
 	int longs;
 
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
 	if (rc)
@@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
 
-	/*
-	 * We need to do a READ_CONFIG mailbox command here before
-	 * calling lpfc_get_cfgparam. For VFs this will report the
-	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
-	 * All of the resources allocated
-	 * for this Port are tied to these values.
-	 */
-	/* Get all the module params for configuring this host */
-	lpfc_get_cfgparam(phba);
 	phba->max_vpi = LPFC_MAX_VPI;
 
 	/* This will be set to correct value after the read_config mbox */
@@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 		phba->sli4_hba.fcp_wq = NULL;
 	}
 
-	if (phba->pci_bar0_memmap_p) {
-		iounmap(phba->pci_bar0_memmap_p);
-		phba->pci_bar0_memmap_p = NULL;
-	}
-	if (phba->pci_bar2_memmap_p) {
-		iounmap(phba->pci_bar2_memmap_p);
-		phba->pci_bar2_memmap_p = NULL;
-	}
-	if (phba->pci_bar4_memmap_p) {
-		iounmap(phba->pci_bar4_memmap_p);
-		phba->pci_bar4_memmap_p = NULL;
-	}
-
 	/* Release FCP CQ mapping array */
 	if (phba->sli4_hba.fcp_cq_map != NULL) {
 		kfree(phba->sli4_hba.fcp_cq_map);
@@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	 * particular PCI BARs regions is dependent on the type of
 	 * SLI4 device.
 	 */
-	if (pci_resource_start(pdev, 0)) {
-		phba->pci_bar0_map = pci_resource_start(pdev, 0);
-		bar0map_len = pci_resource_len(pdev, 0);
+	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
 
 		/*
 		 * Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 				   "registers.\n");
 			goto out;
 		}
+		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
 		/* Set up BAR0 PCI config space register memory map */
 		lpfc_sli4_bar0_register_memmap(phba, if_type);
 	} else {
@@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 2))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
 		/*
 		 * Map SLI4 if type 0 HBA Control Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar1_map = pci_resource_start(pdev, 2);
-		bar1map_len = pci_resource_len(pdev, 2);
+		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
 		phba->sli4_hba.ctrl_regs_memmap_p =
 			ioremap(phba->pci_bar1_map, bar1map_len);
 		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 			   "ioremap failed for SLI4 HBA control registers.\n");
 			goto out_iounmap_conf;
 		}
+		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
 		lpfc_sli4_bar1_register_memmap(phba);
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 4))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
 		/*
 		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar2_map = pci_resource_start(pdev, 4);
-		bar2map_len = pci_resource_len(pdev, 4);
+		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
 		phba->sli4_hba.drbl_regs_memmap_p =
 			ioremap(phba->pci_bar2_map, bar2map_len);
 		if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
 			goto out_iounmap_ctrl;
 		}
+		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
 		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
 		if (error)
 			goto out_iounmap_all;
@@ -8405,7 +8389,8 @@ static int
 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 {
 	int i, idx, saved_chann, used_chann, cpu, phys_id;
-	int max_phys_id, num_io_channel, first_cpu;
+	int max_phys_id, min_phys_id;
+	int num_io_channel, first_cpu, chan;
 	struct lpfc_vector_map_info *cpup;
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
@@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 			phba->sli4_hba.num_present_cpu));
 
 	max_phys_id = 0;
+	min_phys_id = 0xff;
 	phys_id = 0;
 	num_io_channel = 0;
 	first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 
 		if (cpup->phys_id > max_phys_id)
 			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
 		cpup++;
 	}
 
+	phys_id = min_phys_id;
 	/* Now associate the HBA vectors with specific CPUs */
 	for (idx = 0; idx < vectors; idx++) {
 		cpup = phba->sli4_hba.cpu_map;
@@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 		for (i = 1; i < max_phys_id; i++) {
 			phys_id++;
 			if (phys_id > max_phys_id)
-				phys_id = 0;
+				phys_id = min_phys_id;
 			cpu = lpfc_find_next_cpu(phba, phys_id);
 			if (cpu == LPFC_VECTOR_MAP_EMPTY)
 				continue;
 			goto found;
 		}
 
+		/* Use round robin for scheduling */
+		phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+		chan = 0;
+		cpup = phba->sli4_hba.cpu_map;
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			cpup->channel_id = chan;
+			cpup++;
+			chan++;
+			if (chan >= phba->cfg_fcp_io_channel)
+				chan = 0;
+		}
+
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3329 Cannot set affinity:"
 				"Error mapping vector %d (%d)\n",
@@ -8503,7 +8504,7 @@ found:
 		/* Spread vector mapping across multple physical CPU nodes */
 		phys_id++;
 		if (phys_id > max_phys_id)
-			phys_id = 0;
+			phys_id = min_phys_id;
 	}
 
 	/*
@@ -8513,7 +8514,7 @@ found:
 	 * Base the remaining IO channel assigned, to IO channels already
 	 * assigned to other CPUs on the same phys_id.
 	 */
-	for (i = 0; i <= max_phys_id; i++) {
+	for (i = min_phys_id; i <= max_phys_id; i++) {
 		/*
 		 * If there are no io channels already mapped to
 		 * this phys_id, just round robin thru the io_channels.
@@ -8595,10 +8596,11 @@ out:
 	if (num_io_channel != phba->sli4_hba.num_present_cpu)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3333 Set affinity mismatch:"
-				"%d chann != %d cpus: %d vactors\n",
+				"%d chann != %d cpus: %d vectors\n",
 				num_io_channel, phba->sli4_hba.num_present_cpu,
 				vectors);
 
+	/* Enable using cpu affinity for scheduling */
 	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
 	return 1;
 }
@@ -8689,9 +8691,12 @@ enable_msix_vectors:
 
 cfg_fail_out:
	/* free the irq already requested */
-	for (--index; index >= 0; index--)
+	for (--index; index >= 0; index--) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 msi_fail_out:
 	/* Unconfigure MSI-X capability structure */
@@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
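Note on the locking change in lpfc_sli4_xri_sgl_update(): once the outer get-list lock is taken with spin_lock_irq(), local interrupts are already disabled, so the nested put-list lock only needs plain spin_lock()/spin_unlock(); the old code's spin_unlock_irq() on the inner lock would have re-enabled interrupts while the outer lock was still held. A minimal sketch of that nested-lock idiom, using hypothetical lock and list names rather than the lpfc fields:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(get_lock);	/* outer lock (hypothetical) */
	static DEFINE_SPINLOCK(put_lock);	/* inner lock (hypothetical) */
	static LIST_HEAD(get_list);
	static LIST_HEAD(put_list);

	/* Splice both lists onto 'out' while holding both locks. */
	static void drain_both_lists(struct list_head *out)
	{
		spin_lock_irq(&get_lock);	/* disables local interrupts */
		spin_lock(&put_lock);		/* nested: interrupts already off */
		list_splice_init(&get_list, out);
		list_splice_init(&put_list, out);
		spin_unlock(&put_lock);		/* must not re-enable interrupts here */
		spin_unlock_irq(&get_lock);	/* interrupts come back on last */
	}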