author     James Bottomley <JBottomley@Parallels.com>  2013-05-10 10:53:40 -0400
committer  James Bottomley <JBottomley@Parallels.com>  2013-05-10 10:53:40 -0400
commit     832e77bc1106592c621fc42f2f6a4500e414a0a1 (patch)
tree       2b226019e7b1344f05e44d79da956a5306d20780 /drivers/scsi/lpfc
parent     e0fd9affeb64088eff407dfc98bbd3a5c17ea479 (diff)
parent     e689cf0caf2d5ebcb300fb69887d35c0abdbdb97 (diff)
Merge branch 'misc' into for-linus
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h             24
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       166
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c         39
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h         1
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c           3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        113
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c     91
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h           1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h          7
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       607
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h       1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c         9
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c         14
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c   25
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       798
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        106
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h        21
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h      2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c       25
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h        1
20 files changed, 1617 insertions, 437 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 7706c99ec8bb..bcc56cac4fd8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
 #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
 		cmnd for menlo needs nearly twice as for firmware
 		downloads using bsg */
-#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
+
 #define LPFC_MAX_SGE_SIZE	0x80000000 /* Maximum data allowed in a SGE */
-#define LPFC_MAX_PROT_SG_SEG_CNT	4096	/* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL	120	/* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN	100	/* vport symbolic name length */
@@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
  * queue depths when there are driver resource error or Firmware
  * resource error.
  */
-#define QUEUE_RAMP_DOWN_INTERVAL	(1 * HZ)	/* 1 Second */
-#define QUEUE_RAMP_UP_INTERVAL		(300 * HZ)	/* 5 minutes */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))
+/* 5 minutes */
+#define QUEUE_RAMP_UP_INTERVAL		(msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -671,6 +678,7 @@ struct lpfc_hba {
 	uint32_t lmt;
 
 	uint32_t fc_topology;	/* link topology, from LINK INIT */
+	uint32_t fc_topology_changed;	/* link topology, from LINK INIT */
 
 	struct lpfc_stats fc_stat;
 
@@ -701,9 +709,11 @@ struct lpfc_hba {
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_use_msi;
 	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_cpu_map;
 	uint32_t cfg_fcp_wq_count;
 	uint32_t cfg_fcp_eq_count;
 	uint32_t cfg_fcp_io_channel;
+	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@ struct lpfc_hba {
 	uint64_t bg_reftag_err_cnt;
 
 	/* fastpath list. */
-	spinlock_t scsi_buf_list_lock;
-	struct list_head lpfc_scsi_buf_list;
+	spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
+	spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
+	struct list_head lpfc_scsi_buf_list_get;
+	struct list_head lpfc_scsi_buf_list_put;
 	uint32_t total_scsi_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 9290713af253..3c5625b8b1f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
 	int i;
 	int rc;
 
+	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+		return 0;
+
 	init_completion(&online_compl);
 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
 			      LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
 	int status = 0;
 	int rc;
 
-	if (!phba->cfg_enable_hba_reset)
+	if ((!phba->cfg_enable_hba_reset) ||
+	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
 		return -EACCES;
 
 	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 		pci_disable_sriov(pdev);
 		phba->cfg_sriov_nr_virtfn = 0;
 	}
+
 	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
 	if (status != 0)
@@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 			"3054 lpfc_topology changed from %d to %d\n",
 			prev_val, val);
+		if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+			phba->fc_topology_changed = 1;
 		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
 		if (err) {
 			phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
 		   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/**
+ * lpfc_state_show - Display current driver CPU affinity
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the state of the link.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_vector_map_info *cpup;
+	int idx, len = 0;
+
+	if ((phba->sli_rev != LPFC_SLI_REV4) ||
+	    (phba->intr_type != MSIX))
+		return len;
+
+	switch (phba->cfg_fcp_cpu_map) {
+	case 0:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: No mapping (%d)\n",
+				phba->cfg_fcp_cpu_map);
+		return len;
+	case 1:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: HBA centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	case 2:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: Driver centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	}
+
+	cpup = phba->sli4_hba.cpu_map;
+	for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d\n",
+					idx, cpup->channel_id, cpup->phys_id,
+					cpup->core_id);
+		else
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d IRQ %d\n",
+					idx, cpup->channel_id, cpup->phys_id,
+					cpup->core_id, cpup->irq);
+
+		cpup++;
+	}
+	return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL  - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	int status = -EINVAL;
+	return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#	0 - Do not affinitze IRQ vectors
+#	1 - Affintize HBA vectors with respect to each HBA
+#	    (start with CPU0 for each HBA)
+#	2 - Affintize HBA vectors with respect to the entire driver
+#	    (round robin thru all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+		 "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range [0-2], then affinitze the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		phba->cfg_fcp_cpu_map = 0;
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+		phba->cfg_fcp_cpu_map = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3326 fcp_cpu_map: %d out of range, using default\n",
+			val);
+	phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+	return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+		   lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 # 0  = disabled (default)
 # 1  = enabled
 # Value range is [0,1]. Default value is 0.
+#
+# This feature in under investigation and may be supported in the future.
 */
 unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 
-module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
-
 /*
 # lpfc_prot_mask: i
 #	- Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 256. The default value is
+ * This value can be set to values between 64 and 4096. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
  * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
 	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
-	    LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
-	    "Max Protection Scatter Gather Segment Count");
+/*
+ * This parameter will be depricated, the driver cannot limit the
+ * protection data s/g list.
+ */
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
+	    LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
+	    "Max Protection Scatter Gather Segment Count");
 
 struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_bg_info,
@@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_poll_tmo,
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_cpu_map,
 	&dev_attr_lpfc_fcp_wq_count,
 	&dev_attr_lpfc_fcp_eq_count,
 	&dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
 	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
 	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 888666892004..094be2cad65b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
 	unsigned int transfer_bytes, bytes_copied = 0;
 	unsigned int sg_offset, dma_offset;
 	unsigned char *dma_address, *sg_address;
-	struct scatterlist *sgel;
 	LIST_HEAD(temp_list);
-
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	bool sg_valid;
 
 	list_splice_init(&dma_buffers->list, &temp_list);
 	list_add(&dma_buffers->list, &temp_list);
 	sg_offset = 0;
-	sgel = bsg_buffers->sg_list;
+	if (to_buffers)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
+	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+		       sg_flags);
+	local_irq_save(flags);
+	sg_valid = sg_miter_next(&miter);
 	list_for_each_entry(mp, &temp_list, list) {
 		dma_offset = 0;
-		while (bytes_to_transfer && sgel &&
+		while (bytes_to_transfer && sg_valid &&
 		       (dma_offset < LPFC_BPL_SIZE)) {
 			dma_address = mp->virt + dma_offset;
 			if (sg_offset) {
 				/* Continue previous partial transfer of sg */
-				sg_address = sg_virt(sgel) + sg_offset;
-				transfer_bytes = sgel->length - sg_offset;
+				sg_address = miter.addr + sg_offset;
+				transfer_bytes = miter.length - sg_offset;
 			} else {
-				sg_address = sg_virt(sgel);
-				transfer_bytes = sgel->length;
+				sg_address = miter.addr;
+				transfer_bytes = miter.length;
 			}
 			if (bytes_to_transfer < transfer_bytes)
 				transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
 			sg_offset += transfer_bytes;
 			bytes_to_transfer -= transfer_bytes;
 			bytes_copied += transfer_bytes;
-			if (sg_offset >= sgel->length) {
+			if (sg_offset >= miter.length) {
 				sg_offset = 0;
-				sgel = sg_next(sgel);
+				sg_valid = sg_miter_next(&miter);
 			}
 		}
 	}
+	sg_miter_stop(&miter);
+	local_irq_restore(flags);
 	list_del_init(&dma_buffers->list);
 	list_splice(&temp_list, &dma_buffers->list);
 	return bytes_copied;
@@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	cmdiocbq->context1 = dd_data;
 	cmdiocbq->context2 = cmp;
 	cmdiocbq->context3 = bmp;
+	cmdiocbq->context_un.ndlp = ndlp;
 	dd_data->type = TYPE_IOCB;
 	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	ctiocb->context1 = dd_data;
 	ctiocb->context2 = cmp;
 	ctiocb->context3 = bmp;
+	ctiocb->context_un.ndlp = ndlp;
 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
 
 	dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
 	evt->wait_time_stamp = jiffies;
 	time_left = wait_event_interruptible_timeout(
 		evt->wq, !list_empty(&evt->events_to_see),
-		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+		msecs_to_jiffies(1000 *
+			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
 	if (list_empty(&evt->events_to_see))
 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
 	else {
@@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	evt->waiting = 1;
 	time_left = wait_event_interruptible_timeout(
 		evt->wq, !list_empty(&evt->events_to_see),
-		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+		msecs_to_jiffies(1000 *
+			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
 	evt->waiting = 0;
 	if (list_empty(&evt->events_to_see)) {
 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7631893ae005..d41456e5f814 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7bff3a19af56..ae1a07c57cae 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
 		if (init_utsname()->nodename[0] != '\0')
 			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
 		else
-			mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+			mod_timer(&vport->fc_fdmitmo, jiffies +
+				  msecs_to_jiffies(1000 * 60));
 	}
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index bbed8471bf0b..3cae0a92e8bd 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -29,6 +29,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
@@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 
 		icmd->un.elsreq64.remoteID = did;	/* DID */
 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
-		icmd->ulpTimeout = phba->fc_ratov * 2;
+		if (elscmd == ELS_CMD_FLOGI)
+			icmd->ulpTimeout = FF_DEF_RATOV * 2;
+		else
+			icmd->ulpTimeout = phba->fc_ratov * 2;
 	} else {
 		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
 		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 				 "0116 Xmit ELS command x%x to remote "
-				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
+				 "NPORT x%x I/O tag: x%x, port state:x%x"
+				 " fc_flag:x%x\n",
 				 elscmd, did, elsiocb->iotag,
-				 vport->port_state);
+				 vport->port_state,
+				 vport->fc_flag);
 	} else {
 		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 				 "0117 Xmit ELS response x%x to remote "
-				 "NPORT x%x I/O tag: x%x, size: x%x\n",
+				 "NPORT x%x I/O tag: x%x, size: x%x "
+				 "port_state x%x fc_flag x%x\n",
 				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
-				 cmdSize);
+				 cmdSize, vport->port_state,
+				 vport->fc_flag);
 	}
 	return elsiocb;
 
@@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_PT2PT;
 		spin_unlock_irq(shost->host_lock);
+		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+		if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+			lpfc_unregister_fcf_prep(phba);
+
+			/* The FC_VFI_REGISTERED flag will get clear in the cmpl
+			 * handler for unreg_vfi, but if we don't force the
+			 * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+			 * built with the update bit set instead of just the vp bit to
+			 * change the Nport ID.  We need to have the vp set and the
+			 * Upd cleared on topology changes.
+			 */
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_VFI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+			phba->fc_topology_changed = 0;
+			lpfc_issue_reg_vfi(vport);
+		}
 
 		/* Start discovery - this should just do CLEAR_LA */
 		lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi:
 		vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
 		if ((phba->sli_rev == LPFC_SLI_REV4) &&
 		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-		     (vport->fc_prevDID != vport->fc_myDID))) {
-			if (vport->fc_flag & FC_VFI_REGISTERED)
-				lpfc_sli4_unreg_all_rpis(vport);
+		     (vport->fc_prevDID != vport->fc_myDID) ||
+		     phba->fc_topology_changed)) {
+			if (vport->fc_flag & FC_VFI_REGISTERED) {
+				if (phba->fc_topology_changed) {
+					lpfc_unregister_fcf_prep(phba);
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_VFI_REGISTERED;
+					spin_unlock_irq(shost->host_lock);
+					phba->fc_topology_changed = 0;
+				} else {
+					lpfc_sli4_unreg_all_rpis(vport);
+				}
+			}
 			lpfc_issue_reg_vfi(vport);
 			lpfc_nlp_put(ndlp);
 			goto out;
@@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi:
 
 	/* FLOGI completes successfully */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0101 FLOGI completes successfully "
-			 "Data: x%x x%x x%x x%x\n",
+			 "0101 FLOGI completes successfully, I/O tag:x%x, "
+			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
 			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
-			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+			 vport->port_state, vport->fc_flag);
 
 	if (vport->port_state == LPFC_FLOGI) {
 		/*
@@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 	struct ls_rjt stat;
 	uint32_t cmd, did;
 	int rc;
+	uint32_t fc_flag = 0;
+	uint32_t port_state = 0;
 
 	cmd = *lp++;
 	sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		 * will be.
 		 */
 		vport->fc_myDID = PT2PT_LocalID;
-	}
+	} else
+		vport->fc_myDID = PT2PT_RemoteID;
 
 	/*
 	 * The vport state should go to LPFC_FLOGI only
 	 * AFTER we issue a FLOGI, not receive one.
 	 */
 	spin_lock_irq(shost->host_lock);
+	fc_flag = vport->fc_flag;
+	port_state = vport->port_state;
 	vport->fc_flag |= FC_PT2PT;
 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+	vport->port_state = LPFC_FLOGI;
 	spin_unlock_irq(shost->host_lock);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3311 Rcv Flogi PS x%x new PS x%x "
+			 "fc_flag x%x new fc_flag x%x\n",
+			 port_state, vport->port_state,
+			 fc_flag, vport->fc_flag);
 
 	/*
 	 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 	}
 
 	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-		mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+		mod_timer(&vport->els_tmofunc,
+			  jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	/* ELS command <elsCmd> received from NPORT <did> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "0112 ELS command x%x received from NPORT x%x "
-			 "Data: x%x\n", cmd, did, vport->port_state);
+			 "Data: x%x x%x x%x x%x\n",
+			 cmd, did, vport->port_state, vport->fc_flag,
+			 vport->fc_myDID, vport->fc_prevDID);
 	switch (cmd) {
 	case ELS_CMD_PLOGI:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 		phba->fc_stat.elsRcvPLOGI++;
 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    (phba->pport->fc_flag & FC_PT2PT)) {
+			vport->fc_prevDID = vport->fc_myDID;
+			/* Our DID needs to be updated before registering
+			 * the vfi. This is done in lpfc_rcv_plogi but
+			 * that is called after the reg_vfi.
+			 */
+			vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					 "3312 Remote port assigned DID x%x "
+					 "%x\n", vport->fc_myDID,
+					 vport->fc_prevDID);
+		}
 
 		lpfc_send_els_event(vport, ndlp, payload);
 
@@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
+		shost = lpfc_shost_from_vport(vport);
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
 			    (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			 * another NPort and the other side has initiated
 			 * the PLOGI before responding to our FLOGI.
 			 */
+			if (phba->sli_rev == LPFC_SLI_REV4 &&
+			    (phba->fc_topology_changed ||
+			     vport->fc_myDID != vport->fc_prevDID)) {
+				lpfc_unregister_fcf_prep(phba);
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_VFI_REGISTERED;
+				spin_unlock_irq(shost->host_lock);
+				phba->fc_topology_changed = 0;
+				lpfc_issue_reg_vfi(vport);
+			}
 		}
 
-		shost = lpfc_shost_from_vport(vport);
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
 		spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	spin_lock_irq(shost->host_lock);
 	if (vport->fc_flag & FC_DISC_DELAYED) {
 		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"3334 Delay fc port discovery for %d seconds\n",
+				phba->fc_ratov);
 		mod_timer(&vport->delayed_disc_tmo,
-			  jiffies + HZ * phba->fc_ratov);
+			  jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
 		return;
 	}
 	spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
 		return;
 
 	shost = lpfc_shost_from_vport(phba->pport);
-	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_DELAY_TMO;
 	spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
 	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
 	/* Start a timer to unblock fabric iocbs after 100ms */
 	if (!blocked)
-		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+		mod_timer(&phba->fabric_block_timer,
+			  jiffies + msecs_to_jiffies(100));
 
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 326e05a65a73..0f6e2548f35d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	if (!list_empty(&evtp->evt_listp))
 		return;
 
+	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
 	spin_lock_irq(&phba->hbalock);
 	/* We need to hold the node by incrementing the reference
 	 * count until this queued work is done
 	 */
-	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_DEV_LOSS;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 		lpfc_linkup_port(vports[i]);
 	lpfc_destroy_vport_work_array(phba, vports);
-	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-	    (phba->sli_rev < LPFC_SLI_REV4))
-		lpfc_issue_clear_la(phba, phba->pport);
 
 	return 0;
 }
@@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
 		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
 		phba->hba_flag &= ~FCF_TS_INPROG;
-		if (phba->pport->port_state != LPFC_FLOGI) {
+		if (phba->pport->port_state != LPFC_FLOGI &&
+		    phba->pport->fc_flag & FC_FABRIC) {
 			phba->hba_flag |= FCF_RR_INPROG;
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 					"2836 New FCF matches in-use "
-					"FCF (x%x)\n",
-					phba->fcf.current_rec.fcf_indx);
+					"FCF (x%x), port_state:x%x, "
+					"fc_flag:x%x\n",
+					phba->fcf.current_rec.fcf_indx,
+					phba->pport->port_state,
+					phba->pport->fc_flag);
 			goto out;
 		} else
 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@ void
 lpfc_issue_init_vpi(struct lpfc_vport *vport)
 {
 	LPFC_MBOXQ_t *mboxq;
-	int rc;
+	int rc, vpi;
+
+	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+		vpi = lpfc_alloc_vpi(vport->phba);
+		if (!vpi) {
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_MBOX,
+					 "3303 Failed to obtain vport vpi\n");
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			return;
+		}
+		vport->vpi = vpi;
+	}
 
 	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq) {
@@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		goto out_free_mem;
 	}
 
-	/* If the VFI is already registered, there is nothing else to do */
+	/* If the VFI is already registered, there is nothing else to do
+	 * Unless this was a VFI update and we are in PT2PT mode, then
+	 * we should drop through to set the port state to ready.
+	 */
 	if (vport->fc_flag & FC_VFI_REGISTERED)
-		goto out_free_mem;
+		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+		      vport->fc_flag & FC_PT2PT))
+			goto out_free_mem;
 
 	/* The VPI is implicitly registered when the VFI is registered */
 	spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		goto out_free_mem;
 	}
 
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
+			 "alpacnt:%d LinkState:%x topology:%x\n",
+			 vport->port_state, vport->fc_flag, vport->fc_myDID,
+			 vport->phba->alpa_map[0],
+			 phba->link_state, phba->fc_topology);
+
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 		/*
 		 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		/* Use loop map to make discovery list */
 		lpfc_disc_list_loopmap(vport);
 		/* Start discovery */
-		lpfc_disc_start(vport);
+		if (vport->fc_flag & FC_PT2PT)
+			vport->port_state = LPFC_VPORT_READY;
+		else
+			lpfc_disc_start(vport);
 	} else {
 		lpfc_start_fdiscs(phba);
 		lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 		break;
 	}
 
+	if (phba->fc_topology &&
+	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3314 Toplogy changed was 0x%x is 0x%x\n",
+				phba->fc_topology,
+				bf_get(lpfc_mbx_read_top_topology, la));
+		phba->fc_topology_changed = 1;
+	}
+
 	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
 	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
@@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
 			 tmo, vport->port_state, vport->fc_flag);
 	}
 
-	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag |= FC_DISC_TMO;
 	spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
 	uint32_t clear_la_pending;
 	int did_changed;
 
-	if (!lpfc_is_link_up(phba))
+	if (!lpfc_is_link_up(phba)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+				 "3315 Link is not up %x\n",
+				 phba->link_state);
 		return;
+	}
 
 	if (phba->link_state == LPFC_CLEAR_LA)
 		clear_la_pending = 1;
@@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
 	if (num_sent)
 		return;
 
-	/* Register the VPI for SLI3, NON-NPIV only. */
+	/* Register the VPI for SLI3, NPIV only. */
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 	    !(vport->fc_flag & FC_PT2PT) &&
 	    !(vport->fc_flag & FC_RSCN_MODE) &&
 	    (phba->sli_rev < LPFC_SLI_REV4)) {
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_issue_clear_la(phba, vport);
 		lpfc_issue_reg_vpi(phba, vport);
 		return;
 	}
@@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	if (vport->cfg_fdmi_on == 1)
 		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
 	else
-		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+		mod_timer(&vport->fc_fdmitmo,
+			  jiffies + msecs_to_jiffies(1000 * 60));
 
 	/* decrement the node reference count held for this callback
 	 * function.
@@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host *shost;
-	int i, rc;
+	int i = 0, rc;
 
 	/* Unregister RPIs */
 	if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
 			spin_unlock_irq(shost->host_lock);
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
+	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+		if (ndlp)
+			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+		lpfc_cleanup_pending_mbox(phba->pport);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(phba->pport);
+		lpfc_mbx_unreg_vpi(phba->pport);
+		shost = lpfc_shost_from_vport(phba->pport);
+		spin_lock_irq(shost->host_lock);
+		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+		spin_unlock_irq(shost->host_lock);
+	}
 
 	/* Cleanup any outstanding ELS commands */
 	lpfc_els_flush_all_cmd(phba);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index e8c476031703..83700c18f468 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
 #define BG_OP_IN_CSUM_OUT_CSUM		0x5
 #define BG_OP_IN_CRC_OUT_CSUM		0x6
 #define BG_OP_IN_CSUM_OUT_CRC		0x7
+#define BG_OP_RAW_MODE			0x8
 
 struct lpfc_pde5 {
 	uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1dd2f6f0a127..713a4613ec3a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -200,6 +200,11 @@ struct lpfc_sli_intf {
 #define LPFC_MAX_IMAX		5000000
 #define LPFC_DEF_IMAX		50000
 
+#define LPFC_MIN_CPU_MAP	0
+#define LPFC_MAX_CPU_MAP	2
+#define LPFC_HBA_CPU_MAP	1
+#define LPFC_DRIVER_CPU_MAP	2	/* Default */
+
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES	8
 
@@ -621,7 +626,7 @@ struct lpfc_register {
 #define lpfc_sliport_status_rdy_SHIFT	23
 #define lpfc_sliport_status_rdy_MASK	0x1
 #define lpfc_sliport_status_rdy_WORD	word0
-#define MAX_IF_TYPE_2_RESETS	1000
+#define MAX_IF_TYPE_2_RESETS	6
 
 #define LPFC_CTL_PORT_CTL_OFFSET	0x408
 #define lpfc_sliport_ctrl_end_SHIFT	30
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90b8b0515e23..cb465b253910 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/miscdevice.h>
+#include <linux/percpu.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@ char *_dump_buf_dif;
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
+
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
 	/* Set up ring-0 (ELS) timer */
 	timeout = phba->fc_ratov * 2;
-	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+	mod_timer(&vport->els_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * timeout));
 	/* Set up heart beat (HB) timer */
-	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+	mod_timer(&phba->hb_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 	phba->hb_outstanding = 0;
 	phba->last_completion_time = jiffies;
 	/* Set up error attention (ERATT) polling timer */
-	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+	mod_timer(&phba->eratt_poll,
+		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
 	if (phba->hba_flag & LINK_DISABLED) {
 		lpfc_printf_log(phba,
@@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 		psb->pCmd = NULL;
 		psb->status = IOSTAT_SUCCESS;
 	}
-	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
-	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 	return 0;
 }
 
@@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 	    !(phba->link_state == LPFC_HBA_ERROR) &&
 	    !(phba->pport->load_flag & FC_UNLOADING))
 		mod_timer(&phba->hb_tmofunc,
-			  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+			  jiffies +
+			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 	return;
 }
 
@@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 
 	spin_lock_irq(&phba->pport->work_port_lock);
 
-	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
-		jiffies)) {
+	if (time_after(phba->last_completion_time +
+			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+			jiffies)) {
 		spin_unlock_irq(&phba->pport->work_port_lock);
 		if (!phba->hb_outstanding)
 			mod_timer(&phba->hb_tmofunc,
-				  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+				  jiffies +
+				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 		else
 			mod_timer(&phba->hb_tmofunc,
-				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+				  jiffies +
+				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
 		return;
 	}
 	spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 		if (!pmboxq) {
 			mod_timer(&phba->hb_tmofunc,
 				  jiffies +
-				  HZ * LPFC_HB_MBOX_INTERVAL);
+				  msecs_to_jiffies(1000 *
+						   LPFC_HB_MBOX_INTERVAL));
 			return;
 		}
1110 1122
@@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1120 phba->mbox_mem_pool); 1132 phba->mbox_mem_pool);
1121 mod_timer(&phba->hb_tmofunc, 1133 mod_timer(&phba->hb_tmofunc,
1122 jiffies + 1134 jiffies +
1123 HZ * LPFC_HB_MBOX_INTERVAL); 1135 msecs_to_jiffies(1000 *
1136 LPFC_HB_MBOX_INTERVAL));
1124 return; 1137 return;
1125 } 1138 }
1126 phba->skipped_hb = 0; 1139 phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 			phba->skipped_hb = jiffies;
 
 		mod_timer(&phba->hb_tmofunc,
-			  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+			  jiffies +
+			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
 		return;
 	} else {
 		/*
@@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 				jiffies_to_msecs(jiffies
 					 - phba->last_completion_time));
 			mod_timer(&phba->hb_tmofunc,
-				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+				  jiffies +
+				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
 		}
 	}
 }
@@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
  * other than Port Error 6 has been detected.
  **/
-static void
+void
 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
 	struct lpfc_vport *vport;
 	struct lpfc_vport **vports;
 	int i;
+	bool vpis_cleared = false;
 
 	if (!phba)
 		return 0;
@@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
 			lpfc_unblock_mgmt_io(phba);
 			return 1;
 		}
+		spin_lock_irq(&phba->hbalock);
+		if (!phba->sli4_hba.max_cfg_param.vpi_used)
+			vpis_cleared = true;
+		spin_unlock_irq(&phba->hbalock);
 	} else {
 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
 			lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-			if (phba->sli_rev == LPFC_SLI_REV4)
+			if (phba->sli_rev == LPFC_SLI_REV4) {
 				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+				if ((vpis_cleared) &&
+				    (vports[i]->port_type !=
+					LPFC_PHYSICAL_PORT))
+					vports[i]->vpi = 0;
+			}
 			spin_unlock_irq(shost->host_lock);
 		}
 		lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	struct lpfc_iocbq *io, *io_next;
 
 	spin_lock_irq(&phba->hbalock);
+
 	/* Release all the lpfc_scsi_bufs maintained by this host. */
-	spin_lock(&phba->scsi_buf_list_lock);
-	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+
+	spin_lock(&phba->scsi_buf_list_put_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+				 list) {
 		list_del(&sb->list);
 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
 	}
-	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
+
+	spin_lock(&phba->scsi_buf_list_get_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+				 list) {
+		list_del(&sb->list);
+		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+			      sb->dma_handle);
+		kfree(sb);
+		phba->total_scsi_bufs--;
+	}
+	spin_unlock(&phba->scsi_buf_list_get_lock);
 
 	/* Release all the lpfc_iocbq entries maintained by this host. */
 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2978 phba->sli4_hba.scsi_xri_cnt, 3017 phba->sli4_hba.scsi_xri_cnt,
2979 phba->sli4_hba.scsi_xri_max); 3018 phba->sli4_hba.scsi_xri_max);
2980 3019
2981 spin_lock_irq(&phba->scsi_buf_list_lock); 3020 spin_lock_irq(&phba->scsi_buf_list_get_lock);
2982 list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); 3021 spin_lock_irq(&phba->scsi_buf_list_put_lock);
2983 spin_unlock_irq(&phba->scsi_buf_list_lock); 3022 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3023 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3024 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
3025 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
2984 3026
2985 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3027 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
 2986 /* max scsi xri shrunk below the allocated scsi buffers */ 3028
@@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2994 psb->dma_handle); 3036 psb->dma_handle);
2995 kfree(psb); 3037 kfree(psb);
2996 } 3038 }
2997 spin_lock_irq(&phba->scsi_buf_list_lock); 3039 spin_lock_irq(&phba->scsi_buf_list_get_lock);
2998 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3040 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
2999 spin_unlock_irq(&phba->scsi_buf_list_lock); 3041 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3000 } 3042 }
3001 3043
3002 /* update xris associated to remaining allocated scsi buffers */ 3044 /* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3014 psb->cur_iocbq.sli4_lxritag = lxri; 3056 psb->cur_iocbq.sli4_lxritag = lxri;
3015 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3057 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3016 } 3058 }
3017 spin_lock_irq(&phba->scsi_buf_list_lock); 3059 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3018 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); 3060 spin_lock_irq(&phba->scsi_buf_list_put_lock);
3019 spin_unlock_irq(&phba->scsi_buf_list_lock); 3061 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3062 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3063 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
3064 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3020 3065
3021 return 0; 3066 return 0;
3022 3067
@@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3197 stat = 1; 3242 stat = 1;
3198 goto finished; 3243 goto finished;
3199 } 3244 }
3200 if (time >= 30 * HZ) { 3245 if (time >= msecs_to_jiffies(30 * 1000)) {
3201 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3246 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3202 "0461 Scanning longer than 30 " 3247 "0461 Scanning longer than 30 "
3203 "seconds. Continuing initialization\n"); 3248 "seconds. Continuing initialization\n");
3204 stat = 1; 3249 stat = 1;
3205 goto finished; 3250 goto finished;
3206 } 3251 }
3207 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 3252 if (time >= msecs_to_jiffies(15 * 1000) &&
3253 phba->link_state <= LPFC_LINK_DOWN) {
3208 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3254 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3209 "0465 Link down longer than 15 " 3255 "0465 Link down longer than 15 "
3210 "seconds. Continuing initialization\n"); 3256 "seconds. Continuing initialization\n");
@@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3216 goto finished; 3262 goto finished;
3217 if (vport->num_disc_nodes || vport->fc_prli_sent) 3263 if (vport->num_disc_nodes || vport->fc_prli_sent)
3218 goto finished; 3264 goto finished;
3219 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 3265 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3220 goto finished; 3266 goto finished;
3221 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 3267 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3222 goto finished; 3268 goto finished;
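
lpfc_scan_finished applies a three-stage ladder, now expressed in milliseconds: give up waiting after 30 seconds no matter what, after 15 seconds if the link never came up, and never declare the scan done inside the first 2 seconds while no targets have mapped. A condensed sketch of just that ladder (the real routine also consults discovery counters, PRLI state, and mailbox activity):

    #include <stdio.h>

    /* returns 1 when the scan may be declared finished (condensed sketch) */
    static int scan_finished(unsigned long ms, int link_down, int mapped_cnt)
    {
        if (ms >= 30000)
            return 1;               /* scanned long enough, move on */
        if (ms >= 15000 && link_down)
            return 1;               /* link never came up, move on */
        if (mapped_cnt == 0 && ms < 2000)
            return 0;               /* give targets 2s to map before deciding */
        return 1;                   /* discovery has settled */
    }

    int main(void)
    {
        printf("%d %d %d\n", scan_finished(31000, 0, 4),
               scan_finished(16000, 1, 0), scan_finished(1000, 0, 0));
        return 0;
    }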
@@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4215 * If there are other active VLinks present, 4261 * If there are other active VLinks present,
4216 * re-instantiate the Vlink using FDISC. 4262 * re-instantiate the Vlink using FDISC.
4217 */ 4263 */
4218 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 4264 mod_timer(&ndlp->nlp_delayfunc,
4265 jiffies + msecs_to_jiffies(1000));
4219 shost = lpfc_shost_from_vport(vport); 4266 shost = lpfc_shost_from_vport(vport);
4220 spin_lock_irq(shost->host_lock); 4267 spin_lock_irq(shost->host_lock);
4221 ndlp->nlp_flag |= NLP_DELAY_TMO; 4268 ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4707 return -ENOMEM; 4754 return -ENOMEM;
4708 4755
4709 /* 4756 /*
 4710 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size 4757 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
4711 * used to create the sg_dma_buf_pool must be dynamically calculated. 4758 * used to create the sg_dma_buf_pool must be dynamically calculated.
4712 * 2 segments are added since the IOCB needs a command and response bde.
4713 */ 4759 */
4714 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4715 sizeof(struct fcp_rsp) +
4716 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4717 4760
4761 /* Initialize the host templates the configured values. */
4762 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4763 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4764
4765 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
4718 if (phba->cfg_enable_bg) { 4766 if (phba->cfg_enable_bg) {
4719 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 4767 /*
4720 phba->cfg_sg_dma_buf_size += 4768 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
 4721 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 4769 * the FCP rsp, and a BDE for each. Since we have no control
 4770 * over how many protection data segments the SCSI Layer
 4771 * will hand us (ie: there could be one for every block
 4772 * in the IO), we just allocate enough BDEs to accommodate
4773 * our max amount and we need to limit lpfc_sg_seg_cnt to
4774 * minimize the risk of running out.
4775 */
4776 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4777 sizeof(struct fcp_rsp) +
4778 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
4779
4780 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
4781 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
4782
4783 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
4784 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4785 } else {
4786 /*
4787 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4788 * the FCP rsp, a BDE for each, and a BDE for up to
4789 * cfg_sg_seg_cnt data segments.
4790 */
4791 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4792 sizeof(struct fcp_rsp) +
4793 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4794
4795 /* Total BDEs in BPL for scsi_sg_list */
4796 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4722 } 4797 }
4723 4798
4724 /* Also reinitialize the host templates with new values. */ 4799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4725 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4800 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
4726 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4801 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4802 phba->cfg_total_seg_cnt);
4727 4803
4728 phba->max_vpi = LPFC_MAX_VPI; 4804 phba->max_vpi = LPFC_MAX_VPI;
4729 /* This will be set to correct value after config_port mbox */ 4805 /* This will be set to correct value after config_port mbox */
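
The two branches above are worth spelling out numerically. A standalone sketch of the arithmetic (the struct sizes are made-up placeholders; the real fcp_cmnd, fcp_rsp, and ulp_bde64 sizes come from the lpfc headers):

    #include <stdio.h>

    #define LPFC_MAX_SG_SEG_CNT     4096
    #define LPFC_MAX_SG_SEG_CNT_DIF  512

    /* placeholder sizes, not the driver's real structs */
    #define FCP_CMND_SZ 32
    #define FCP_RSP_SZ  96
    #define BDE64_SZ    12

    static int buf_size(int sg_seg_cnt, int enable_bg, int *total_seg_cnt)
    {
        if (enable_bg) {
            /* worst case: a BDE for every possible protection segment */
            *total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
            return FCP_CMND_SZ + FCP_RSP_SZ + LPFC_MAX_SG_SEG_CNT * BDE64_SZ;
        }
        /* data BDEs plus the two reserved for FCP cmnd and rsp */
        *total_seg_cnt = sg_seg_cnt + 2;
        return FCP_CMND_SZ + FCP_RSP_SZ + (sg_seg_cnt + 2) * BDE64_SZ;
    }

    int main(void)
    {
        int total;

        printf("BG off: %d bytes\n", buf_size(64, 0, &total));
        printf("BG on:  %d bytes, total_seg_cnt %d\n",
               buf_size(64, 1, &total), total);
        return 0;
    }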
@@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4789static int 4865static int
4790lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4866lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4791{ 4867{
4868 struct lpfc_vector_map_info *cpup;
4792 struct lpfc_sli *psli; 4869 struct lpfc_sli *psli;
4793 LPFC_MBOXQ_t *mboxq; 4870 LPFC_MBOXQ_t *mboxq;
4794 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4871 int rc, i, hbq_count, max_buf_size;
4795 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4872 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4796 struct lpfc_mqe *mqe; 4873 struct lpfc_mqe *mqe;
4797 int longs, sli_family; 4874 int longs;
4798 int sges_per_segment;
4799 4875
4800 /* Before proceed, wait for POST done and device ready */ 4876 /* Before proceed, wait for POST done and device ready */
4801 rc = lpfc_sli4_post_status_check(phba); 4877 rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4863 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4939 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4864 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4940 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4865 4941
4866 /* With BlockGuard we can have multiple SGEs per Data Segemnt */
4867 sges_per_segment = 1;
4868 if (phba->cfg_enable_bg)
4869 sges_per_segment = 2;
4870
4871 /* 4942 /*
4872 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 4943 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4873 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 4944 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4878 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 4949 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4879 if (!phba->sli.ring) 4950 if (!phba->sli.ring)
4880 return -ENOMEM; 4951 return -ENOMEM;
4952
4881 /* 4953 /*
4882 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4954 * It doesn't matter what family our adapter is in, we are
4955 * limited to 2 Pages, 512 SGEs, for our SGL.
4956 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
4957 */
4958 max_buf_size = (2 * SLI4_PAGE_SIZE);
4959 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
4960 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
4961
4962 /*
 4963 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
4883 * used to create the sg_dma_buf_pool must be dynamically calculated. 4964 * used to create the sg_dma_buf_pool must be dynamically calculated.
4884 * 2 segments are added since the IOCB needs a command and response bde.
4885 * To insure that the scsi sgl does not cross a 4k page boundary only
4886 * sgl sizes of must be a power of 2.
4887 */ 4965 */
4888 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4966
4889 (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * 4967 if (phba->cfg_enable_bg) {
4890 sizeof(struct sli4_sge))); 4968 /*
4891 4969 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4892 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 4970 * the FCP rsp, and a SGE for each. Sice we have no control
4893 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4971 * over how many protection data segments the SCSI Layer
4894 switch (sli_family) { 4972 * will hand us (ie: there could be one for every block
4895 case LPFC_SLI_INTF_FAMILY_BE2: 4973 * in the IO), we just allocate enough SGEs to accomidate
4896 case LPFC_SLI_INTF_FAMILY_BE3: 4974 * our max amount and we need to limit lpfc_sg_seg_cnt to
4897 /* There is a single hint for BE - 2 pages per BPL. */ 4975 * minimize the risk of running out.
4898 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == 4976 */
4899 LPFC_SLI_INTF_SLI_HINT1_1) 4977 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4900 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4978 sizeof(struct fcp_rsp) + max_buf_size;
4901 break; 4979
4902 case LPFC_SLI_INTF_FAMILY_LNCR_A0: 4980 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
4903 case LPFC_SLI_INTF_FAMILY_LNCR_B0: 4981 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
4904 default: 4982
4905 break; 4983 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
4984 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
4985 } else {
4986 /*
4987 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4988 * the FCP rsp, a SGE for each, and a SGE for up to
4989 * cfg_sg_seg_cnt data segments.
4990 */
4991 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4992 sizeof(struct fcp_rsp) +
4993 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
4994
4995 /* Total SGEs for scsi_sg_list */
4996 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4997 /*
4998 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
4999 * to post 1 page for the SGL.
5000 */
4906 } 5001 }
4907 5002
4908 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 5003 /* Initialize the host templates with the updated values. */
4909 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 5004 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4910 dma_buf_size = dma_buf_size << 1) 5005 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4911 ; 5006
4912 if (dma_buf_size == max_buf_size) 5007 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
4913 phba->cfg_sg_seg_cnt = (dma_buf_size - 5008 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
4914 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 5009 else
4915 (2 * sizeof(struct sli4_sge))) / 5010 phba->cfg_sg_dma_buf_size =
4916 sizeof(struct sli4_sge); 5011 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
4917 phba->cfg_sg_dma_buf_size = dma_buf_size; 5012
5013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5014 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5015 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5016 phba->cfg_total_seg_cnt);
4918 5017
4919 /* Initialize buffer queue management fields */ 5018 /* Initialize buffer queue management fields */
4920 hbq_count = lpfc_sli_hbq_count(); 5019 hbq_count = lpfc_sli_hbq_count();
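
The SLI-4 path then clamps the computed size to a floor and rounds it up to a whole number of pages, which is what lets a scsi_buf's SGL avoid straddling a 4K boundary. A sketch of that rounding, with SLI4_PAGE_ALIGN modeled inline (the constants mirror the lpfc.h definitions):

    #include <stdio.h>

    #define SLI4_PAGE_SIZE          4096
    #define LPFC_MIN_SG_SLI4_BUF_SZ 0x800

    /* round x up to the next SLI4 page multiple */
    static unsigned int sli4_page_align(unsigned int x)
    {
        return (x + SLI4_PAGE_SIZE - 1) & ~(SLI4_PAGE_SIZE - 1);
    }

    int main(void)
    {
        unsigned int sizes[] = { 0x400, 0x900, 0x2040 };

        for (int i = 0; i < 3; i++) {
            unsigned int sz = sizes[i] <= LPFC_MIN_SG_SLI4_BUF_SZ ?
                              LPFC_MIN_SG_SLI4_BUF_SZ :
                              sli4_page_align(sizes[i]);
            printf("0x%x -> 0x%x\n", sizes[i], sz);
        }
        return 0;
    }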
@@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5104 goto out_free_fcp_eq_hdl; 5203 goto out_free_fcp_eq_hdl;
5105 } 5204 }
5106 5205
5206 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5207 phba->sli4_hba.num_present_cpu),
5208 GFP_KERNEL);
5209 if (!phba->sli4_hba.cpu_map) {
5210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5211 "3327 Failed allocate memory for msi-x "
5212 "interrupt vector mapping\n");
5213 rc = -ENOMEM;
5214 goto out_free_msix;
5215 }
5216 /* Initialize io channels for round robin */
5217 cpup = phba->sli4_hba.cpu_map;
5218 rc = 0;
5219 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5220 cpup->channel_id = rc;
5221 rc++;
5222 if (rc >= phba->cfg_fcp_io_channel)
5223 rc = 0;
5224 }
5225
5107 /* 5226 /*
5108 * Enable sr-iov virtual functions if supported and configured 5227 * Enable sr-iov virtual functions if supported and configured
5109 * through the module parameter. 5228 * through the module parameter.
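
The cpu_map initialization above is a plain modulo round-robin: present CPU n starts out on IO channel n mod cfg_fcp_io_channel, before lpfc_sli4_set_affinity refines the mapping later. Modeled standalone:

    #include <stdio.h>

    int main(void)
    {
        int num_present_cpu = 8, fcp_io_channel = 3, chan = 0;

        /* mirror of the init loop: cpu N gets channel N % fcp_io_channel */
        for (int cpu = 0; cpu < num_present_cpu; cpu++) {
            printf("cpu %d -> channel %d\n", cpu, chan);
            if (++chan >= fcp_io_channel)
                chan = 0;
        }
        return 0;
    }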
@@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5123 5242
5124 return 0; 5243 return 0;
5125 5244
5245out_free_msix:
5246 kfree(phba->sli4_hba.msix_entries);
5126out_free_fcp_eq_hdl: 5247out_free_fcp_eq_hdl:
5127 kfree(phba->sli4_hba.fcp_eq_hdl); 5248 kfree(phba->sli4_hba.fcp_eq_hdl);
5128out_free_fcf_rr_bmask: 5249out_free_fcf_rr_bmask:
@@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5152{ 5273{
5153 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 5274 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5154 5275
5276 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5277 kfree(phba->sli4_hba.cpu_map);
5278 phba->sli4_hba.num_present_cpu = 0;
5279 phba->sli4_hba.num_online_cpu = 0;
5280
5155 /* Free memory allocated for msi-x interrupt vector entries */ 5281 /* Free memory allocated for msi-x interrupt vector entries */
5156 kfree(phba->sli4_hba.msix_entries); 5282 kfree(phba->sli4_hba.msix_entries);
5157 5283
@@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5260 init_waitqueue_head(&phba->work_waitq); 5386 init_waitqueue_head(&phba->work_waitq);
5261 5387
5262 /* Initialize the scsi buffer list used by driver for scsi IO */ 5388 /* Initialize the scsi buffer list used by driver for scsi IO */
5263 spin_lock_init(&phba->scsi_buf_list_lock); 5389 spin_lock_init(&phba->scsi_buf_list_get_lock);
5264 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 5390 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5391 spin_lock_init(&phba->scsi_buf_list_put_lock);
5392 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5265 5393
5266 /* Initialize the fabric iocb list */ 5394 /* Initialize the fabric iocb list */
5267 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5395 INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6696 int cfg_fcp_io_channel; 6824 int cfg_fcp_io_channel;
6697 uint32_t cpu; 6825 uint32_t cpu;
6698 uint32_t i = 0; 6826 uint32_t i = 0;
6827 uint32_t j = 0;
6699 6828
6700 6829
6701 /* 6830 /*
@@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6706 /* Sanity check on HBA EQ parameters */ 6835 /* Sanity check on HBA EQ parameters */
6707 cfg_fcp_io_channel = phba->cfg_fcp_io_channel; 6836 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6708 6837
 6709 /* It doesn't make sense to have more io channels than CPUs */ 6838 /* It doesn't make sense to have more io channels than online CPUs */
6710 for_each_online_cpu(cpu) { 6839 for_each_present_cpu(cpu) {
6711 i++; 6840 if (cpu_online(cpu))
6841 i++;
6842 j++;
6712 } 6843 }
6844 phba->sli4_hba.num_online_cpu = i;
6845 phba->sli4_hba.num_present_cpu = j;
6846
6713 if (i < cfg_fcp_io_channel) { 6847 if (i < cfg_fcp_io_channel) {
6714 lpfc_printf_log(phba, 6848 lpfc_printf_log(phba,
6715 KERN_ERR, LOG_INIT, 6849 KERN_ERR, LOG_INIT,
6716 "3188 Reducing IO channels to match number of " 6850 "3188 Reducing IO channels to match number of "
6717 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); 6851 "online CPUs: from %d to %d\n",
6852 cfg_fcp_io_channel, i);
6718 cfg_fcp_io_channel = i; 6853 cfg_fcp_io_channel = i;
6719 } 6854 }
6720 6855
@@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7743 7878
7744out: 7879out:
7745 /* Catch the not-ready port failure after a port reset. */ 7880 /* Catch the not-ready port failure after a port reset. */
7746 if (num_resets >= MAX_IF_TYPE_2_RESETS) 7881 if (num_resets >= MAX_IF_TYPE_2_RESETS) {
7882 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7883 "3317 HBA not functional: IP Reset Failed "
7884 "after (%d) retries, try: "
7885 "echo fw_reset > board_mode\n", num_resets);
7747 rc = -ENODEV; 7886 rc = -ENODEV;
7887 }
7748 7888
7749 return rc; 7889 return rc;
7750} 7890}
@@ -8209,6 +8349,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
8209} 8349}
8210 8350
8211/** 8351/**
8352 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
8353 * @phba: pointer to lpfc hba data structure.
8354 *
8355 * Find next available CPU to use for IRQ to CPU affinity.
8356 */
8357static int
8358lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8359{
8360 struct lpfc_vector_map_info *cpup;
8361 int cpu;
8362
8363 cpup = phba->sli4_hba.cpu_map;
8364 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8365 /* CPU must be online */
8366 if (cpu_online(cpu)) {
8367 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8368 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8369 (cpup->phys_id == phys_id)) {
8370 return cpu;
8371 }
8372 }
8373 cpup++;
8374 }
8375
8376 /*
8377 * If we get here, we have used ALL CPUs for the specific
8378 * phys_id. Now we need to clear out lpfc_used_cpu and start
8379 * reusing CPUs.
8380 */
8381
8382 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8383 if (lpfc_used_cpu[cpu] == phys_id)
8384 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8385 }
8386
8387 cpup = phba->sli4_hba.cpu_map;
8388 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8389 /* CPU must be online */
8390 if (cpu_online(cpu)) {
8391 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8392 (cpup->phys_id == phys_id)) {
8393 return cpu;
8394 }
8395 }
8396 cpup++;
8397 }
8398 return LPFC_VECTOR_MAP_EMPTY;
8399}
8400
8401/**
8402 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
8403 * @phba: pointer to lpfc hba data structure.
8404 * @vectors: number of HBA vectors
8405 *
8406 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
 8407 * affinitization across multiple physical CPUs (numa nodes).
8408 * In addition, this routine will assign an IO channel for each CPU
8409 * to use when issuing I/Os.
8410 */
8411static int
8412lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8413{
8414 int i, idx, saved_chann, used_chann, cpu, phys_id;
8415 int max_phys_id, num_io_channel, first_cpu;
8416 struct lpfc_vector_map_info *cpup;
8417#ifdef CONFIG_X86
8418 struct cpuinfo_x86 *cpuinfo;
8419#endif
8420 struct cpumask *mask;
8421 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8422
8423 /* If there is no mapping, just return */
8424 if (!phba->cfg_fcp_cpu_map)
8425 return 1;
8426
8427 /* Init cpu_map array */
8428 memset(phba->sli4_hba.cpu_map, 0xff,
8429 (sizeof(struct lpfc_vector_map_info) *
8430 phba->sli4_hba.num_present_cpu));
8431
8432 max_phys_id = 0;
8433 phys_id = 0;
8434 num_io_channel = 0;
8435 first_cpu = LPFC_VECTOR_MAP_EMPTY;
8436
8437 /* Update CPU map with physical id and core id of each CPU */
8438 cpup = phba->sli4_hba.cpu_map;
8439 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8440#ifdef CONFIG_X86
8441 cpuinfo = &cpu_data(cpu);
8442 cpup->phys_id = cpuinfo->phys_proc_id;
8443 cpup->core_id = cpuinfo->cpu_core_id;
8444#else
8445 /* No distinction between CPUs for other platforms */
8446 cpup->phys_id = 0;
8447 cpup->core_id = 0;
8448#endif
8449
8450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8451 "3328 CPU physid %d coreid %d\n",
8452 cpup->phys_id, cpup->core_id);
8453
8454 if (cpup->phys_id > max_phys_id)
8455 max_phys_id = cpup->phys_id;
8456 cpup++;
8457 }
8458
8459 /* Now associate the HBA vectors with specific CPUs */
8460 for (idx = 0; idx < vectors; idx++) {
8461 cpup = phba->sli4_hba.cpu_map;
8462 cpu = lpfc_find_next_cpu(phba, phys_id);
8463 if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8464
8465 /* Try for all phys_id's */
8466 for (i = 1; i < max_phys_id; i++) {
8467 phys_id++;
8468 if (phys_id > max_phys_id)
8469 phys_id = 0;
8470 cpu = lpfc_find_next_cpu(phba, phys_id);
8471 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8472 continue;
8473 goto found;
8474 }
8475
8476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8477 "3329 Cannot set affinity:"
8478 "Error mapping vector %d (%d)\n",
8479 idx, vectors);
8480 return 0;
8481 }
8482found:
8483 cpup += cpu;
8484 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8485 lpfc_used_cpu[cpu] = phys_id;
8486
8487 /* Associate vector with selected CPU */
8488 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8489
8490 /* Associate IO channel with selected CPU */
8491 cpup->channel_id = idx;
8492 num_io_channel++;
8493
8494 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8495 first_cpu = cpu;
8496
8497 /* Now affinitize to the selected CPU */
8498 mask = &cpup->maskbits;
8499 cpumask_clear(mask);
8500 cpumask_set_cpu(cpu, mask);
8501 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8502 vector, mask);
8503
8504 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8505 "3330 Set Affinity: CPU %d channel %d "
8506 "irq %d (%x)\n",
8507 cpu, cpup->channel_id,
8508 phba->sli4_hba.msix_entries[idx].vector, i);
8509
8510 /* Spread vector mapping across multple physical CPU nodes */
8511 phys_id++;
8512 if (phys_id > max_phys_id)
8513 phys_id = 0;
8514 }
8515
8516 /*
8517 * Finally fill in the IO channel for any remaining CPUs.
8518 * At this point, all IO channels have been assigned to a specific
8519 * MSIx vector, mapped to a specific CPU.
 8520 * Base the remaining IO channel assignments on the IO channels
 8521 * already assigned to other CPUs on the same phys_id.
8522 */
8523 for (i = 0; i <= max_phys_id; i++) {
8524 /*
8525 * If there are no io channels already mapped to
8526 * this phys_id, just round robin thru the io_channels.
8527 * Setup chann[] for round robin.
8528 */
8529 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8530 chann[idx] = idx;
8531
8532 saved_chann = 0;
8533 used_chann = 0;
8534
8535 /*
8536 * First build a list of IO channels already assigned
8537 * to this phys_id before reassigning the same IO
8538 * channels to the remaining CPUs.
8539 */
8540 cpup = phba->sli4_hba.cpu_map;
8541 cpu = first_cpu;
8542 cpup += cpu;
8543 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8544 idx++) {
8545 if (cpup->phys_id == i) {
8546 /*
8547 * Save any IO channels that are
8548 * already mapped to this phys_id.
8549 */
8550 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8551 chann[saved_chann] =
8552 cpup->channel_id;
8553 saved_chann++;
8554 goto out;
8555 }
8556
8557 /* See if we are using round-robin */
8558 if (saved_chann == 0)
8559 saved_chann =
8560 phba->cfg_fcp_io_channel;
8561
8562 /* Associate next IO channel with CPU */
8563 cpup->channel_id = chann[used_chann];
8564 num_io_channel++;
8565 used_chann++;
8566 if (used_chann == saved_chann)
8567 used_chann = 0;
8568
8569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8570 "3331 Set IO_CHANN "
8571 "CPU %d channel %d\n",
8572 idx, cpup->channel_id);
8573 }
8574out:
8575 cpu++;
8576 if (cpu >= phba->sli4_hba.num_present_cpu) {
8577 cpup = phba->sli4_hba.cpu_map;
8578 cpu = 0;
8579 } else {
8580 cpup++;
8581 }
8582 }
8583 }
8584
8585 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8586 cpup = phba->sli4_hba.cpu_map;
8587 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8588 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8589 cpup->channel_id = 0;
8590 num_io_channel++;
8591
8592 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8593 "3332 Assign IO_CHANN "
8594 "CPU %d channel %d\n",
8595 idx, cpup->channel_id);
8596 }
8597 cpup++;
8598 }
8599 }
8600
8601 /* Sanity check */
8602 if (num_io_channel != phba->sli4_hba.num_present_cpu)
8603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8604 "3333 Set affinity mismatch:"
8605 "%d chann != %d cpus: %d vactors\n",
8606 num_io_channel, phba->sli4_hba.num_present_cpu,
8607 vectors);
8608
8609 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8610 return 1;
8611}
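
The routine above advances phys_id after every vector, so consecutive MSI-X vectors land on alternating sockets before any socket repeats a CPU. A userspace model of that spreading (the two-socket topology table and vector numbers are invented for illustration):

    #include <stdio.h>

    #define EMPTY (-1)

    struct cpu_info { int phys_id; int irq; };

    /* pretend topology: cpus 0-3 on socket 0, cpus 4-7 on socket 1 */
    static struct cpu_info cpu_map[8] = {
        {0, EMPTY}, {0, EMPTY}, {0, EMPTY}, {0, EMPTY},
        {1, EMPTY}, {1, EMPTY}, {1, EMPTY}, {1, EMPTY},
    };

    /* first unused cpu on the requested socket, as lpfc_find_next_cpu does */
    static int find_next_cpu(int phys_id)
    {
        for (int cpu = 0; cpu < 8; cpu++)
            if (cpu_map[cpu].irq == EMPTY && cpu_map[cpu].phys_id == phys_id)
                return cpu;
        return EMPTY;
    }

    int main(void)
    {
        int vectors = 4, phys_id = 0, max_phys_id = 1;

        for (int idx = 0; idx < vectors; idx++) {
            int cpu = find_next_cpu(phys_id);
            if (cpu == EMPTY)
                break;
            cpu_map[cpu].irq = 100 + idx;    /* fake vector number */
            printf("vector %d -> cpu %d (socket %d)\n", idx, cpu, phys_id);
            if (++phys_id > max_phys_id)     /* alternate sockets */
                phys_id = 0;
        }
        return 0;
    }

Four vectors come out as cpus 0, 4, 1, 5: two per socket instead of all four stacked on socket 0.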
8612
8613
8614/**
8212 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 8615 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8213 * @phba: pointer to lpfc hba data structure. 8616 * @phba: pointer to lpfc hba data structure.
8214 * 8617 *
@@ -8259,9 +8662,7 @@ enable_msix_vectors:
8259 phba->sli4_hba.msix_entries[index].vector, 8662 phba->sli4_hba.msix_entries[index].vector,
8260 phba->sli4_hba.msix_entries[index].entry); 8663 phba->sli4_hba.msix_entries[index].entry);
8261 8664
8262 /* 8665 /* Assign MSI-X vectors to interrupt handlers */
8263 * Assign MSI-X vectors to interrupt handlers
8264 */
8265 for (index = 0; index < vectors; index++) { 8666 for (index = 0; index < vectors; index++) {
8266 memset(&phba->sli4_hba.handler_name[index], 0, 16); 8667 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8267 sprintf((char *)&phba->sli4_hba.handler_name[index], 8668 sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@ enable_msix_vectors:
8289 phba->cfg_fcp_io_channel, vectors); 8690 phba->cfg_fcp_io_channel, vectors);
8290 phba->cfg_fcp_io_channel = vectors; 8691 phba->cfg_fcp_io_channel = vectors;
8291 } 8692 }
8693
8694 lpfc_sli4_set_affinity(phba, vectors);
8292 return rc; 8695 return rc;
8293 8696
8294cfg_fail_out: 8697cfg_fail_out:
@@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9213 /* Block all SCSI devices' I/Os on the host */ 9616 /* Block all SCSI devices' I/Os on the host */
9214 lpfc_scsi_dev_block(phba); 9617 lpfc_scsi_dev_block(phba);
9215 9618
9619 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9620 lpfc_sli_flush_fcp_rings(phba);
9621
9216 /* stop all timers */ 9622 /* stop all timers */
9217 lpfc_stop_hba_timers(phba); 9623 lpfc_stop_hba_timers(phba);
9218 9624
9219 /* Disable interrupt and pci device */ 9625 /* Disable interrupt and pci device */
9220 lpfc_sli_disable_intr(phba); 9626 lpfc_sli_disable_intr(phba);
9221 pci_disable_device(phba->pcidev); 9627 pci_disable_device(phba->pcidev);
9222
9223 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9224 lpfc_sli_flush_fcp_rings(phba);
9225} 9628}
9226 9629
9227/** 9630/**
@@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9966 /* Block all SCSI devices' I/Os on the host */ 10369 /* Block all SCSI devices' I/Os on the host */
9967 lpfc_scsi_dev_block(phba); 10370 lpfc_scsi_dev_block(phba);
9968 10371
10372 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10373 lpfc_sli_flush_fcp_rings(phba);
10374
9969 /* stop all timers */ 10375 /* stop all timers */
9970 lpfc_stop_hba_timers(phba); 10376 lpfc_stop_hba_timers(phba);
9971 10377
@@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9973 lpfc_sli4_disable_intr(phba); 10379 lpfc_sli4_disable_intr(phba);
9974 lpfc_sli4_queue_destroy(phba); 10380 lpfc_sli4_queue_destroy(phba);
9975 pci_disable_device(phba->pcidev); 10381 pci_disable_device(phba->pcidev);
9976
9977 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9978 lpfc_sli_flush_fcp_rings(phba);
9979} 10382}
9980 10383
9981/** 10384/**
@@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
10535static int __init 10938static int __init
10536lpfc_init(void) 10939lpfc_init(void)
10537{ 10940{
10941 int cpu;
10538 int error = 0; 10942 int error = 0;
10539 10943
10540 printk(LPFC_MODULE_DESC "\n"); 10944 printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@ lpfc_init(void)
10561 return -ENOMEM; 10965 return -ENOMEM;
10562 } 10966 }
10563 } 10967 }
10968
10969 /* Initialize in case vector mapping is needed */
10970 for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
10971 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
10972
10564 error = pci_register_driver(&lpfc_driver); 10973 error = pci_register_driver(&lpfc_driver);
10565 if (error) { 10974 if (error) {
10566 fc_release_transport(lpfc_transport_template); 10975 fc_release_transport(lpfc_transport_template);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index baf53e6c2bd1..2a4e5d21eab2 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -37,6 +37,7 @@
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */ 38#define LOG_FIP 0x00020000 /* FIP events */
39#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ 39#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
40#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
40#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 41#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
41 42
42#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 43#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a7a9fa468308..41363db7d426 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2149 2149
2150 /* Only FC supports upd bit */ 2150 /* Only FC supports upd bit */
2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && 2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
2152 (vport->fc_flag & FC_VFI_REGISTERED)) { 2152 (vport->fc_flag & FC_VFI_REGISTERED) &&
2153 (!phba->fc_topology_changed)) {
2153 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); 2154 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
2154 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); 2155 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
2155 } 2156 }
2156 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, 2157 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
2157 "3134 Register VFI, mydid:x%x, fcfi:%d, " 2158 "3134 Register VFI, mydid:x%x, fcfi:%d, "
2158 " vfi:%d, vpi:%d, fc_pname:%x%x\n", 2159 " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
2160 " port_state:x%x topology chg:%d\n",
2159 vport->fc_myDID, 2161 vport->fc_myDID,
2160 phba->fcf.fcfi, 2162 phba->fcf.fcfi,
2161 phba->sli4_hba.vfi_ids[vport->vfi], 2163 phba->sli4_hba.vfi_ids[vport->vfi],
2162 phba->vpi_ids[vport->vpi], 2164 phba->vpi_ids[vport->vpi],
2163 reg_vfi->wwn[0], reg_vfi->wwn[1]); 2165 reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
2166 vport->port_state, phba->fc_topology_changed);
2164} 2167}
2165 2168
2166/** 2169/**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cd86069a0ba8..812d0cd7c86d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
65 int i; 65 int i;
66 66
67 if (phba->sli_rev == LPFC_SLI_REV4) 67 if (phba->sli_rev == LPFC_SLI_REV4) {
68 /* Calculate alignment */
69 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
70 i = phba->cfg_sg_dma_buf_size;
71 else
72 i = SLI4_PAGE_SIZE;
73
68 phba->lpfc_scsi_dma_buf_pool = 74 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool", 75 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev, 76 phba->pcidev,
71 phba->cfg_sg_dma_buf_size, 77 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size, 78 i,
73 0); 79 0);
74 else 80 } else {
75 phba->lpfc_scsi_dma_buf_pool = 81 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool", 82 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size, 83 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0); 84 align, 0);
85 }
86
79 if (!phba->lpfc_scsi_dma_buf_pool) 87 if (!phba->lpfc_scsi_dma_buf_pool)
80 goto fail; 88 goto fail;
81 89
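
The lpfc_mem.c hunk above caps the requested pool alignment at one SLI-4 page. With the buffer size now rounded to page multiples it can stop being a power of two (12K, for instance), so align == size would no longer always be a valid dma-pool request (alignment must be a power of two); page alignment is both valid and all the hardware needs. The clamp, modeled:

    #include <stdio.h>

    #define SLI4_PAGE_SIZE 4096

    /* choose the dma-pool alignment the way lpfc_mem_alloc now does */
    static int pool_align(int buf_size)
    {
        return buf_size < SLI4_PAGE_SIZE ? buf_size : SLI4_PAGE_SIZE;
    }

    int main(void)
    {
        printf("2K buffer  -> align %d\n", pool_align(2048));
        printf("12K buffer -> align %d\n", pool_align(12288));
        return 0;
    }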
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 82f4d3542289..31e9b92f5a9b 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
332 332
333 /* PLOGI chkparm OK */ 333 /* PLOGI chkparm OK */
334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
335 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", 335 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
336 "x%x x%x x%x\n",
336 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 337 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
337 ndlp->nlp_rpi); 338 ndlp->nlp_rpi, vport->port_state,
339 vport->fc_flag);
338 340
339 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) 341 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
340 ndlp->nlp_fcp_info |= CLASS2; 342 ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@ out:
574 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 576 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
575 577
576 /* 1 sec timeout */ 578 /* 1 sec timeout */
577 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 579 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
578 580
579 spin_lock_irq(shost->host_lock); 581 spin_lock_irq(shost->host_lock);
580 ndlp->nlp_flag |= NLP_DELAY_TMO; 582 ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
631 * If there are other active VLinks present, 633 * If there are other active VLinks present,
632 * re-instantiate the Vlink using FDISC. 634 * re-instantiate the Vlink using FDISC.
633 */ 635 */
634 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 636 mod_timer(&ndlp->nlp_delayfunc,
637 jiffies + msecs_to_jiffies(1000));
635 spin_lock_irq(shost->host_lock); 638 spin_lock_irq(shost->host_lock);
636 ndlp->nlp_flag |= NLP_DELAY_TMO; 639 ndlp->nlp_flag |= NLP_DELAY_TMO;
637 spin_unlock_irq(shost->host_lock); 640 spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
648 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 651 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
649 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 652 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
650 /* Only try to re-login if this is NOT a Fabric Node */ 653 /* Only try to re-login if this is NOT a Fabric Node */
651 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 654 mod_timer(&ndlp->nlp_delayfunc,
655 jiffies + msecs_to_jiffies(1000 * 1));
652 spin_lock_irq(shost->host_lock); 656 spin_lock_irq(shost->host_lock);
653 ndlp->nlp_flag |= NLP_DELAY_TMO; 657 ndlp->nlp_flag |= NLP_DELAY_TMO;
654 spin_unlock_irq(shost->host_lock); 658 spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
969 } 973 }
970 974
971 /* Put ndlp in npr state set plogi timer for 1 sec */ 975 /* Put ndlp in npr state set plogi timer for 1 sec */
972 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 976 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
973 spin_lock_irq(shost->host_lock); 977 spin_lock_irq(shost->host_lock);
974 ndlp->nlp_flag |= NLP_DELAY_TMO; 978 ndlp->nlp_flag |= NLP_DELAY_TMO;
975 spin_unlock_irq(shost->host_lock); 979 spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1303 if ((irsp->ulpStatus) || 1307 if ((irsp->ulpStatus) ||
1304 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { 1308 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1305 /* 1 sec timeout */ 1309 /* 1 sec timeout */
1306 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1310 mod_timer(&ndlp->nlp_delayfunc,
1311 jiffies + msecs_to_jiffies(1000));
1307 spin_lock_irq(shost->host_lock); 1312 spin_lock_irq(shost->host_lock);
1308 ndlp->nlp_flag |= NLP_DELAY_TMO; 1313 ndlp->nlp_flag |= NLP_DELAY_TMO;
1309 spin_unlock_irq(shost->host_lock); 1314 spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1509 } 1514 }
1510 1515
1511 /* Put ndlp in npr state set plogi timer for 1 sec */ 1516 /* Put ndlp in npr state set plogi timer for 1 sec */
1512 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1517 mod_timer(&ndlp->nlp_delayfunc,
1518 jiffies + msecs_to_jiffies(1000 * 1));
1513 spin_lock_irq(shost->host_lock); 1519 spin_lock_irq(shost->host_lock);
1514 ndlp->nlp_flag |= NLP_DELAY_TMO; 1520 ndlp->nlp_flag |= NLP_DELAY_TMO;
1515 spin_unlock_irq(shost->host_lock); 1521 spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2145 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 2151 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2146 2152
2147 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { 2153 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2148 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 2154 mod_timer(&ndlp->nlp_delayfunc,
2155 jiffies + msecs_to_jiffies(1000 * 1));
2149 spin_lock_irq(shost->host_lock); 2156 spin_lock_irq(shost->host_lock);
2150 ndlp->nlp_flag |= NLP_DELAY_TMO; 2157 ndlp->nlp_flag |= NLP_DELAY_TMO;
2151 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2158 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 74b8710e1e90..8523b278ec9d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -24,6 +24,8 @@
24#include <linux/export.h> 24#include <linux/export.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27#include <linux/crc-t10dif.h>
28#include <net/checksum.h>
27 29
28#include <scsi/scsi.h> 30#include <scsi/scsi.h>
29#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
48#define LPFC_RESET_WAIT 2 50#define LPFC_RESET_WAIT 2
49#define LPFC_ABORT_WAIT 2 51#define LPFC_ABORT_WAIT 2
50 52
51int _dump_buf_done; 53int _dump_buf_done = 1;
52 54
53static char *dif_op_str[] = { 55static char *dif_op_str[] = {
54 "PROT_NORMAL", 56 "PROT_NORMAL",
@@ -66,6 +68,10 @@ struct scsi_dif_tuple {
66 __be32 ref_tag; /* Target LBA or indirect LBA */ 68 __be32 ref_tag; /* Target LBA or indirect LBA */
67}; 69};
68 70
 71#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
 72#define scsi_prot_flagged(sc, flg) (1) /* assume checking was requested */
 73#endif
74
69static void 75static void
70lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 76lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
71static void 77static void
@@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
534 dma_addr_t pdma_phys_fcp_rsp; 540 dma_addr_t pdma_phys_fcp_rsp;
535 dma_addr_t pdma_phys_bpl; 541 dma_addr_t pdma_phys_bpl;
536 uint16_t iotag; 542 uint16_t iotag;
537 int bcnt; 543 int bcnt, bpl_size;
544
545 bpl_size = phba->cfg_sg_dma_buf_size -
546 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
547
548 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
549 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
550 num_to_alloc, phba->cfg_sg_dma_buf_size,
551 (int)sizeof(struct fcp_cmnd),
552 (int)sizeof(struct fcp_rsp), bpl_size);
538 553
539 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 554 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
540 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 555 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
759 struct list_head *post_sblist, int sb_count) 774 struct list_head *post_sblist, int sb_count)
760{ 775{
761 struct lpfc_scsi_buf *psb, *psb_next; 776 struct lpfc_scsi_buf *psb, *psb_next;
762 int status; 777 int status, sgl_size;
763 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 778 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
764 dma_addr_t pdma_phys_bpl1; 779 dma_addr_t pdma_phys_bpl1;
765 int last_xritag = NO_XRI; 780 int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
771 if (sb_count <= 0) 786 if (sb_count <= 0)
772 return -EINVAL; 787 return -EINVAL;
773 788
789 sgl_size = phba->cfg_sg_dma_buf_size -
790 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
791
774 list_for_each_entry_safe(psb, psb_next, post_sblist, list) { 792 list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
775 list_del_init(&psb->list); 793 list_del_init(&psb->list);
776 block_cnt++; 794 block_cnt++;
@@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
803 post_cnt = block_cnt; 821 post_cnt = block_cnt;
804 } else if (block_cnt == 1) { 822 } else if (block_cnt == 1) {
805 /* last single sgl with non-contiguous xri */ 823 /* last single sgl with non-contiguous xri */
806 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 824 if (sgl_size > SGL_PAGE_SIZE)
807 pdma_phys_bpl1 = psb->dma_phys_bpl + 825 pdma_phys_bpl1 = psb->dma_phys_bpl +
808 SGL_PAGE_SIZE; 826 SGL_PAGE_SIZE;
809 else 827 else
@@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
885 int num_posted, rc = 0; 903 int num_posted, rc = 0;
886 904
887 /* get all SCSI buffers need to repost to a local list */ 905 /* get all SCSI buffers need to repost to a local list */
888 spin_lock_irq(&phba->scsi_buf_list_lock); 906 spin_lock_irq(&phba->scsi_buf_list_get_lock);
889 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); 907 spin_lock_irq(&phba->scsi_buf_list_put_lock);
890 spin_unlock_irq(&phba->scsi_buf_list_lock); 908 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
909 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
910 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
911 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
891 912
892 /* post the list of scsi buffer sgls to port if available */ 913 /* post the list of scsi buffer sgls to port if available */
893 if (!list_empty(&post_sblist)) { 914 if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
923 IOCB_t *iocb; 944 IOCB_t *iocb;
924 dma_addr_t pdma_phys_fcp_cmd; 945 dma_addr_t pdma_phys_fcp_cmd;
925 dma_addr_t pdma_phys_fcp_rsp; 946 dma_addr_t pdma_phys_fcp_rsp;
926 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 947 dma_addr_t pdma_phys_bpl;
927 uint16_t iotag, lxri = 0; 948 uint16_t iotag, lxri = 0;
928 int bcnt, num_posted; 949 int bcnt, num_posted, sgl_size;
929 LIST_HEAD(prep_sblist); 950 LIST_HEAD(prep_sblist);
930 LIST_HEAD(post_sblist); 951 LIST_HEAD(post_sblist);
931 LIST_HEAD(scsi_sblist); 952 LIST_HEAD(scsi_sblist);
932 953
954 sgl_size = phba->cfg_sg_dma_buf_size -
955 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
956
957 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
958 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
959 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
960 (int)sizeof(struct fcp_cmnd),
961 (int)sizeof(struct fcp_rsp));
962
933 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 963 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
934 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 964 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
935 if (!psb) 965 if (!psb)
@@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
948 } 978 }
949 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 979 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
950 980
981 /* Page alignment is CRITICAL, double check to be sure */
982 if (((unsigned long)(psb->data) &
983 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
984 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
985 psb->data, psb->dma_handle);
986 kfree(psb);
987 break;
988 }
989
951 /* Allocate iotag for psb->cur_iocbq. */ 990 /* Allocate iotag for psb->cur_iocbq. */
952 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 991 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
953 if (iotag == 0) { 992 if (iotag == 0) {
@@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
968 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 1007 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
969 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 1008 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
970 psb->fcp_bpl = psb->data; 1009 psb->fcp_bpl = psb->data;
971 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) 1010 psb->fcp_cmnd = (psb->data + sgl_size);
972 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
973 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + 1011 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
974 sizeof(struct fcp_cmnd)); 1012 sizeof(struct fcp_cmnd));
975 1013
976 /* Initialize local short-hand pointers. */ 1014 /* Initialize local short-hand pointers. */
977 sgl = (struct sli4_sge *)psb->fcp_bpl; 1015 sgl = (struct sli4_sge *)psb->fcp_bpl;
978 pdma_phys_bpl = psb->dma_handle; 1016 pdma_phys_bpl = psb->dma_handle;
979 pdma_phys_fcp_cmd = 1017 pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
980 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
981 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
982 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 1018 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
983 1019
984 /* 1020 /*
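
The layout change above is easier to see flattened out: each scsi_buf's single DMA allocation is carved as [SGL | fcp_cmnd | fcp_rsp], with the command now placed at the fixed sgl_size offset instead of being back-computed from the end of the buffer. A sketch with placeholder sizes:

    #include <stdio.h>
    #include <stdlib.h>

    #define FCP_CMND_SZ 32   /* placeholders, not the real struct sizes */
    #define FCP_RSP_SZ  96

    int main(void)
    {
        int buf_size = 4096;
        int sgl_size = buf_size - (FCP_CMND_SZ + FCP_RSP_SZ);

        char *data = calloc(1, buf_size);   /* stands in for the pool alloc */
        if (!data)
            return 1;

        char *fcp_bpl  = data;              /* SGL at the front */
        char *fcp_cmnd = data + sgl_size;   /* command right after the SGL */
        char *fcp_rsp  = fcp_cmnd + FCP_CMND_SZ;

        printf("sgl @%ld, cmnd @%ld, rsp @%ld of %d\n",
               (long)(fcp_bpl - data), (long)(fcp_cmnd - data),
               (long)(fcp_rsp - data), buf_size);
        free(data);
        return 0;
    }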
@@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
1020 iocb->ulpLe = 1; 1056 iocb->ulpLe = 1;
1021 iocb->ulpClass = CLASS3; 1057 iocb->ulpClass = CLASS3;
1022 psb->cur_iocbq.context1 = psb; 1058 psb->cur_iocbq.context1 = psb;
1023 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1024 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
1025 else
1026 pdma_phys_bpl1 = 0;
1027 psb->dma_phys_bpl = pdma_phys_bpl; 1059 psb->dma_phys_bpl = pdma_phys_bpl;
1028 1060
1029 /* add the scsi buffer to a post list */ 1061 /* add the scsi buffer to a post list */
1030 list_add_tail(&psb->list, &post_sblist); 1062 list_add_tail(&psb->list, &post_sblist);
1031 spin_lock_irq(&phba->scsi_buf_list_lock); 1063 spin_lock_irq(&phba->scsi_buf_list_get_lock);
1032 phba->sli4_hba.scsi_xri_cnt++; 1064 phba->sli4_hba.scsi_xri_cnt++;
1033 spin_unlock_irq(&phba->scsi_buf_list_lock); 1065 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
1034 } 1066 }
1035 lpfc_printf_log(phba, KERN_INFO, LOG_BG, 1067 lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1036 "3021 Allocate %d out of %d requested new SCSI " 1068 "3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf*
1079lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1111lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1080{ 1112{
1081 struct lpfc_scsi_buf * lpfc_cmd = NULL; 1113 struct lpfc_scsi_buf * lpfc_cmd = NULL;
1082 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 1114 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
1083 unsigned long iflag = 0; 1115 unsigned long gflag = 0;
1084 1116 unsigned long pflag = 0;
1085 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1117
1086 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 1118 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1087 if (lpfc_cmd) { 1119 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
1088 lpfc_cmd->seg_cnt = 0; 1120 list);
1089 lpfc_cmd->nonsg_phys = 0; 1121 if (!lpfc_cmd) {
1090 lpfc_cmd->prot_seg_cnt = 0; 1122 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1123 list_splice(&phba->lpfc_scsi_buf_list_put,
1124 &phba->lpfc_scsi_buf_list_get);
1125 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1126 list_remove_head(scsi_buf_list_get, lpfc_cmd,
1127 struct lpfc_scsi_buf, list);
1128 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1091 } 1129 }
1092 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1130 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1093 return lpfc_cmd; 1131 return lpfc_cmd;
1094} 1132}
1095/** 1133/**
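
This get-side routine is the payoff of splitting lpfc_scsi_buf_list into _get and _put halves: completions append under the put lock alone, and the allocator touches that lock only when its own list runs dry, splicing the whole put list over in one shot. A compact pthread model of the pattern (names and types are illustrative, not the driver's):

    #include <pthread.h>
    #include <stdio.h>

    struct buf { struct buf *next; int id; };

    static struct buf *get_list, *put_list;
    static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

    /* completion side: cheap, only ever takes the put lock */
    static void buf_release(struct buf *b)
    {
        pthread_mutex_lock(&put_lock);
        b->next = put_list;
        put_list = b;
        pthread_mutex_unlock(&put_lock);
    }

    /* submission side: refills from the put list only when empty */
    static struct buf *buf_get(void)
    {
        struct buf *b;

        pthread_mutex_lock(&get_lock);
        if (!get_list) {
            pthread_mutex_lock(&put_lock);  /* same order as the driver */
            get_list = put_list;            /* splice everything over */
            put_list = NULL;
            pthread_mutex_unlock(&put_lock);
        }
        b = get_list;
        if (b)
            get_list = b->next;
        pthread_mutex_unlock(&get_lock);
        return b;
    }

    int main(void)
    {
        struct buf bufs[3] = { {0, 0}, {0, 1}, {0, 2} };
        struct buf *b;

        for (int i = 0; i < 3; i++)
            buf_release(&bufs[i]);          /* seed via the release path */
        while ((b = buf_get()))
            printf("got buf %d\n", b->id);
        return 0;
    }

Taking the locks in the fixed get-then-put order is what keeps the splice deadlock-free.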
@@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf*
1107lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1145lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1108{ 1146{
1109 struct lpfc_scsi_buf *lpfc_cmd ; 1147 struct lpfc_scsi_buf *lpfc_cmd ;
1110 unsigned long iflag = 0; 1148 unsigned long gflag = 0;
1149 unsigned long pflag = 0;
1111 int found = 0; 1150 int found = 0;
1112 1151
1113 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1152 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1114 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, 1153 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
1115 list) {
1116 if (lpfc_test_rrq_active(phba, ndlp, 1154 if (lpfc_test_rrq_active(phba, ndlp,
1117 lpfc_cmd->cur_iocbq.sli4_lxritag)) 1155 lpfc_cmd->cur_iocbq.sli4_lxritag))
1118 continue; 1156 continue;
1119 list_del(&lpfc_cmd->list); 1157 list_del(&lpfc_cmd->list);
1120 found = 1; 1158 found = 1;
1121 lpfc_cmd->seg_cnt = 0;
1122 lpfc_cmd->nonsg_phys = 0;
1123 lpfc_cmd->prot_seg_cnt = 0;
1124 break; 1159 break;
1125 } 1160 }
1126 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, 1161 if (!found) {
1127 iflag); 1162 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1163 list_splice(&phba->lpfc_scsi_buf_list_put,
1164 &phba->lpfc_scsi_buf_list_get);
1165 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1166 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1167 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
1168 list) {
1169 if (lpfc_test_rrq_active(
1170 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1171 continue;
1172 list_del(&lpfc_cmd->list);
1173 found = 1;
1174 break;
1175 }
1176 }
1177 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1128 if (!found) 1178 if (!found)
1129 return NULL; 1179 return NULL;
1130 else 1180 return lpfc_cmd;
1131 return lpfc_cmd;
1132} 1181}
1133/** 1182/**
1134 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 1183 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1160{ 1209{
1161 unsigned long iflag = 0; 1210 unsigned long iflag = 0;
1162 1211
1163 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1212 psb->seg_cnt = 0;
1213 psb->nonsg_phys = 0;
1214 psb->prot_seg_cnt = 0;
1215
1216 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1164 psb->pCmd = NULL; 1217 psb->pCmd = NULL;
1165 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1218 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1166 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1219 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1220 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1167} 1221}
1168 1222
1169/** 1223/**
@@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1181{ 1235{
1182 unsigned long iflag = 0; 1236 unsigned long iflag = 0;
1183 1237
1238 psb->seg_cnt = 0;
1239 psb->nonsg_phys = 0;
1240 psb->prot_seg_cnt = 0;
1241
1184 if (psb->exch_busy) { 1242 if (psb->exch_busy) {
1185 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 1243 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1186 iflag); 1244 iflag);
@@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1190 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 1248 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1191 iflag); 1249 iflag);
1192 } else { 1250 } else {
1193
1194 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1195 psb->pCmd = NULL; 1251 psb->pCmd = NULL;
1196 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1252 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1197 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1253 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1254 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1255 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1198 } 1256 }
1199} 1257}
1200 1258
@@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1268 "dma_map_sg. Config %d, seg_cnt %d\n", 1326 "dma_map_sg. Config %d, seg_cnt %d\n",
1269 __func__, phba->cfg_sg_seg_cnt, 1327 __func__, phba->cfg_sg_seg_cnt,
1270 lpfc_cmd->seg_cnt); 1328 lpfc_cmd->seg_cnt);
1329 lpfc_cmd->seg_cnt = 0;
1271 scsi_dma_unmap(scsi_cmnd); 1330 scsi_dma_unmap(scsi_cmnd);
1272 return 1; 1331 return 1;
1273 } 1332 }
@@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2013 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 2072 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2014 bf_set(pde6_optx, pde6, txop); 2073 bf_set(pde6_optx, pde6, txop);
2015 bf_set(pde6_oprx, pde6, rxop); 2074 bf_set(pde6_oprx, pde6, rxop);
2075
2076 /*
2077 * We only need to check the data on READs, for WRITEs
2078 * protection data is automatically generated, not checked.
2079 */
2016 if (datadir == DMA_FROM_DEVICE) { 2080 if (datadir == DMA_FROM_DEVICE) {
2017 bf_set(pde6_ce, pde6, checking); 2081 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2018 bf_set(pde6_re, pde6, checking); 2082 bf_set(pde6_ce, pde6, checking);
2083 else
2084 bf_set(pde6_ce, pde6, 0);
2085
2086 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2087 bf_set(pde6_re, pde6, checking);
2088 else
2089 bf_set(pde6_re, pde6, 0);
2019 } 2090 }
2020 bf_set(pde6_ai, pde6, 1); 2091 bf_set(pde6_ai, pde6, 1);
2021 bf_set(pde6_ae, pde6, 0); 2092 bf_set(pde6_ae, pde6, 0);
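
Both checking bits follow the same shape, so one hedged helper (invented here for illustration, not part of the patch) captures the rule used throughout these setup routines:

	/* Illustration only: gate the HBA's guard/ref checking on the
	 * protection flags the SCSI midlayer set on this command.
	 */
	static inline void
	lpfc_dif_check_bits(struct scsi_cmnd *sc, int checking,
			    int *ce, int *re)
	{
		*ce = scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK) ? checking : 0;
		*re = scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK) ? checking : 0;
	}

Before this change the bits were set unconditionally on reads, which made the HBA check tags the midlayer never asked it to verify.
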
@@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2145 2216
2146 split_offset = 0; 2217 split_offset = 0;
2147 do { 2218 do {
2219 /* Check to see if we ran out of space */
2220 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
2221 return num_bde + 3;
2222
2148 /* setup PDE5 with what we have */ 2223 /* setup PDE5 with what we have */
2149 pde5 = (struct lpfc_pde5 *) bpl; 2224 pde5 = (struct lpfc_pde5 *) bpl;
2150 memset(pde5, 0, sizeof(struct lpfc_pde5)); 2225 memset(pde5, 0, sizeof(struct lpfc_pde5));
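
The early-exit arithmetic is worth spelling out: each loop iteration needs room for a PDE5, a PDE6, and at least one data BDE, hence the headroom test against cfg_total_seg_cnt - 2. Returning num_bde + 3 is deliberately past the limit; with cfg_total_seg_cnt = 64, for example, the test trips at num_bde = 62 and returns 65, which fails the caller's new (num_bde > phba->cfg_total_seg_cnt) check and routes the command to the err path.
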
@@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2164 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 2239 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2165 bf_set(pde6_optx, pde6, txop); 2240 bf_set(pde6_optx, pde6, txop);
2166 bf_set(pde6_oprx, pde6, rxop); 2241 bf_set(pde6_oprx, pde6, rxop);
2167 bf_set(pde6_ce, pde6, checking); 2242
2168 bf_set(pde6_re, pde6, checking); 2243 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2244 bf_set(pde6_ce, pde6, checking);
2245 else
2246 bf_set(pde6_ce, pde6, 0);
2247
2248 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2249 bf_set(pde6_re, pde6, checking);
2250 else
2251 bf_set(pde6_re, pde6, 0);
2252
2169 bf_set(pde6_ai, pde6, 1); 2253 bf_set(pde6_ai, pde6, 1);
2170 bf_set(pde6_ae, pde6, 0); 2254 bf_set(pde6_ae, pde6, 0);
2171 bf_set(pde6_apptagval, pde6, 0); 2255 bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2213 pgdone = 0; 2297 pgdone = 0;
2214 subtotal = 0; /* total bytes processed for current prot grp */ 2298 subtotal = 0; /* total bytes processed for current prot grp */
2215 while (!pgdone) { 2299 while (!pgdone) {
2300 /* Check to see if we ran out of space */
2301 if (num_bde >= phba->cfg_total_seg_cnt)
2302 return num_bde + 1;
2303
2216 if (!sgde) { 2304 if (!sgde) {
2217 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2305 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2218 "9065 BLKGRD:%s Invalid data segment\n", 2306 "9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2324 struct sli4_sge_diseed *diseed = NULL; 2412 struct sli4_sge_diseed *diseed = NULL;
2325 dma_addr_t physaddr; 2413 dma_addr_t physaddr;
2326 int i = 0, num_sge = 0, status; 2414 int i = 0, num_sge = 0, status;
2327 int datadir = sc->sc_data_direction;
2328 uint32_t reftag; 2415 uint32_t reftag;
2329 unsigned blksize; 2416 unsigned blksize;
2330 uint8_t txop, rxop; 2417 uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2362 diseed->ref_tag = cpu_to_le32(reftag); 2449 diseed->ref_tag = cpu_to_le32(reftag);
2363 diseed->ref_tag_tran = diseed->ref_tag; 2450 diseed->ref_tag_tran = diseed->ref_tag;
2364 2451
2452 /*
 2453 * We only need to check the data on READs; for WRITEs,
2454 * protection data is automatically generated, not checked.
2455 */
2456 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2457 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2458 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2459 else
2460 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2461
2462 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2463 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2464 else
2465 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2466 }
2467
2365 /* setup DISEED with the rest of the info */ 2468 /* setup DISEED with the rest of the info */
2366 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2469 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2367 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2470 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2368 if (datadir == DMA_FROM_DEVICE) { 2471
2369 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2370 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2371 }
2372 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2472 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2373 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2473 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2374 2474
@@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2497 2597
2498 split_offset = 0; 2598 split_offset = 0;
2499 do { 2599 do {
2600 /* Check to see if we ran out of space */
2601 if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2602 return num_sge + 3;
2603
2500 /* setup DISEED with what we have */ 2604 /* setup DISEED with what we have */
2501 diseed = (struct sli4_sge_diseed *) sgl; 2605 diseed = (struct sli4_sge_diseed *) sgl;
2502 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2606 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2506 diseed->ref_tag = cpu_to_le32(reftag); 2610 diseed->ref_tag = cpu_to_le32(reftag);
2507 diseed->ref_tag_tran = diseed->ref_tag; 2611 diseed->ref_tag_tran = diseed->ref_tag;
2508 2612
2613 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
2614 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2615
2616 } else {
2617 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2618 /*
2619 * When in this mode, the hardware will replace
2620 * the guard tag from the host with a
2621 * newly generated good CRC for the wire.
2622 * Switch to raw mode here to avoid this
2623 * behavior. What the host sends gets put on the wire.
2624 */
2625 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2626 txop = BG_OP_RAW_MODE;
2627 rxop = BG_OP_RAW_MODE;
2628 }
2629 }
2630
2631
2632 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2633 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2634 else
2635 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2636
2509 /* setup DISEED with the rest of the info */ 2637 /* setup DISEED with the rest of the info */
2510 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2638 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2511 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2639 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2512 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2640
2513 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2514 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2641 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2515 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2642 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2516 2643
@@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2556 pgdone = 0; 2683 pgdone = 0;
2557 subtotal = 0; /* total bytes processed for current prot grp */ 2684 subtotal = 0; /* total bytes processed for current prot grp */
2558 while (!pgdone) { 2685 while (!pgdone) {
2686 /* Check to see if we ran out of space */
2687 if (num_sge >= phba->cfg_total_seg_cnt)
2688 return num_sge + 1;
2689
2559 if (!sgde) { 2690 if (!sgde) {
2560 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2691 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2561 "9086 BLKGRD:%s Invalid data segment\n", 2692 "9086 BLKGRD:%s Invalid data segment\n",
@@ -2670,6 +2801,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2670} 2801}
2671 2802
2672/** 2803/**
2804 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2805 * @phba: The Hba for which this call is being executed.
2806 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2807 *
2808 * Adjust the data length to account for how much data
2809 * is actually on the wire.
2810 *
2811 * returns the adjusted data length
2812 **/
2813static int
2814lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2815 struct lpfc_scsi_buf *lpfc_cmd)
2816{
2817 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2818 int fcpdl;
2819
2820 fcpdl = scsi_bufflen(sc);
2821
2822 /* Check if there is protection data on the wire */
2823 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2824 /* Read */
2825 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2826 return fcpdl;
2827
2828 } else {
2829 /* Write */
2830 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2831 return fcpdl;
2832 }
2833
2834 /*
 2835 * If we are in DIF Type 1 mode, every data block has an 8 byte
 2836 * DIF (trailer) attached to it. Must adjust the FCP data length.
2837 */
2838 if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
2839 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2840
2841 return fcpdl;
2842}
2843
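
As a worked example of the adjustment, assume 512-byte logical blocks with PI on the wire: a 64 KB transfer spans 128 blocks, so the adjustment adds 128 * 8 = 1024 bytes and reports fcpdl = 66560 to the HBA, matching what the target actually sends or expects.
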
2844/**
2673 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 2845 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2674 * @phba: The Hba for which this call is being executed. 2846 * @phba: The Hba for which this call is being executed.
2675 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 2847 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
@@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2689 uint32_t num_bde = 0; 2861 uint32_t num_bde = 0;
2690 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2862 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2691 int prot_group_type = 0; 2863 int prot_group_type = 0;
2692 int diflen, fcpdl; 2864 int fcpdl;
2693 unsigned blksize;
2694 2865
2695 /* 2866 /*
2696 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 2867 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2711 return 1; 2882 return 1;
2712 2883
2713 lpfc_cmd->seg_cnt = datasegcnt; 2884 lpfc_cmd->seg_cnt = datasegcnt;
2714 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2885
2715 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2886 /* First check if data segment count from SCSI Layer is good */
2716 "9067 BLKGRD: %s: Too many sg segments" 2887 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2717 " from dma_map_sg. Config %d, seg_cnt" 2888 goto err;
2718 " %d\n",
2719 __func__, phba->cfg_sg_seg_cnt,
2720 lpfc_cmd->seg_cnt);
2721 scsi_dma_unmap(scsi_cmnd);
2722 return 1;
2723 }
2724 2889
2725 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2890 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2726 2891
2727 switch (prot_group_type) { 2892 switch (prot_group_type) {
2728 case LPFC_PG_TYPE_NO_DIF: 2893 case LPFC_PG_TYPE_NO_DIF:
2894
2895 /* Here we need to add a PDE5 and PDE6 to the count */
2896 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2897 goto err;
2898
2729 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2899 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2730 datasegcnt); 2900 datasegcnt);
2731 /* we should have 2 or more entries in buffer list */ 2901 /* we should have 2 or more entries in buffer list */
2732 if (num_bde < 2) 2902 if (num_bde < 2)
2733 goto err; 2903 goto err;
2734 break; 2904 break;
2735 case LPFC_PG_TYPE_DIF_BUF:{ 2905
2906 case LPFC_PG_TYPE_DIF_BUF:
2736 /* 2907 /*
2737 * This type indicates that protection buffers are 2908 * This type indicates that protection buffers are
2738 * passed to the driver, so that needs to be prepared 2909 * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2747 } 2918 }
2748 2919
2749 lpfc_cmd->prot_seg_cnt = protsegcnt; 2920 lpfc_cmd->prot_seg_cnt = protsegcnt;
2750 if (lpfc_cmd->prot_seg_cnt 2921
2751 > phba->cfg_prot_sg_seg_cnt) { 2922 /*
2752 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2923 * There is a minimun of 4 BPLs used for every
2753 "9068 BLKGRD: %s: Too many prot sg " 2924 * protection data segment.
2754 "segments from dma_map_sg. Config %d," 2925 */
2755 "prot_seg_cnt %d\n", __func__, 2926 if ((lpfc_cmd->prot_seg_cnt * 4) >
2756 phba->cfg_prot_sg_seg_cnt, 2927 (phba->cfg_total_seg_cnt - 2))
2757 lpfc_cmd->prot_seg_cnt); 2928 goto err;
2758 dma_unmap_sg(&phba->pcidev->dev,
2759 scsi_prot_sglist(scsi_cmnd),
2760 scsi_prot_sg_count(scsi_cmnd),
2761 datadir);
2762 scsi_dma_unmap(scsi_cmnd);
2763 return 1;
2764 }
2765 2929
2766 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2930 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2767 datasegcnt, protsegcnt); 2931 datasegcnt, protsegcnt);
2768 /* we should have 3 or more entries in buffer list */ 2932 /* we should have 3 or more entries in buffer list */
2769 if (num_bde < 3) 2933 if ((num_bde < 3) ||
2934 (num_bde > phba->cfg_total_seg_cnt))
2770 goto err; 2935 goto err;
2771 break; 2936 break;
2772 } 2937
2773 case LPFC_PG_TYPE_INVALID: 2938 case LPFC_PG_TYPE_INVALID:
2774 default: 2939 default:
2940 scsi_dma_unmap(scsi_cmnd);
2941 lpfc_cmd->seg_cnt = 0;
2942
2775 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2943 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2776 "9022 Unexpected protection group %i\n", 2944 "9022 Unexpected protection group %i\n",
2777 prot_group_type); 2945 prot_group_type);
@@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2790 iocb_cmd->ulpBdeCount = 1; 2958 iocb_cmd->ulpBdeCount = 1;
2791 iocb_cmd->ulpLe = 1; 2959 iocb_cmd->ulpLe = 1;
2792 2960
2793 fcpdl = scsi_bufflen(scsi_cmnd); 2961 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2794
2795 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2796 /*
2797 * We are in DIF Type 1 mode
2798 * Every data block has a 8 byte DIF (trailer)
2799 * attached to it. Must ajust FCP data length
2800 */
2801 blksize = lpfc_cmd_blksize(scsi_cmnd);
2802 diflen = (fcpdl / blksize) * 8;
2803 fcpdl += diflen;
2804 }
2805 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 2962 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2806 2963
2807 /* 2964 /*
@@ -2812,14 +2969,234 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2812 2969
2813 return 0; 2970 return 0;
2814err: 2971err:
2972 if (lpfc_cmd->seg_cnt)
2973 scsi_dma_unmap(scsi_cmnd);
2974 if (lpfc_cmd->prot_seg_cnt)
2975 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2976 scsi_prot_sg_count(scsi_cmnd),
2977 scsi_cmnd->sc_data_direction);
2978
2815 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2979 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2816 "9023 Could not setup all needed BDE's" 2980 "9023 Cannot setup S/G List for HBA"
2817 "prot_group_type=%d, num_bde=%d\n", 2981 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2982 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2983 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2818 prot_group_type, num_bde); 2984 prot_group_type, num_bde);
2985
2986 lpfc_cmd->seg_cnt = 0;
2987 lpfc_cmd->prot_seg_cnt = 0;
2819 return 1; 2988 return 1;
2820} 2989}
2821 2990
2822/* 2991/*
 2992 * This function calculates the T10 DIF guard tag
 2993 * on the specified data using the CRC algorithm
 2994 * provided by crc_t10dif.
2995 */
2996uint16_t
2997lpfc_bg_crc(uint8_t *data, int count)
2998{
2999 uint16_t crc = 0;
3000 uint16_t x;
3001
3002 crc = crc_t10dif(data, count);
3003 x = cpu_to_be16(crc);
3004 return x;
3005}
3006
3007/*
 3008 * This function calculates the T10 DIF guard tag
 3009 * on the specified data using the IP checksum algorithm
 3010 * provided by ip_compute_csum.
3011 */
3012uint16_t
3013lpfc_bg_csum(uint8_t *data, int count)
3014{
3015 uint16_t ret;
3016
3017 ret = ip_compute_csum(data, count);
3018 return ret;
3019}
3020
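
Which helper applies is decided by the host's registered DIX guard type, mirroring the selection lpfc_calc_bg_err() makes below. A small hedged usage sketch (cmd, buf, and blksize are assumed to be in scope):

	uint16_t guard;

	if (scsi_host_get_guard(cmd->device->host) == SHOST_DIX_GUARD_IP)
		guard = lpfc_bg_csum(buf, blksize);
	else
		guard = lpfc_bg_crc(buf, blksize);

Note that lpfc_bg_crc() returns the CRC already byte-swapped via cpu_to_be16(), so the result compares directly against the raw big-endian guard_tag stored in each DIF tuple.
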
3021/*
3022 * This function examines the protection data to try to determine
3023 * what type of T10-DIF error occurred.
3024 */
3025void
3026lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3027{
3028 struct scatterlist *sgpe; /* s/g prot entry */
3029 struct scatterlist *sgde; /* s/g data entry */
3030 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3031 struct scsi_dif_tuple *src = NULL;
3032 uint8_t *data_src = NULL;
3033 uint16_t guard_tag, guard_type;
3034 uint16_t start_app_tag, app_tag;
3035 uint32_t start_ref_tag, ref_tag;
3036 int prot, protsegcnt;
3037 int err_type, len, data_len;
3038 int chk_ref, chk_app, chk_guard;
3039 uint16_t sum;
3040 unsigned blksize;
3041
3042 err_type = BGS_GUARD_ERR_MASK;
3043 sum = 0;
3044 guard_tag = 0;
3045
3046 /* First check to see if there is protection data to examine */
3047 prot = scsi_get_prot_op(cmd);
3048 if ((prot == SCSI_PROT_READ_STRIP) ||
3049 (prot == SCSI_PROT_WRITE_INSERT) ||
3050 (prot == SCSI_PROT_NORMAL))
3051 goto out;
3052
3053 /* Currently the driver just supports ref_tag and guard_tag checking */
3054 chk_ref = 1;
3055 chk_app = 0;
3056 chk_guard = 0;
3057
3058 /* Setup a ptr to the protection data provided by the SCSI host */
3059 sgpe = scsi_prot_sglist(cmd);
3060 protsegcnt = lpfc_cmd->prot_seg_cnt;
3061
3062 if (sgpe && protsegcnt) {
3063
3064 /*
 3065 * We will only try to verify the guard tag if the segment
3066 * data length is a multiple of the blksize.
3067 */
3068 sgde = scsi_sglist(cmd);
3069 blksize = lpfc_cmd_blksize(cmd);
3070 data_src = (uint8_t *)sg_virt(sgde);
3071 data_len = sgde->length;
3072 if ((data_len & (blksize - 1)) == 0)
3073 chk_guard = 1;
3074 guard_type = scsi_host_get_guard(cmd->device->host);
3075
 3076 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
 3077 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
 3078 start_app_tag = src->app_tag;
 3079 len = sgpe->length;
3080 while (src && protsegcnt) {
3081 while (len) {
3082
3083 /*
3084 * First check to see if a protection data
3085 * check is valid
3086 */
3087 if ((src->ref_tag == 0xffffffff) ||
3088 (src->app_tag == 0xffff)) {
3089 start_ref_tag++;
3090 goto skipit;
3091 }
3092
3093 /* App Tag checking */
3094 app_tag = src->app_tag;
3095 if (chk_app && (app_tag != start_app_tag)) {
3096 err_type = BGS_APPTAG_ERR_MASK;
3097 goto out;
3098 }
3099
3100 /* Reference Tag checking */
3101 ref_tag = be32_to_cpu(src->ref_tag);
3102 if (chk_ref && (ref_tag != start_ref_tag)) {
3103 err_type = BGS_REFTAG_ERR_MASK;
3104 goto out;
3105 }
3106 start_ref_tag++;
3107
3108 /* Guard Tag checking */
3109 if (chk_guard) {
3110 guard_tag = src->guard_tag;
3111 if (guard_type == SHOST_DIX_GUARD_IP)
3112 sum = lpfc_bg_csum(data_src,
3113 blksize);
3114 else
3115 sum = lpfc_bg_crc(data_src,
3116 blksize);
 3117 if (guard_tag != sum) {
3118 err_type = BGS_GUARD_ERR_MASK;
3119 goto out;
3120 }
3121 }
3122skipit:
3123 len -= sizeof(struct scsi_dif_tuple);
3124 if (len < 0)
3125 len = 0;
3126 src++;
3127
3128 data_src += blksize;
3129 data_len -= blksize;
3130
3131 /*
3132 * Are we at the end of the Data segment?
3133 * The data segment is only used for Guard
3134 * tag checking.
3135 */
3136 if (chk_guard && (data_len == 0)) {
3137 chk_guard = 0;
3138 sgde = sg_next(sgde);
3139 if (!sgde)
3140 goto out;
3141
3142 data_src = (uint8_t *)sg_virt(sgde);
3143 data_len = sgde->length;
3144 if ((data_len & (blksize - 1)) == 0)
3145 chk_guard = 1;
3146 }
3147 }
3148
 3149 /* Go to the next protection data segment */
3150 sgpe = sg_next(sgpe);
3151 if (sgpe) {
3152 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3153 len = sgpe->length;
3154 } else {
3155 src = NULL;
3156 }
3157 protsegcnt--;
3158 }
3159 }
3160out:
3161 if (err_type == BGS_GUARD_ERR_MASK) {
3162 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3163 0x10, 0x1);
3164 cmd->result = DRIVER_SENSE << 24
3165 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3166 phba->bg_guard_err_cnt++;
3167 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3168 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3169 (unsigned long)scsi_get_lba(cmd),
3170 sum, guard_tag);
3171
3172 } else if (err_type == BGS_REFTAG_ERR_MASK) {
3173 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3174 0x10, 0x3);
3175 cmd->result = DRIVER_SENSE << 24
3176 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3177
3178 phba->bg_reftag_err_cnt++;
3179 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3180 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3181 (unsigned long)scsi_get_lba(cmd),
3182 ref_tag, start_ref_tag);
3183
3184 } else if (err_type == BGS_APPTAG_ERR_MASK) {
3185 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3186 0x10, 0x2);
3187 cmd->result = DRIVER_SENSE << 24
3188 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3189
3190 phba->bg_apptag_err_cnt++;
3191 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3192 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3193 (unsigned long)scsi_get_lba(cmd),
3194 app_tag, start_app_tag);
3195 }
3196}
3197
3198
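
The walker above advances through the protection scatterlist in sizeof(struct scsi_dif_tuple) strides. For reference, the 8-byte tuple layout it assumes, matching the structure declared earlier in lpfc_scsi.c (not shown in this hunk):

	struct scsi_dif_tuple {
		__be16 guard_tag;	/* CRC or IP checksum over one data block */
		__be16 app_tag;		/* application tag; not checked by this driver */
		__be32 ref_tag;		/* normally the low 32 bits of the LBA */
	};

A ref_tag of all ones or an app_tag of all ones is treated by the code as the T10 escape meaning "do not check this block", which is why the loop skips those entries.
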
3199/*
2823 * This function checks for BlockGuard errors detected by 3200 * This function checks for BlockGuard errors detected by
2824 * the HBA. In case of errors, the ASC/ASCQ fields in the 3201 * the HBA. In case of errors, the ASC/ASCQ fields in the
2825 * sense buffer will be set accordingly, paired with 3202 * sense buffer will be set accordingly, paired with
@@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2842 uint32_t bgstat = bgf->bgstat; 3219 uint32_t bgstat = bgf->bgstat;
2843 uint64_t failing_sector = 0; 3220 uint64_t failing_sector = 0;
2844 3221
2845 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
2846 " 0x%x lba 0x%llx blk cnt 0x%x "
2847 "bgstat=0x%x bghm=0x%x\n",
2848 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
2849 blk_rq_sectors(cmd->request), bgstat, bghm);
2850
2851 spin_lock(&_dump_buf_lock); 3222 spin_lock(&_dump_buf_lock);
2852 if (!_dump_buf_done) { 3223 if (!_dump_buf_done) {
2853 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" 3224 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2870 3241
2871 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3242 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2872 cmd->result = ScsiResult(DID_ERROR, 0); 3243 cmd->result = ScsiResult(DID_ERROR, 0);
2873 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" 3244 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2874 " BlockGuard profile. bgstat:0x%x\n", 3245 "9072 BLKGRD: Invalid BG Profile in cmd"
2875 bgstat); 3246 " 0x%x lba 0x%llx blk cnt 0x%x "
3247 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3248 (unsigned long long)scsi_get_lba(cmd),
3249 blk_rq_sectors(cmd->request), bgstat, bghm);
2876 ret = (-1); 3250 ret = (-1);
2877 goto out; 3251 goto out;
2878 } 3252 }
2879 3253
2880 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3254 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2881 cmd->result = ScsiResult(DID_ERROR, 0); 3255 cmd->result = ScsiResult(DID_ERROR, 0);
2882 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " 3256 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2883 "Invalid BlockGuard DIF Block. bgstat:0x%x\n", 3257 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2884 bgstat); 3258 " 0x%x lba 0x%llx blk cnt 0x%x "
3259 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3260 (unsigned long long)scsi_get_lba(cmd),
3261 blk_rq_sectors(cmd->request), bgstat, bghm);
2885 ret = (-1); 3262 ret = (-1);
2886 goto out; 3263 goto out;
2887 } 3264 }
@@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2894 cmd->result = DRIVER_SENSE << 24 3271 cmd->result = DRIVER_SENSE << 24
2895 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3272 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2896 phba->bg_guard_err_cnt++; 3273 phba->bg_guard_err_cnt++;
2897 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3274 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2898 "9055 BLKGRD: guard_tag error\n"); 3275 "9055 BLKGRD: Guard Tag error in cmd"
3276 " 0x%x lba 0x%llx blk cnt 0x%x "
3277 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3278 (unsigned long long)scsi_get_lba(cmd),
3279 blk_rq_sectors(cmd->request), bgstat, bghm);
2899 } 3280 }
2900 3281
2901 if (lpfc_bgs_get_reftag_err(bgstat)) { 3282 if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2907 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3288 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2908 3289
2909 phba->bg_reftag_err_cnt++; 3290 phba->bg_reftag_err_cnt++;
2910 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3291 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2911 "9056 BLKGRD: ref_tag error\n"); 3292 "9056 BLKGRD: Ref Tag error in cmd"
3293 " 0x%x lba 0x%llx blk cnt 0x%x "
3294 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3295 (unsigned long long)scsi_get_lba(cmd),
3296 blk_rq_sectors(cmd->request), bgstat, bghm);
2912 } 3297 }
2913 3298
2914 if (lpfc_bgs_get_apptag_err(bgstat)) { 3299 if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2920 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3305 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2921 3306
2922 phba->bg_apptag_err_cnt++; 3307 phba->bg_apptag_err_cnt++;
2923 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3308 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2924 "9061 BLKGRD: app_tag error\n"); 3309 "9061 BLKGRD: App Tag error in cmd"
3310 " 0x%x lba 0x%llx blk cnt 0x%x "
3311 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3312 (unsigned long long)scsi_get_lba(cmd),
3313 blk_rq_sectors(cmd->request), bgstat, bghm);
2925 } 3314 }
2926 3315
2927 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3316 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2960 3349
2961 if (!ret) { 3350 if (!ret) {
2962 /* No error was reported - problem in FW? */ 3351 /* No error was reported - problem in FW? */
2963 cmd->result = ScsiResult(DID_ERROR, 0); 3352 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2964 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3353 "9057 BLKGRD: Unknown error in cmd"
2965 "9057 BLKGRD: Unknown error reported!\n"); 3354 " 0x%x lba 0x%llx blk cnt 0x%x "
3355 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3356 (unsigned long long)scsi_get_lba(cmd),
3357 blk_rq_sectors(cmd->request), bgstat, bghm);
3358
3359 /* Calculate what type of error it was */
3360 lpfc_calc_bg_err(phba, lpfc_cmd);
2966 } 3361 }
2967
2968out: 3362out:
2969 return ret; 3363 return ret;
2970} 3364}
@@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3028 "dma_map_sg. Config %d, seg_cnt %d\n", 3422 "dma_map_sg. Config %d, seg_cnt %d\n",
3029 __func__, phba->cfg_sg_seg_cnt, 3423 __func__, phba->cfg_sg_seg_cnt,
3030 lpfc_cmd->seg_cnt); 3424 lpfc_cmd->seg_cnt);
3425 lpfc_cmd->seg_cnt = 0;
3031 scsi_dma_unmap(scsi_cmnd); 3426 scsi_dma_unmap(scsi_cmnd);
3032 return 1; 3427 return 1;
3033 } 3428 }
@@ -3094,45 +3489,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3094} 3489}
3095 3490
3096/** 3491/**
3097 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
3098 * @phba: The Hba for which this call is being executed.
3099 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
3100 *
3101 * Adjust the data length to account for how much data
3102 * is actually on the wire.
3103 *
3104 * returns the adjusted data length
3105 **/
3106static int
3107lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3108 struct lpfc_scsi_buf *lpfc_cmd)
3109{
3110 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3111 int diflen, fcpdl;
3112 unsigned blksize;
3113
3114 fcpdl = scsi_bufflen(sc);
3115
3116 /* Check if there is protection data on the wire */
3117 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3118 /* Read */
3119 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
3120 return fcpdl;
3121
3122 } else {
3123 /* Write */
3124 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
3125 return fcpdl;
3126 }
3127
3128 /* If protection data on the wire, adjust the count accordingly */
3129 blksize = lpfc_cmd_blksize(sc);
3130 diflen = (fcpdl / blksize) * 8;
3131 fcpdl += diflen;
3132 return fcpdl;
3133}
3134
3135/**
3136 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3492 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3137 * @phba: The Hba for which this call is being executed. 3493 * @phba: The Hba for which this call is being executed.
3138 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3494 * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3149 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3505 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3150 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); 3506 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3151 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 3507 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3152 uint32_t num_bde = 0; 3508 uint32_t num_sge = 0;
3153 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3509 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3154 int prot_group_type = 0; 3510 int prot_group_type = 0;
3155 int fcpdl; 3511 int fcpdl;
3156 3512
3157 /* 3513 /*
3158 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3514 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3159 * fcp_rsp regions to the first data bde entry 3515 * fcp_rsp regions to the first data sge entry
3160 */ 3516 */
3161 if (scsi_sg_count(scsi_cmnd)) { 3517 if (scsi_sg_count(scsi_cmnd)) {
3162 /* 3518 /*
@@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3179 3535
3180 sgl += 1; 3536 sgl += 1;
3181 lpfc_cmd->seg_cnt = datasegcnt; 3537 lpfc_cmd->seg_cnt = datasegcnt;
3182 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3538
3183 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3539 /* First check if data segment count from SCSI Layer is good */
3184 "9087 BLKGRD: %s: Too many sg segments" 3540 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3185 " from dma_map_sg. Config %d, seg_cnt" 3541 goto err;
3186 " %d\n",
3187 __func__, phba->cfg_sg_seg_cnt,
3188 lpfc_cmd->seg_cnt);
3189 scsi_dma_unmap(scsi_cmnd);
3190 return 1;
3191 }
3192 3542
3193 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3543 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3194 3544
3195 switch (prot_group_type) { 3545 switch (prot_group_type) {
3196 case LPFC_PG_TYPE_NO_DIF: 3546 case LPFC_PG_TYPE_NO_DIF:
3197 num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3547 /* Here we need to add a DISEED to the count */
3548 if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3549 goto err;
3550
3551 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3198 datasegcnt); 3552 datasegcnt);
3553
3199 /* we should have 2 or more entries in buffer list */ 3554 /* we should have 2 or more entries in buffer list */
3200 if (num_bde < 2) 3555 if (num_sge < 2)
3201 goto err; 3556 goto err;
3202 break; 3557 break;
3203 case LPFC_PG_TYPE_DIF_BUF:{ 3558
3559 case LPFC_PG_TYPE_DIF_BUF:
3204 /* 3560 /*
3205 * This type indicates that protection buffers are 3561 * This type indicates that protection buffers are
3206 * passed to the driver, so that needs to be prepared 3562 * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3215 } 3571 }
3216 3572
3217 lpfc_cmd->prot_seg_cnt = protsegcnt; 3573 lpfc_cmd->prot_seg_cnt = protsegcnt;
3218 if (lpfc_cmd->prot_seg_cnt 3574 /*
3219 > phba->cfg_prot_sg_seg_cnt) { 3575 * There is a minimun of 3 SGEs used for every
3220 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3576 * protection data segment.
3221 "9088 BLKGRD: %s: Too many prot sg " 3577 */
3222 "segments from dma_map_sg. Config %d," 3578 if ((lpfc_cmd->prot_seg_cnt * 3) >
3223 "prot_seg_cnt %d\n", __func__, 3579 (phba->cfg_total_seg_cnt - 2))
3224 phba->cfg_prot_sg_seg_cnt, 3580 goto err;
3225 lpfc_cmd->prot_seg_cnt);
3226 dma_unmap_sg(&phba->pcidev->dev,
3227 scsi_prot_sglist(scsi_cmnd),
3228 scsi_prot_sg_count(scsi_cmnd),
3229 datadir);
3230 scsi_dma_unmap(scsi_cmnd);
3231 return 1;
3232 }
3233 3581
3234 num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3582 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3235 datasegcnt, protsegcnt); 3583 datasegcnt, protsegcnt);
3584
3236 /* we should have 3 or more entries in buffer list */ 3585 /* we should have 3 or more entries in buffer list */
3237 if (num_bde < 3) 3586 if ((num_sge < 3) ||
3587 (num_sge > phba->cfg_total_seg_cnt))
3238 goto err; 3588 goto err;
3239 break; 3589 break;
3240 } 3590
3241 case LPFC_PG_TYPE_INVALID: 3591 case LPFC_PG_TYPE_INVALID:
3242 default: 3592 default:
3593 scsi_dma_unmap(scsi_cmnd);
3594 lpfc_cmd->seg_cnt = 0;
3595
3243 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3596 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3244 "9083 Unexpected protection group %i\n", 3597 "9083 Unexpected protection group %i\n",
3245 prot_group_type); 3598 prot_group_type);
@@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3263 } 3616 }
3264 3617
3265 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3618 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3266
3267 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3619 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3268 3620
3269 /* 3621 /*
@@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3274 3626
3275 return 0; 3627 return 0;
3276err: 3628err:
3629 if (lpfc_cmd->seg_cnt)
3630 scsi_dma_unmap(scsi_cmnd);
3631 if (lpfc_cmd->prot_seg_cnt)
3632 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3633 scsi_prot_sg_count(scsi_cmnd),
3634 scsi_cmnd->sc_data_direction);
3635
3277 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3636 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3278 "9084 Could not setup all needed BDE's" 3637 "9084 Cannot setup S/G List for HBA"
3279 "prot_group_type=%d, num_bde=%d\n", 3638 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3280 prot_group_type, num_bde); 3639 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3640 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3641 prot_group_type, num_sge);
3642
3643 lpfc_cmd->seg_cnt = 0;
3644 lpfc_cmd->prot_seg_cnt = 0;
3281 return 1; 3645 return 1;
3282} 3646}
3283 3647
@@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4357 4721
4358 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 4722 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4359 if (vport->phba->cfg_enable_bg) { 4723 if (vport->phba->cfg_enable_bg) {
4360 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, 4724 lpfc_printf_vlog(vport,
4725 KERN_INFO, LOG_SCSI_CMD,
4361 "9033 BLKGRD: rcvd %s cmd:x%x " 4726 "9033 BLKGRD: rcvd %s cmd:x%x "
4362 "sector x%llx cnt %u pt %x\n", 4727 "sector x%llx cnt %u pt %x\n",
4363 dif_op_str[scsi_get_prot_op(cmnd)], 4728 dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4369 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 4734 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4370 } else { 4735 } else {
4371 if (vport->phba->cfg_enable_bg) { 4736 if (vport->phba->cfg_enable_bg) {
4372 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, 4737 lpfc_printf_vlog(vport,
4738 KERN_INFO, LOG_SCSI_CMD,
4373 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 4739 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4374 "x%x sector x%llx cnt %u pt %x\n", 4740 "x%x sector x%llx cnt %u pt %x\n",
4375 cmnd->cmnd[0], 4741 cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4542 /* Wait for abort to complete */ 4908 /* Wait for abort to complete */
4543 wait_event_timeout(waitq, 4909 wait_event_timeout(waitq,
4544 (lpfc_cmd->pCmd != cmnd), 4910 (lpfc_cmd->pCmd != cmnd),
4545 (2*vport->cfg_devloss_tmo*HZ)); 4911 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4546 lpfc_cmd->waitq = NULL; 4912 lpfc_cmd->waitq = NULL;
4547 4913
4548 if (lpfc_cmd->pCmd == cmnd) { 4914 if (lpfc_cmd->pCmd == cmnd) {
@@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5012 struct lpfc_hba *phba = vport->phba; 5378 struct lpfc_hba *phba = vport->phba;
5013 int rc, ret = SUCCESS; 5379 int rc, ret = SUCCESS;
5014 5380
5381 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5382 "3172 SCSI layer issued Host Reset Data:\n");
5383
5015 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 5384 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5016 lpfc_offline(phba); 5385 lpfc_offline(phba);
5017 rc = lpfc_sli_brdrestart(phba); 5386 rc = lpfc_sli_brdrestart(phba);
5018 if (rc) 5387 if (rc)
5019 ret = FAILED; 5388 ret = FAILED;
5020 lpfc_online(phba); 5389 rc = lpfc_online(phba);
5390 if (rc)
5391 ret = FAILED;
5021 lpfc_unblock_mgmt_io(phba); 5392 lpfc_unblock_mgmt_io(phba);
5022 5393
5023 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 5394 if (ret == FAILED) {
5024 "3172 SCSI layer issued Host Reset Data: x%x\n", ret); 5395 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5396 "3323 Failed host reset, bring it offline\n");
5397 lpfc_sli4_offline_eratt(phba);
5398 }
5025 return ret; 5399 return ret;
5026} 5400}
5027 5401
@@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev)
5088 } 5462 }
5089 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); 5463 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5090 if (num_to_alloc != num_allocated) { 5464 if (num_to_alloc != num_allocated) {
5091 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5465 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5092 "0708 Allocation request of %d " 5466 "0708 Allocation request of %d "
5093 "command buffers did not succeed. " 5467 "command buffers did not succeed. "
5094 "Allocated %d buffers.\n", 5468 "Allocated %d buffers.\n",
5095 num_to_alloc, num_allocated); 5469 num_to_alloc, num_allocated);
5096 } 5470 }
5097 if (num_allocated > 0) 5471 if (num_allocated > 0)
5098 phba->total_scsi_bufs += num_allocated; 5472 phba->total_scsi_bufs += num_allocated;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35dd17eb0f27..572579f87de4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
667 667
668 spin_lock_irqsave(&phba->hbalock, iflags); 668 spin_lock_irqsave(&phba->hbalock, iflags);
669 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 669 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
670 next_time = jiffies + HZ * (phba->fc_ratov + 1); 670 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
671 list_for_each_entry_safe(rrq, nextrrq, 671 list_for_each_entry_safe(rrq, nextrrq,
672 &phba->active_rrq_list, list) { 672 &phba->active_rrq_list, list) {
673 if (time_after(jiffies, rrq->rrq_stop_time)) 673 if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
782 return; 782 return;
783 spin_lock_irqsave(&phba->hbalock, iflags); 783 spin_lock_irqsave(&phba->hbalock, iflags);
784 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 784 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
785 next_time = jiffies + HZ * (phba->fc_ratov * 2); 785 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
786 list_splice_init(&phba->active_rrq_list, &rrq_list); 786 list_splice_init(&phba->active_rrq_list, &rrq_list);
787 spin_unlock_irqrestore(&phba->hbalock, iflags); 787 spin_unlock_irqrestore(&phba->hbalock, iflags);
788 788
@@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
878 else 878 else
879 rrq->send_rrq = 0; 879 rrq->send_rrq = 0;
880 rrq->xritag = xritag; 880 rrq->xritag = xritag;
881 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 881 rrq->rrq_stop_time = jiffies +
882 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
882 rrq->ndlp = ndlp; 883 rrq->ndlp = ndlp;
883 rrq->nlp_DID = ndlp->nlp_DID; 884 rrq->nlp_DID = ndlp->nlp_DID;
884 rrq->vport = ndlp->vport; 885 rrq->vport = ndlp->vport;
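
These timer sites all get the same treatment: an interval once written as HZ * seconds is now msecs_to_jiffies(1000 * seconds). The two forms compute the same number of jiffies for any HZ; the new spelling states the duration in wall-clock units and extends naturally to sub-second timeouts. Compare:

	/* before: tied directly to the jiffies tick rate */
	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);

	/* after: the same duration, stated in milliseconds */
	rrq->rrq_stop_time = jiffies +
		msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
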
@@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
926 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 927 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
927 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 928 !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
928 ndlp = piocbq->context_un.ndlp; 929 ndlp = piocbq->context_un.ndlp;
929 else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && 930 else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
930 (piocbq->iocb_flag & LPFC_IO_LIBDFC))
931 ndlp = piocbq->context_un.ndlp; 931 ndlp = piocbq->context_un.ndlp;
932 else 932 else
933 ndlp = piocbq->context1; 933 ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 BUG(); 1339 BUG();
1340 else 1340 else
1341 mod_timer(&piocb->vport->els_tmofunc, 1341 mod_timer(&piocb->vport->els_tmofunc,
1342 jiffies + HZ * (phba->fc_ratov << 1)); 1342 jiffies +
1343 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1343 } 1344 }
1344 1345
1345 1346
@@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2340 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2341 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2341 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2342 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2342 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2343 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2343 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2344 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2345 "x%x x%x x%x\n",
2344 pmb->vport ? pmb->vport->vpi : 0, 2346 pmb->vport ? pmb->vport->vpi : 0,
2345 pmbox->mbxCommand, 2347 pmbox->mbxCommand,
2346 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2348 lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2354 pmbox->un.varWords[4], 2356 pmbox->un.varWords[4],
2355 pmbox->un.varWords[5], 2357 pmbox->un.varWords[5],
2356 pmbox->un.varWords[6], 2358 pmbox->un.varWords[6],
2357 pmbox->un.varWords[7]); 2359 pmbox->un.varWords[7],
2360 pmbox->un.varWords[8],
2361 pmbox->un.varWords[9],
2362 pmbox->un.varWords[10]);
2358 2363
2359 if (pmb->mbox_cmpl) 2364 if (pmb->mbox_cmpl)
2360 pmb->mbox_cmpl(phba,pmb); 2365 pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
2908 lpfc_worker_wake_up(phba); 2913 lpfc_worker_wake_up(phba);
2909 else 2914 else
2910 /* Restart the timer for next eratt poll */ 2915 /* Restart the timer for next eratt poll */
2911 mod_timer(&phba->eratt_poll, jiffies + 2916 mod_timer(&phba->eratt_poll,
2912 HZ * LPFC_ERATT_POLL_INTERVAL); 2917 jiffies +
2918 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
2913 return; 2919 return;
2914} 2920}
2915 2921
@@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5511 list_del_init(&rsrc_blk->list); 5517 list_del_init(&rsrc_blk->list);
5512 kfree(rsrc_blk); 5518 kfree(rsrc_blk);
5513 } 5519 }
5520 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5514 break; 5521 break;
5515 case LPFC_RSC_TYPE_FCOE_XRI: 5522 case LPFC_RSC_TYPE_FCOE_XRI:
5516 kfree(phba->sli4_hba.xri_bmask); 5523 kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5811 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5818 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5812 } else { 5819 } else {
5813 kfree(phba->vpi_bmask); 5820 kfree(phba->vpi_bmask);
5821 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5814 kfree(phba->vpi_ids); 5822 kfree(phba->vpi_ids);
5815 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5823 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5816 kfree(phba->sli4_hba.xri_bmask); 5824 kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5992 struct lpfc_sglq *sglq_entry = NULL; 6000 struct lpfc_sglq *sglq_entry = NULL;
5993 struct lpfc_sglq *sglq_entry_next = NULL; 6001 struct lpfc_sglq *sglq_entry_next = NULL;
5994 struct lpfc_sglq *sglq_entry_first = NULL; 6002 struct lpfc_sglq *sglq_entry_first = NULL;
5995 int status, post_cnt = 0, num_posted = 0, block_cnt = 0; 6003 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
5996 int last_xritag = NO_XRI; 6004 int last_xritag = NO_XRI;
5997 LIST_HEAD(prep_sgl_list); 6005 LIST_HEAD(prep_sgl_list);
5998 LIST_HEAD(blck_sgl_list); 6006 LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6004 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6012 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6005 spin_unlock_irq(&phba->hbalock); 6013 spin_unlock_irq(&phba->hbalock);
6006 6014
6015 total_cnt = phba->sli4_hba.els_xri_cnt;
6007 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6016 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6008 &allc_sgl_list, list) { 6017 &allc_sgl_list, list) {
6009 list_del_init(&sglq_entry->list); 6018 list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6055 sglq_entry->sli4_xritag); 6064 sglq_entry->sli4_xritag);
6056 list_add_tail(&sglq_entry->list, 6065 list_add_tail(&sglq_entry->list,
6057 &free_sgl_list); 6066 &free_sgl_list);
6058 spin_lock_irq(&phba->hbalock); 6067 total_cnt--;
6059 phba->sli4_hba.els_xri_cnt--;
6060 spin_unlock_irq(&phba->hbalock);
6061 } 6068 }
6062 } 6069 }
6063 } 6070 }
@@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6085 (sglq_entry_first->sli4_xritag + 6092 (sglq_entry_first->sli4_xritag +
6086 post_cnt - 1)); 6093 post_cnt - 1));
6087 list_splice_init(&blck_sgl_list, &free_sgl_list); 6094 list_splice_init(&blck_sgl_list, &free_sgl_list);
6088 spin_lock_irq(&phba->hbalock); 6095 total_cnt -= post_cnt;
6089 phba->sli4_hba.els_xri_cnt -= post_cnt;
6090 spin_unlock_irq(&phba->hbalock);
6091 } 6096 }
6092 6097
6093 /* don't reset xritag due to hole in xri block */ 6098
@@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6097 /* reset els sgl post count for next round of posting */ 6102 /* reset els sgl post count for next round of posting */
6098 post_cnt = 0; 6103 post_cnt = 0;
6099 } 6104 }
6105 /* update the number of XRIs posted for ELS */
6106 phba->sli4_hba.els_xri_cnt = total_cnt;
6100 6107
6101 /* free the els sgls failed to post */ 6108 /* free the els sgls failed to post */
6102 lpfc_free_sgl_list(phba, &free_sgl_list); 6109 lpfc_free_sgl_list(phba, &free_sgl_list);
@@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6446 6453
6447 /* Start the ELS watchdog timer */ 6454 /* Start the ELS watchdog timer */
6448 mod_timer(&vport->els_tmofunc, 6455 mod_timer(&vport->els_tmofunc,
6449 jiffies + HZ * (phba->fc_ratov * 2)); 6456 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6450 6457
6451 /* Start heart beat timer */ 6458 /* Start heart beat timer */
6452 mod_timer(&phba->hb_tmofunc, 6459 mod_timer(&phba->hb_tmofunc,
6453 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 6460 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6454 phba->hb_outstanding = 0; 6461 phba->hb_outstanding = 0;
6455 phba->last_completion_time = jiffies; 6462 phba->last_completion_time = jiffies;
6456 6463
6457 /* Start error attention (ERATT) polling timer */ 6464 /* Start error attention (ERATT) polling timer */
6458 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 6465 mod_timer(&phba->eratt_poll,
6466 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
6459 6467
6460 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6468 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6461 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6469 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6822 goto out_not_finished; 6830 goto out_not_finished;
6823 } 6831 }
6824 /* timeout active mbox command */ 6832 /* timeout active mbox command */
6825 mod_timer(&psli->mbox_tmo, (jiffies + 6833 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6826 (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); 6834 1000);
6835 mod_timer(&psli->mbox_tmo, jiffies + timeout);
6827 } 6836 }
6828 6837
6829 /* Mailbox cmd <cmd> issue */ 6838 /* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7496 7505
7497 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7506 /* Start timer for the mbox_tmo and log some mailbox post messages */
7498 mod_timer(&psli->mbox_tmo, (jiffies + 7507 mod_timer(&psli->mbox_tmo, (jiffies +
7499 (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); 7508 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7500 7509
7501 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7510 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7502 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7511 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7914static inline uint32_t 7923static inline uint32_t
7915lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7924lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7916{ 7925{
7917 int i; 7926 struct lpfc_vector_map_info *cpup;
7918 7927 int chann, cpu;
7919 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7920 i = smp_processor_id();
7921 else
7922 i = atomic_add_return(1, &phba->fcp_qidx);
7923 7928
7924 i = (i % phba->cfg_fcp_io_channel); 7929 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
7925 return i; 7930 cpu = smp_processor_id();
7931 if (cpu < phba->sli4_hba.num_present_cpu) {
7932 cpup = phba->sli4_hba.cpu_map;
7933 cpup += cpu;
7934 return cpup->channel_id;
7935 }
7936 chann = cpu;
7937 }
7938 chann = atomic_add_return(1, &phba->fcp_qidx);
7939 chann = (chann % phba->cfg_fcp_io_channel);
7940 return chann;
7926} 7941}
7927 7942
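
Routing by CPU now consults a per-CPU table instead of taking the processor id modulo the channel count, so a command completes on the same EQ/WQ pair its submitting CPU is affined to. For example, assuming the map is filled round-robin as sketched after the struct definition in lpfc_sli4.h below, an 8-CPU system with cfg_fcp_io_channel = 4 sends work from CPU 6 to channel 2; a CPU index at or above num_present_cpu falls back to the atomic round-robin counter.
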
7928/** 7943/**
@@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8444 8459
8445 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8460 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8446 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8461 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8462 if (unlikely(!phba->sli4_hba.fcp_wq))
8463 return IOCB_ERROR;
8447 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8464 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8448 &wqe)) 8465 &wqe))
8449 return IOCB_ERROR; 8466 return IOCB_ERROR;
8450 } else { 8467 } else {
8468 if (unlikely(!phba->sli4_hba.els_wq))
8469 return IOCB_ERROR;
8451 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8470 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8452 return IOCB_ERROR; 8471 return IOCB_ERROR;
8453 } 8472 }
@@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10003 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 10022 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10004 SLI_IOCB_RET_IOCB); 10023 SLI_IOCB_RET_IOCB);
10005 if (retval == IOCB_SUCCESS) { 10024 if (retval == IOCB_SUCCESS) {
10006 timeout_req = timeout * HZ; 10025 timeout_req = msecs_to_jiffies(timeout * 1000);
10007 timeleft = wait_event_timeout(done_q, 10026 timeleft = wait_event_timeout(done_q,
10008 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 10027 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10009 timeout_req); 10028 timeout_req);
@@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10108 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 10127 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10109 wait_event_interruptible_timeout(done_q, 10128 wait_event_interruptible_timeout(done_q,
10110 pmboxq->mbox_flag & LPFC_MBX_WAKE, 10129 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10111 timeout * HZ); 10130 msecs_to_jiffies(timeout * 1000));
10112 10131
10113 spin_lock_irqsave(&phba->hbalock, flag); 10132 spin_lock_irqsave(&phba->hbalock, flag);
10114 pmboxq->context1 = NULL; 10133 pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12899 } 12918 }
12900 wq->db_regaddr = bar_memmap_p + db_offset; 12919 wq->db_regaddr = bar_memmap_p + db_offset;
12901 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12920 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12902 "3264 WQ[%d]: barset:x%x, offset:x%x\n", 12921 "3264 WQ[%d]: barset:x%x, offset:x%x, "
12903 wq->queue_id, pci_barset, db_offset); 12922 "format:x%x\n", wq->queue_id, pci_barset,
12923 db_offset, wq->db_format);
12904 } else { 12924 } else {
12905 wq->db_format = LPFC_DB_LIST_FORMAT; 12925 wq->db_format = LPFC_DB_LIST_FORMAT;
12906 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 12926 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13120 } 13140 }
13121 hrq->db_regaddr = bar_memmap_p + db_offset; 13141 hrq->db_regaddr = bar_memmap_p + db_offset;
13122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13123 "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", 13143 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
13124 hrq->queue_id, pci_barset, db_offset); 13144 "format:x%x\n", hrq->queue_id, pci_barset,
13145 db_offset, hrq->db_format);
13125 } else { 13146 } else {
13126 hrq->db_format = LPFC_DB_RING_FORMAT; 13147 hrq->db_format = LPFC_DB_RING_FORMAT;
13127 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 13148 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13971 } 13992 }
13972 13993
13973 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13994 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13974 "2538 Received frame rctl:%s type:%s " 13995 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
13975 "Frame Data:%08x %08x %08x %08x %08x %08x\n", 13996 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
13976 rctl_names[fc_hdr->fh_r_ctl], 13997 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
13977 type_names[fc_hdr->fh_type], 13998 type_names[fc_hdr->fh_type], fc_hdr->fh_type,
13978 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 13999 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13979 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 14000 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13980 be32_to_cpu(header[4]), be32_to_cpu(header[5])); 14001 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14002 be32_to_cpu(header[6]));
13981 return 0; 14003 return 0;
13982drop: 14004drop:
13983 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14005 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index be02b59ea279..67af460184ba 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -346,11 +346,6 @@ struct lpfc_bmbx {
346#define SLI4_CT_VFI 2 346#define SLI4_CT_VFI 2
347#define SLI4_CT_FCFI 3 347#define SLI4_CT_FCFI 3
348 348
349#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
350#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000
351#define LPFC_SLI4_MIN_BUF_SIZE 0x400
352#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
353
354/* 349/*
355 * SLI4 specific data structures 350 * SLI4 specific data structures
356 */ 351 */
@@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
440 435
441#define LPFC_SLI4_HANDLER_NAME_SZ 16 436#define LPFC_SLI4_HANDLER_NAME_SZ 16
442 437
438/* Used for IRQ vector to CPU mapping */
439struct lpfc_vector_map_info {
440 uint16_t phys_id;
441 uint16_t core_id;
442 uint16_t irq;
443 uint16_t channel_id;
444 struct cpumask maskbits;
445};
446#define LPFC_VECTOR_MAP_EMPTY 0xffff
447#define LPFC_MAX_CPU 256
448
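
A hedged sketch of how this map might be populated at initialization (the real setup lives in lpfc_init.c and also records IRQ affinity; only the round-robin channel assignment is shown here):

	static void
	lpfc_sketch_fill_cpu_map(struct lpfc_hba *phba)
	{
		struct lpfc_vector_map_info *cpup = phba->sli4_hba.cpu_map;
		uint16_t cpu;

		for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu;
		     cpu++, cpup++) {
			cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
			cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
			cpup->irq = LPFC_VECTOR_MAP_EMPTY;
			cpup->channel_id = cpu % phba->cfg_fcp_io_channel;
		}
	}
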
443/* SLI4 HBA data structure entries */ 449/* SLI4 HBA data structure entries */
444struct lpfc_sli4_hba { 450struct lpfc_sli4_hba {
445 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 451 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
573 struct lpfc_iov iov; 579 struct lpfc_iov iov;
574 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 580 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
575 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 581 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
582
583 /* CPU to vector mapping information */
584 struct lpfc_vector_map_info *cpu_map;
585 uint16_t num_online_cpu;
586 uint16_t num_present_cpu;
576}; 587};
577 588
578enum lpfc_sge_type { 589enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 664cd04f7cd8..a38dc3b16969 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.38" 21#define LPFC_DRIVER_VERSION "8.3.39"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 0fe188e66000..e28e431564b0 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
80 } 80 }
81} 81}
82 82
83static int 83int
84lpfc_alloc_vpi(struct lpfc_hba *phba) 84lpfc_alloc_vpi(struct lpfc_hba *phba)
85{ 85{
86 unsigned long vpi; 86 unsigned long vpi;
@@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
568 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 568 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
569 struct lpfc_hba *phba = vport->phba; 569 struct lpfc_hba *phba = vport->phba;
570 long timeout; 570 long timeout;
571 bool ns_ndlp_referenced = false;
571 572
572 if (vport->port_type == LPFC_PHYSICAL_PORT) { 573 if (vport->port_type == LPFC_PHYSICAL_PORT) {
573 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 574 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
628 629
629 lpfc_debugfs_terminate(vport); 630 lpfc_debugfs_terminate(vport);
630 631
632 /*
633 * The call to fc_remove_host might release the NameServer ndlp. Since
634 * we might need to use the ndlp to send the DA_ID CT command,
635 * increment the reference for the NameServer ndlp to prevent it from
636 * being released.
637 */
638 ndlp = lpfc_findnode_did(vport, NameServer_DID);
639 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
640 lpfc_nlp_get(ndlp);
641 ns_ndlp_referenced = true;
642 }
643
631 /* Remove FC host and then SCSI host with the vport */ 644 /* Remove FC host and then SCSI host with the vport */
632 fc_remove_host(lpfc_shost_from_vport(vport)); 645 fc_remove_host(lpfc_shost_from_vport(vport));
633 scsi_remove_host(lpfc_shost_from_vport(vport)); 646 scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
734 lpfc_discovery_wait(vport); 747 lpfc_discovery_wait(vport);
735 748
736skip_logo: 749skip_logo:
750
751 /*
752 * If the NameServer ndlp has been incremented to allow the DA_ID CT
753 * command to be sent, decrement the ndlp now.
754 */
755 if (ns_ndlp_referenced) {
756 ndlp = lpfc_findnode_did(vport, NameServer_DID);
757 lpfc_nlp_put(ndlp);
758 }
759
737 lpfc_cleanup(vport); 760 lpfc_cleanup(vport);
738 lpfc_sli_host_down(vport); 761 lpfc_sli_host_down(vport);
739 762
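
One pairing rule to keep in mind when reading this change: the lpfc_nlp_get() taken before fc_remove_host() is only dropped at skip_logo, and only when ns_ndlp_referenced was set, so every exit from lpfc_vport_delete() after the get must reach skip_logo for the NameServer node's reference count to stay balanced.
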
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 90828340acea..6b2c94eb8134 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); 90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); 91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
92void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); 92void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
93int lpfc_alloc_vpi(struct lpfc_hba *phba);
93 94
94/* 95/*
95 * queuecommand VPORT-specific return codes. Specified in the host byte code. 96 * queuecommand VPORT-specific return codes. Specified in the host byte code.