author		James Smart <James.Smart@Emulex.Com>	2009-10-02 15:17:02 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2009-12-04 13:01:42 -0500
commit		45ed119035b27f240345b06e090d559874e3677a
tree		14466c52a644d73ea90f30b885cfe4e3fc88d12e /drivers/scsi
parent		0d87841997125971b7a39d21d1435054f91884c3
[SCSI] lpfc 8.3.5: fix fcp command polling, add FIP mode, performance optimisations and devloss timeout fixes
This patch includes the following changes:

- Fixed Panic/Hang when using polling mode for fcp commands
- Added support for Read_rev mbox bits indicating FIP mode of HBA
- Optimize performance of slow-path handling of els responses
- Add code to cleanup orphaned unsolicited receive sequences
- Fixed Devloss timeout when multiple initiators are in same zone

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
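The polling fix works by serializing access to the FCP response ring: lpfc_sli_handle_fast_ring_event() is now callable from both the interrupt handler and the polling paths, and the new fcp_ring_in_use flag lets whichever context gets there first drain the ring while the other backs off. A minimal sketch of that guard under phba->hbalock follows (the helper name is illustrative; the real logic is in the lpfc_sli.c hunks below):

/* Sketch of the reentrancy guard this patch adds: only one context
 * (interrupt handler or poller) may walk the FCP response ring at a time. */
static int handle_fast_ring_event_sketch(struct lpfc_hba *phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->fcp_ring_in_use) {		/* another context is draining */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	phba->fcp_ring_in_use = 1;		/* claim the ring */
	/* ... process response IOCBs, dropping the lock around each
	 * completion callback, as the real routine does ... */
	phba->fcp_ring_in_use = 0;		/* release before unlocking */
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return 0;
}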
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/lpfc/lpfc.h	|	9
-rw-r--r--	drivers/scsi/lpfc/lpfc_attr.c	|	42
-rw-r--r--	drivers/scsi/lpfc/lpfc_crtn.h	|	7
-rw-r--r--	drivers/scsi/lpfc/lpfc_els.c	|	2
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c	|	19
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw4.h	|	5
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	|	11
-rw-r--r--	drivers/scsi/lpfc/lpfc_mbox.c	|	5
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	|	16
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c	|	464
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.h	|	2
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli4.h	|	2
12 files changed, 307 insertions, 277 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5ebb5343421..ebeddbe86e67 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -110,6 +110,7 @@ struct hbq_dmabuf {
 	uint32_t size;
 	uint32_t tag;
 	struct lpfc_cq_event cq_event;
+	unsigned long time_stamp;
 };
 
 /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -405,6 +406,7 @@ struct lpfc_vport {
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
 	struct list_head rcv_buffer_list;
+	unsigned long rcv_buffer_time_stamp;
 	uint32_t vport_flag;
 #define STATIC_VPORT	1
 };
@@ -527,14 +529,16 @@ struct lpfc_hba {
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
 #define DEFER_ERATT		0x2 /* Deferred error attention in progress */
 #define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
-#define HBA_RECEIVE_BUFFER	0x8 /* Rcv buffer posted to worker thread */
+#define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
 #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
 #define FCP_XRI_ABORT_EVENT	0x20
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
 #define LINK_DISABLED		0x100 /* Link disabled by user */
 #define FCF_DISC_INPROGRESS	0x200 /* FCF discovery in progress */
+#define HBA_FIP_SUPPORT		0x400 /* FIP support in HBA */
 #define HBA_AER_ENABLED		0x800 /* AER enabled with HBA */
+	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -606,7 +610,6 @@ struct lpfc_hba {
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
 	uint32_t cfg_enable_bg;
-	uint32_t cfg_enable_fip;
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
 
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82005b8ad957..d55befb7cf4c 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -100,6 +100,28 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
 }
 
+/**
+ * lpfc_enable_fip_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (phba->hba_flag & HBA_FIP_SUPPORT)
+		return snprintf(buf, PAGE_SIZE, "1\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
 static ssize_t
 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
 		  char *buf)
@@ -1134,6 +1156,9 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
 	if ((val & 0x3) != val)
 		return -EINVAL;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		val = 0;
+
 	spin_lock_irq(&phba->hbalock);
 
 	old_val = phba->cfg_poll;
@@ -1597,6 +1622,7 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
 static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
+static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
 		   lpfc_board_mode_show, lpfc_board_mode_store);
 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@@ -3128,15 +3154,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
 /*
-# lpfc_enable_fip: When set, FIP is required to start discovery. If not
-# set, the driver will add an FCF record manually if the port has no
-# FCF records available and start discovery.
-# Value range is [0,1]. Default value is 1 (enabled)
-*/
-LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
-
-
-/*
 # lpfc_prot_mask: i
 #	- Bit mask of host protection capabilities used to register with the
 #	  SCSI mid-layer
@@ -3194,6 +3211,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_num_discovered_ports,
 	&dev_attr_menlo_mgmt_mode,
 	&dev_attr_lpfc_drvr_version,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_temp_sensor,
 	&dev_attr_lpfc_log_verbose,
 	&dev_attr_lpfc_lun_queue_depth,
@@ -3201,7 +3219,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
-	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_ack0,
@@ -3256,7 +3273,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_lun_queue_depth,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
-	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_hba_queue_depth,
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_restrict_login,
@@ -4412,13 +4428,15 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->cfg_poll = 0;
+	else
 	phba->cfg_poll = lpfc_poll;
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
-	lpfc_enable_fip_init(phba, lpfc_enable_fip);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 	lpfc_aer_support_init(phba, lpfc_aer_support);
 
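Usage note: because lpfc_enable_fip is now registered via DEVICE_ATTR with S_IRUGO and listed in lpfc_hba_attrs, the FIP mode should surface as a read-only file under the Scsi_Host sysfs directory, typically /sys/class/scsi_host/hostN/lpfc_enable_fip (the exact path depends on the kernel's sysfs layout). It reports the firmware-derived HBA_FIP_SUPPORT state rather than the old user-settable module parameter.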
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0d450ae3a2d4..650494d622c1 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -49,6 +49,8 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
+void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
+void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
 void lpfc_linkdown_port(struct lpfc_vport *);
@@ -214,7 +216,10 @@ void lpfc_stop_vport_timers(struct lpfc_vport *);
 void lpfc_poll_timeout(unsigned long ptr);
 void lpfc_poll_start_timer(struct lpfc_hba *);
 void lpfc_poll_eratt(unsigned long);
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
+			struct lpfc_sli_ring *, uint32_t);
+
 struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
 void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 489ddcd4c584..fe0a33c9b874 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -173,7 +173,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
 	 */
 	if ((did == Fabric_DID) &&
-		bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
+		(phba->hba_flag & HBA_FIP_SUPPORT) &&
 		((elscmd == ELS_CMD_FLOGI) ||
 		(elscmd == ELS_CMD_FDISC) ||
 		(elscmd == ELS_CMD_LOGO)))
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e8689cabe5f7..20fca3f6d43b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -568,7 +568,7 @@ lpfc_work_done(struct lpfc_hba *phba)
 	status >>= (4*LPFC_ELS_RING);
 	if ((status & HA_RXMASK) ||
 	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
-	    (phba->hba_flag & HBA_RECEIVE_BUFFER)) {
+	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
 			/* Set the lpfc data pending flag */
@@ -706,6 +706,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 void
 lpfc_port_link_failure(struct lpfc_vport *vport)
 {
+	/* Cleanup any outstanding received buffers */
+	lpfc_cleanup_rcv_buffers(vport);
+
 	/* Cleanup any outstanding RSCN activity */
 	lpfc_els_flush_rscn(vport);
 
@@ -1282,7 +1285,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
 		return 0;
 
-	if (!phba->cfg_enable_fip) {
+	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
 		*boot_flag = 0;
 		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
 				new_fcf_record);
@@ -1997,7 +2000,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	 * is phase 1 implementation that support FCF index 0 and driver
 	 * defaults.
 	 */
-	if (phba->cfg_enable_fip == 0) {
+	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
 		fcf_record = kzalloc(sizeof(struct fcf_record),
 				     GFP_KERNEL);
 		if (unlikely(!fcf_record)) {
@@ -4442,7 +4445,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	 */
 	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
 	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
-	    (phba->cfg_enable_fip == 0)) {
+	    (!(phba->hba_flag & HBA_FIP_SUPPORT))) {
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -4615,14 +4618,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
 	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
 		return;
 
-	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
-		FIPP_MODE_ON)
-		phba->cfg_enable_fip = 1;
-
-	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
-		FIPP_MODE_OFF)
-		phba->cfg_enable_fip = 0;
-
 	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
 		phba->valid_vlan = 1;
 		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 0c65091110cc..4f03f1d876d0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1601,6 +1601,11 @@ struct lpfc_mbx_read_rev {
 #define lpfc_mbx_rd_rev_fcoe_SHIFT		20
 #define lpfc_mbx_rd_rev_fcoe_MASK		0x00000001
 #define lpfc_mbx_rd_rev_fcoe_WORD		word1
+#define lpfc_mbx_rd_rev_cee_ver_SHIFT		21
+#define lpfc_mbx_rd_rev_cee_ver_MASK		0x00000003
+#define lpfc_mbx_rd_rev_cee_ver_WORD		word1
+#define LPFC_PREDCBX_CEE_MODE	0
+#define LPFC_DCBX_CEE_MODE	1
 #define lpfc_mbx_rd_rev_vpd_SHIFT		29
 #define lpfc_mbx_rd_rev_vpd_MASK		0x00000001
 #define lpfc_mbx_rd_rev_vpd_WORD		word1
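These cee_ver macros follow the driver's SHIFT/MASK/WORD bitfield convention and are consumed through bf_get(). As a rough sketch of how that accessor expands (a simplified restatement for orientation, not code added by this patch; the actual macro lives in lpfc_hw4.h):

/* lpfc describes each register/mailbox bitfield with three per-field
 * macros; bf_get(name, ptr) expands roughly along these lines: */
#define bf_get(name, ptr) \
	((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

/* So bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) extracts bits
 * 21-22 of word1 in the READ_REV reply; the setup code in lpfc_sli.c
 * compares the result against LPFC_DCBX_CEE_MODE to decide FIP mode. */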
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 61925836a09e..d7385d258f78 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -853,12 +853,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 void
 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 {
+	struct lpfc_vport **vports;
 	LPFC_MBOXQ_t *pmboxq;
 	struct lpfc_dmabuf *buf_ptr;
-	int retval;
+	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
 
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_rcv_seq_check_edtov(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+
 	if ((phba->link_state == LPFC_HBA_ERROR) ||
 		(phba->pport->load_flag & FC_UNLOADING) ||
 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
@@ -3519,7 +3526,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Driver internel slow-path CQ Event pool */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
 	/* Response IOCB work queue list */
-	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
 	/* Asynchronous event CQ Event work queue list */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
 	/* Fast-path XRI aborted CQ Event work queue list */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 500a6b6e778e..51c9a1f576f6 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1759,11 +1759,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
 	/* Set up host requested features. */
 	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
 
-	if (phba->cfg_enable_fip)
-		bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
-	else
-		bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
-
 	/* Enable DIF (block guard) only if configured to do so. */
 	if (phba->cfg_enable_bg)
 		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bcddb6c1a148..f5ab5dd9bbbf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2773,7 +2773,9 @@ void lpfc_poll_timeout(unsigned long ptr)
 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		lpfc_sli_poll_fcp_ring (phba);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
@@ -2932,7 +2934,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_host_busy_free_buf;
 	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		lpfc_sli_poll_fcp_ring(phba);
+		spin_unlock(shost->host_lock);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+		spin_lock(shost->host_lock);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
@@ -3028,7 +3034,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	}
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
-		lpfc_sli_poll_fcp_ring (phba);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
 
 	lpfc_cmd->waitq = &waitq;
 	/* Wait for abort to complete */
@@ -3546,7 +3553,8 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	rport->dev_loss_tmo = vport->cfg_devloss_tmo;
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		lpfc_sli_poll_fcp_ring(phba);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 42d0f1948a7a..c4b19d094d39 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -59,7 +59,9 @@ typedef enum _lpfc_iocb_type {
 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
 				  uint32_t);
 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
 			      uint8_t *, uint32_t *);
+static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
+							 struct lpfc_iocbq *);
 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
 				      struct hbq_dmabuf *);
 static IOCB_t *
@@ -2329,168 +2331,6 @@ void lpfc_poll_eratt(unsigned long ptr)
 	return;
 }
 
-/**
- * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
- * @phba: Pointer to HBA context object.
- *
- * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
- * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
- * is enabled.
- *
- * The caller does not hold any lock.
- * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
- * LE bit set. The function will call the completion handler of the command iocb
- * if the response iocb indicates a completion for a command iocb or it is
- * an abort completion.
- **/
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
-{
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
-	IOCB_t *irsp = NULL;
-	IOCB_t *entry = NULL;
-	struct lpfc_iocbq *cmdiocbq = NULL;
-	struct lpfc_iocbq rspiocbq;
-	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
-	uint32_t status;
-	uint32_t portRspPut, portRspMax;
-	int type;
-	uint32_t rsp_cmpl = 0;
-	uint32_t ha_copy;
-	unsigned long iflags;
-
-	pring->stats.iocb_event++;
-
-	/*
-	 * The next available response entry should never exceed the maximum
-	 * entries.  If it does, treat it as an adapter hardware error.
-	 */
-	portRspMax = pring->numRiocb;
-	portRspPut = le32_to_cpu(pgp->rspPutInx);
-	if (unlikely(portRspPut >= portRspMax)) {
-		lpfc_sli_rsp_pointers_error(phba, pring);
-		return;
-	}
-
-	rmb();
-	while (pring->rspidx != portRspPut) {
-		entry = lpfc_resp_iocb(phba, pring);
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
-
-		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
-				      (uint32_t *) &rspiocbq.iocb,
-				      phba->iocb_rsp_size);
-		irsp = &rspiocbq.iocb;
-		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
-		pring->stats.iocb_rsp++;
-		rsp_cmpl++;
-
-		if (unlikely(irsp->ulpStatus)) {
-			/* Rsp ring <ringno> error: IOCB */
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"0326 Rsp Ring %d error: IOCB Data: "
-					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
-					pring->ringno,
-					irsp->un.ulpWord[0],
-					irsp->un.ulpWord[1],
-					irsp->un.ulpWord[2],
-					irsp->un.ulpWord[3],
-					irsp->un.ulpWord[4],
-					irsp->un.ulpWord[5],
-					*(uint32_t *)&irsp->un1,
-					*((uint32_t *)&irsp->un1 + 1));
-		}
-
-		switch (type) {
-		case LPFC_ABORT_IOCB:
-		case LPFC_SOL_IOCB:
-			/*
-			 * Idle exchange closed via ABTS from port.  No iocb
-			 * resources need to be recovered.
-			 */
-			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
-				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-						"0314 IOCB cmd 0x%x "
-						"processed. Skipping "
-						"completion",
-						irsp->ulpCommand);
-				break;
-			}
-
-			spin_lock_irqsave(&phba->hbalock, iflags);
-			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
-							 &rspiocbq);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
-			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-						      &rspiocbq);
-			}
-			break;
-		default:
-			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
-				char adaptermsg[LPFC_MAX_ADPTMSG];
-				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
-				memcpy(&adaptermsg[0], (uint8_t *) irsp,
-				       MAX_MSG_DATA);
-				dev_warn(&((phba->pcidev)->dev),
-					 "lpfc%d: %s\n",
-					 phba->brd_no, adaptermsg);
-			} else {
-				/* Unknown IOCB command */
-				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-						"0321 Unknown IOCB command "
-						"Data: x%x, x%x x%x x%x x%x\n",
-						type, irsp->ulpCommand,
-						irsp->ulpStatus,
-						irsp->ulpIoTag,
-						irsp->ulpContext);
-			}
-			break;
-		}
-
-		/*
-		 * The response IOCB has been processed.  Update the ring
-		 * pointer in SLIM.  If the port response put pointer has not
-		 * been updated, sync the pgp->rspPutInx and fetch the new port
-		 * response put pointer.
-		 */
-		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
-
-		if (pring->rspidx == portRspPut)
-			portRspPut = le32_to_cpu(pgp->rspPutInx);
-	}
-
-	ha_copy = readl(phba->HAregaddr);
-	ha_copy >>= (LPFC_FCP_RING * 4);
-
-	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		pring->stats.iocb_rsp_full++;
-		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
-		writel(status, phba->CAregaddr);
-		readl(phba->CAregaddr);
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
-	if ((ha_copy & HA_R0CE_RSP) &&
-	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
-		pring->stats.iocb_cmd_empty++;
-
-		/* Force update of the local copy of cmdGetInx */
-		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
-		lpfc_sli_resume_iocb(phba, pring);
-
-		if ((pring->lpfc_sli_cmd_available))
-			(pring->lpfc_sli_cmd_available) (phba, pring);
-
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
-
-	return;
-}
 
 /**
  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
@@ -2507,9 +2347,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
  * function if this is an unsolicited iocb.
  * This routine presumes LPFC_FCP_RING handling and doesn't bother
- * to check it explicitly. This function always returns 1.
- **/
-static int
+ * to check it explicitly.
+ */
+int
 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 				struct lpfc_sli_ring *pring, uint32_t mask)
 {
@@ -2539,6 +2379,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return 1;
 	}
+	if (phba->fcp_ring_in_use) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return 1;
+	} else
+		phba->fcp_ring_in_use = 1;
 
 	rmb();
 	while (pring->rspidx != portRspPut) {
@@ -2609,10 +2454,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
 							 &rspiocbq);
 			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-							      &rspiocbq);
-				} else {
 					spin_unlock_irqrestore(&phba->hbalock,
 							       iflag);
 					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
@@ -2620,7 +2461,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 					spin_lock_irqsave(&phba->hbalock,
 							  iflag);
 			}
-			}
 			break;
 		case LPFC_UNSOL_IOCB:
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -2680,6 +2520,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 
 	}
 
+	phba->fcp_ring_in_use = 0;
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return rc;
 }
@@ -3027,10 +2868,13 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 	struct lpfc_cq_event *cq_event;
 	unsigned long iflag;
 
-	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
 		/* Get the response iocb from the head of work queue */
 		spin_lock_irqsave(&phba->hbalock, iflag);
-		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
+		list_remove_head(&phba->sli4_hba.sp_queue_event,
 				 cq_event, struct lpfc_cq_event, list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 
@@ -3038,7 +2882,12 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 		case CQE_CODE_COMPL_WQE:
 			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
 						 cq_event);
-			lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+			/* Translate ELS WCQE to response IOCBQ */
+			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
+								   irspiocbq);
+			if (irspiocbq)
+				lpfc_sli_sp_handle_rspiocb(phba, pring,
+							   irspiocbq);
 			break;
 		case CQE_CODE_RECEIVE:
 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
@@ -4368,6 +4217,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
 		phba->hba_flag |= HBA_FCOE_SUPPORT;
+
+	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+		LPFC_DCBX_CEE_MODE)
+		phba->hba_flag |= HBA_FIP_SUPPORT;
+	else
+		phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
 	if (phba->sli_rev != LPFC_SLI_REV4 ||
 	    !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -4541,10 +4397,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		rc = -ENODEV;
 		goto out_free_vpd;
 	}
-	if (phba->cfg_enable_fip)
-		bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
-	else
-		bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
 
 	/* Set up all the queues to the device */
 	rc = lpfc_sli4_queue_setup(phba);
@@ -5905,7 +5757,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 	uint16_t xritag;
 	struct ulp_bde64 *bpl = NULL;
 
-	fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+	fip = phba->hba_flag & HBA_FIP_SUPPORT;
 	/* The fcp commands will set command type */
 	if (iocbq->iocb_flag & LPFC_IO_FCP)
 		command_type = FCP_COMMAND;
@@ -7046,8 +6898,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
 
 	spin_lock_irq(&phba->hbalock);
-	if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
-		abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		if (abort_iotag != 0 &&
+			abort_iotag <= phba->sli.last_iotag)
+			abort_iocb =
+				phba->sli.iocbq_lookup[abort_iotag];
+	} else
+		/* For sli4 the abort_tag is the XRI,
+		 * so the abort routine puts the iotag of the iocb
+		 * being aborted in the context field of the abort
+		 * IOCB.
+		 */
+		abort_iocb = phba->sli.iocbq_lookup[abort_context];
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
 			"0327 Cannot abort els iocb %p "
@@ -7061,9 +6923,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	 * might have completed already. Do not free it again.
 	 */
 	if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-		spin_unlock_irq(&phba->hbalock);
-		lpfc_sli_release_iocbq(phba, cmdiocb);
-		return;
+		if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli_release_iocbq(phba, cmdiocb);
+			return;
+		}
+		/* For SLI4 the ulpContext field for abort IOCB
+		 * holds the iotag of the IOCB being aborted so
+		 * the local abort_context needs to be reset to
+		 * match the aborted IOCBs ulpContext.
+		 */
+		if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
+			abort_context = abort_iocb->iocb.ulpContext;
 	}
 	/*
 	 * make sure we have the right iocbq before taking it
@@ -7182,8 +7053,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	iabt = &abtsiocbp->iocb;
 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (phba->sli_rev == LPFC_SLI_REV4) {
 		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+	}
 	else
 		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
 	iabt->ulpLe = 1;
@@ -8421,7 +8294,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 	       sizeof(struct lpfc_iocbq) - offset);
-	pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe;
 	/* Map WCQE parameters into irspiocb parameters */
 	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8436,6 +8308,49 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 }
 
 /**
+ * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event and construct
+ * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * discovery engine to handle.
+ *
+ * Return: Pointer to the receive IOCBQ, NULL otherwise.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
+			       struct lpfc_iocbq *irspiocbq)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_wcqe_complete *wcqe;
+	unsigned long iflags;
+
+	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	/* Look up the ELS command IOCB and create pseudo response IOCB */
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0386 ELS complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		lpfc_sli_release_iocbq(phba, irspiocbq);
+		return NULL;
+	}
+
+	/* Fake the irspiocbq and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+
+	return irspiocbq;
+}
+
+/**
  * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
  * @phba: Pointer to HBA context object.
  * @cqe: Pointer to mailbox completion queue entry.
@@ -8625,46 +8540,26 @@ static bool
 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
 			     struct lpfc_wcqe_complete *wcqe)
 {
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	struct lpfc_iocbq *cmdiocbq;
 	struct lpfc_iocbq *irspiocbq;
 	unsigned long iflags;
-	bool workposted = false;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	pring->stats.iocb_event++;
-	/* Look up the ELS command IOCB and create pseudo response IOCB */
-	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
-				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-
-	if (unlikely(!cmdiocbq)) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-				"0386 ELS complete with no corresponding "
-				"cmdiocb: iotag (%d)\n",
-				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-		return workposted;
-	}
-
-	/* Fake the irspiocbq and copy necessary response information */
+	/* Get an irspiocbq for later ELS response processing use */
 	irspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!irspiocbq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0387 Failed to allocate an iocbq\n");
-		return workposted;
+		return false;
 	}
-	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
 
-	/* Add the irspiocb to the response IOCB work list */
+	/* Save off the slow-path queue event for work thread to process */
+	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	list_add_tail(&irspiocbq->cq_event.list,
-		      &phba->sli4_hba.sp_rspiocb_work_queue);
-	/* Indicate ELS ring attention */
-	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+		      &phba->sli4_hba.sp_queue_event);
+	phba->hba_flag |= HBA_SP_QUEUE_EVT;
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	workposted = true;
 
-	return workposted;
+	return true;
 }
 
 /**
@@ -8769,8 +8664,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 	unsigned long iflags;
 
 	lpfc_sli4_rq_release(hrq, drq);
-	if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE)
-		goto out;
 	if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
 		goto out;
 
@@ -8789,9 +8682,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 		/* save off the frame for the word thread to process */
 		list_add_tail(&dma_buf->cq_event.list,
-			      &phba->sli4_hba.sp_rspiocb_work_queue);
+			      &phba->sli4_hba.sp_queue_event);
 		/* Frame received */
-		phba->hba_flag |= HBA_RECEIVE_BUFFER;
+		phba->hba_flag |= HBA_SP_QUEUE_EVT;
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		workposted = true;
 		break;
@@ -8806,7 +8699,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 	}
 out:
 	return workposted;
-
 }
 
 /**
@@ -8824,38 +8716,38 @@ static bool
 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			 struct lpfc_cqe *cqe)
 {
-	struct lpfc_wcqe_complete wcqe;
+	struct lpfc_cqe cqevt;
 	bool workposted = false;
 
 	/* Copy the work queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
 
 	/* Check and process for different type of WCQE and dispatch */
-	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	switch (bf_get(lpfc_cqe_code, &cqevt)) {
 	case CQE_CODE_COMPL_WQE:
-		/* Process the WQ complete event */
+		/* Process the WQ/RQ complete event */
 		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
-				(struct lpfc_wcqe_complete *)&wcqe);
+				(struct lpfc_wcqe_complete *)&cqevt);
 		break;
 	case CQE_CODE_RELEASE_WQE:
 		/* Process the WQ release event */
 		lpfc_sli4_sp_handle_rel_wcqe(phba,
-				(struct lpfc_wcqe_release *)&wcqe);
+				(struct lpfc_wcqe_release *)&cqevt);
 		break;
 	case CQE_CODE_XRI_ABORTED:
 		/* Process the WQ XRI abort event */
 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
-				(struct sli4_wcqe_xri_aborted *)&wcqe);
+				(struct sli4_wcqe_xri_aborted *)&cqevt);
 		break;
 	case CQE_CODE_RECEIVE:
 		/* Process the RQ event */
 		workposted = lpfc_sli4_sp_handle_rcqe(phba,
-				(struct lpfc_rcqe *)&wcqe);
+				(struct lpfc_rcqe *)&cqevt);
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0388 Not a valid WCQE code: x%x\n",
-				bf_get(lpfc_wcqe_c_code, &wcqe));
+				bf_get(lpfc_cqe_code, &cqevt));
 		break;
 	}
 	return workposted;
@@ -10841,6 +10733,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
 }
 
 /**
+ * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
+ * @vport: The vport to work on.
+ *
+ * This function updates the receive sequence time stamp for this vport. The
+ * receive sequence time stamp indicates the time that the last frame of the
+ * the sequence that has been idle for the longest amount of time was received.
+ * the driver uses this time stamp to indicate if any received sequences have
+ * timed out.
+ **/
+void
+lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* get the oldest sequence on the rcv list */
+	h_buf = list_get_first(&vport->rcv_buffer_list,
+			       struct lpfc_dmabuf, list);
+	if (!h_buf)
+		return;
+	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
+}
+
+/**
+ * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function cleans up all outstanding received sequences. This is called
+ * by the driver when a link event or user action invalidates all the received
+ * sequences.
+ **/
+void
+lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+}
+
+/**
+ * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function determines whether any received sequences have timed out by
+ * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
+ * indicates that there is at least one timed out sequence this routine will
+ * go through the received sequences one at a time from most inactive to most
+ * active to determine which ones need to be cleaned up. Once it has determined
+ * that a sequence needs to be cleaned up it will simply free up the resources
+ * without sending an abort.
+ **/
+void
+lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+	unsigned long timeout;
+	int abort_count = 0;
+
+	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+		   vport->rcv_buffer_time_stamp);
+	if (list_empty(&vport->rcv_buffer_list) ||
+	    time_before(jiffies, timeout))
+		return;
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+			   dmabuf->time_stamp);
+		if (time_before(jiffies, timeout))
+			break;
+		abort_count++;
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+	if (abort_count)
+		lpfc_update_rcv_time_stamp(vport);
+}
+
+/**
  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
  *
@@ -10863,6 +10854,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	struct hbq_dmabuf *temp_dmabuf = NULL;
 
 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
+	dmabuf->time_stamp = jiffies;
 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 	/* Use the hdr_buf to find the sequence that this frame belongs to */
 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10881,6 +10873,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 		 * Queue the buffer on the vport's rcv_buffer_list.
 		 */
 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		lpfc_update_rcv_time_stamp(vport);
 		return dmabuf;
 	}
 	temp_hdr = seq_dmabuf->hbuf.virt;
@@ -10888,8 +10881,13 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 		list_del_init(&seq_dmabuf->hbuf.list);
 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+		lpfc_update_rcv_time_stamp(vport);
 		return dmabuf;
 	}
+	/* move this sequence to the tail to indicate a young sequence */
+	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
+	seq_dmabuf->time_stamp = jiffies;
+	lpfc_update_rcv_time_stamp(vport);
 	/* find the correct place in the sequence to insert this frame */
 	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
 		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -11148,6 +11146,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
 	/* remove from receive buffer list */
 	list_del_init(&seq_dmabuf->hbuf.list);
+	lpfc_update_rcv_time_stamp(vport);
 	/* get the Remote Port's SID */
 	sid = sli4_sid_from_fc_hdr(fc_hdr);
 	/* Get an iocbq struct to fill in. */
@@ -11274,11 +11273,6 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
 	struct lpfc_vport *vport;
 	uint32_t fcfi;
 
-	/* Clear hba flag and get all received buffers into the cmplq */
-	spin_lock_irq(&phba->hbalock);
-	phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
-	spin_unlock_irq(&phba->hbalock);
-
 	/* Process each received buffer */
 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 	/* check to see if this a valid type of frame */
@@ -11309,9 +11303,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
 	/* If not last frame in sequence continue processing frames. */
 	if (!lpfc_seq_complete(seq_dmabuf)) {
 		/*
-		 * When saving off frames post a new one and mark this
-		 * frame to be freed when it is finished.
-		 **/
+		* When saving off frames post a new one and mark this
+		* frame to be freed when it is finished.
+		**/
 		lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
 		dmabuf->tag = -1;
 		return;
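The sequence-timeout logic added above relies on the kernel's standard jiffies arithmetic: each sequence records a time_stamp when a frame arrives, and lpfc_rcv_seq_check_edtov() treats it as expired once jiffies passes time_stamp + msecs_to_jiffies(fc_edtov). A self-contained sketch of that check (the helper name and standalone form are illustrative):

#include <linux/jiffies.h>

/* A received sequence times out once E_D_TOV milliseconds (fc_edtov)
 * have elapsed since its last frame arrived. */
static bool rcv_seq_timed_out(unsigned long time_stamp, u32 fc_edtov_msecs)
{
	unsigned long deadline = time_stamp + msecs_to_jiffies(fc_edtov_msecs);

	/* time_after()/time_before() cope with jiffies wrap-around */
	return time_after(jiffies, deadline);
}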
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 0e518b12f414..7b12663909a7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -49,6 +49,7 @@ struct lpfc_iocbq {
 	struct list_head clist;
 	uint16_t iotag;         /* pre-assigned IO tag */
 	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	struct lpfc_cq_event cq_event;
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
@@ -79,7 +80,6 @@ struct lpfc_iocbq {
 			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
-	struct lpfc_cq_event cq_event;
 };
 
 #define SLI_IOCB_RET_IOCB 1	/* Return IOCB if cmd ring full */
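Design note (editor's reading of the diff, not stated in the commit message): moving cq_event ahead of the iocb member appears to matter because lpfc_sli4_iocb_param_transfer() memcpy()s struct lpfc_iocbq from a fixed offset through the end of the structure. With cq_event at the tail, that copy would clobber the WCQE saved in irspiocbq->cq_event.cqe.wcqe_cmpl by lpfc_sli4_sp_handle_els_wcqe(); placing it before the copied region preserves it, which is why the explicit wcqe_cmpl assignment in lpfc_sli4_iocb_param_transfer() could be dropped.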
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1f6cb01e6c6b..4a9cf674555e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -352,7 +352,7 @@ struct lpfc_sli4_hba {
 	unsigned long *rpi_bmask;
 	uint16_t rpi_count;
 	struct lpfc_sli4_flags sli4_flags;
-	struct list_head sp_rspiocb_work_queue;
+	struct list_head sp_queue_event;
 	struct list_head sp_cqe_event_pool;
 	struct list_head sp_asynce_work_queue;
 	struct list_head sp_fcp_xri_aborted_work_queue;