aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/lpfc
diff options
context:
space:
mode:
authorJiri Kosina <jkosina@suse.cz>2011-04-26 04:22:15 -0400
committerJiri Kosina <jkosina@suse.cz>2011-04-26 04:22:59 -0400
commit07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /drivers/scsi/lpfc
parent9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parentcd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--drivers/scsi/lpfc/Makefile8
-rw-r--r--drivers/scsi/lpfc/lpfc.h27
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c38
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h113
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c61
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nl.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c53
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c539
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
19 files changed, 623 insertions, 296 deletions
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index ad05d6edb8f6..88928f00aa2d 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
1#/******************************************************************* 1#/*******************************************************************
2# * This file is part of the Emulex Linux Device Driver for * 2# * This file is part of the Emulex Linux Device Driver for *
3# * Fibre Channel Host Bus Adapters. * 3# * Fibre Channel Host Bus Adapters. *
4# * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4# * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5# * EMULEX and SLI are trademarks of Emulex. * 5# * EMULEX and SLI are trademarks of Emulex. *
6# * www.emulex.com * 6# * www.emulex.com *
7# * * 7# * *
@@ -19,10 +19,8 @@
19# *******************************************************************/ 19# *******************************************************************/
20###################################################################### 20######################################################################
21 21
22ifneq ($(GCOV),) 22ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
23 EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage 23ccflags-$(GCOV) += -O0
24 EXTRA_CFLAGS += -O0
25endif
26 24
27obj-$(CONFIG_SCSI_LPFC) := lpfc.o 25obj-$(CONFIG_SCSI_LPFC) := lpfc.o
28 26
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b64c6da870d3..60e98a62f308 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -539,6 +539,8 @@ struct lpfc_hba {
539 (struct lpfc_hba *, uint32_t); 539 (struct lpfc_hba *, uint32_t);
540 int (*lpfc_hba_down_link) 540 int (*lpfc_hba_down_link)
541 (struct lpfc_hba *, uint32_t); 541 (struct lpfc_hba *, uint32_t);
542 int (*lpfc_selective_reset)
543 (struct lpfc_hba *);
542 544
543 /* SLI4 specific HBA data structure */ 545 /* SLI4 specific HBA data structure */
544 struct lpfc_sli4_hba sli4_hba; 546 struct lpfc_sli4_hba sli4_hba;
@@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
895 return; 897 return;
896} 898}
897 899
898static inline void 900static inline int
901lpfc_readl(void __iomem *addr, uint32_t *data)
902{
903 uint32_t temp;
904 temp = readl(addr);
905 if (temp == 0xffffffff)
906 return -EIO;
907 *data = temp;
908 return 0;
909}
910
911static inline int
899lpfc_sli_read_hs(struct lpfc_hba *phba) 912lpfc_sli_read_hs(struct lpfc_hba *phba)
900{ 913{
901 /* 914 /*
@@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
904 */ 917 */
905 phba->sli.slistat.err_attn_event++; 918 phba->sli.slistat.err_attn_event++;
906 919
907 /* Save status info */ 920 /* Save status info and check for unplug error */
908 phba->work_hs = readl(phba->HSregaddr); 921 if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
909 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 922 lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
910 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 923 lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
924 return -EIO;
925 }
911 926
912 /* Clear chip Host Attention error bit */ 927 /* Clear chip Host Attention error bit */
913 writel(HA_ERATT, phba->HAregaddr); 928 writel(HA_ERATT, phba->HAregaddr);
914 readl(phba->HAregaddr); /* flush */ 929 readl(phba->HAregaddr); /* flush */
915 phba->pport->stopped = 1; 930 phba->pport->stopped = 1;
916 931
917 return; 932 return 0;
918} 933}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e7c020df12fa..17d789325f40 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
685 * -EIO reset not configured or error posting the event 685 * -EIO reset not configured or error posting the event
686 * zero for success 686 * zero for success
687 **/ 687 **/
688static int 688int
689lpfc_selective_reset(struct lpfc_hba *phba) 689lpfc_selective_reset(struct lpfc_hba *phba)
690{ 690{
691 struct completion online_compl; 691 struct completion online_compl;
@@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
746 int status = -EINVAL; 746 int status = -EINVAL;
747 747
748 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) 748 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
749 status = lpfc_selective_reset(phba); 749 status = phba->lpfc_selective_reset(phba);
750 750
751 if (status == 0) 751 if (status == 0)
752 return strlen(buf); 752 return strlen(buf);
@@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
1224 if (val & ENABLE_FCP_RING_POLLING) { 1224 if (val & ENABLE_FCP_RING_POLLING) {
1225 if ((val & DISABLE_FCP_RING_INT) && 1225 if ((val & DISABLE_FCP_RING_INT) &&
1226 !(old_val & DISABLE_FCP_RING_INT)) { 1226 !(old_val & DISABLE_FCP_RING_INT)) {
1227 creg_val = readl(phba->HCregaddr); 1227 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1228 spin_unlock_irq(&phba->hbalock);
1229 return -EINVAL;
1230 }
1228 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 1231 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
1229 writel(creg_val, phba->HCregaddr); 1232 writel(creg_val, phba->HCregaddr);
1230 readl(phba->HCregaddr); /* flush */ 1233 readl(phba->HCregaddr); /* flush */
@@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
1242 spin_unlock_irq(&phba->hbalock); 1245 spin_unlock_irq(&phba->hbalock);
1243 del_timer(&phba->fcp_poll_timer); 1246 del_timer(&phba->fcp_poll_timer);
1244 spin_lock_irq(&phba->hbalock); 1247 spin_lock_irq(&phba->hbalock);
1245 creg_val = readl(phba->HCregaddr); 1248 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1249 spin_unlock_irq(&phba->hbalock);
1250 return -EINVAL;
1251 }
1246 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 1252 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1247 writel(creg_val, phba->HCregaddr); 1253 writel(creg_val, phba->HCregaddr);
1248 readl(phba->HCregaddr); /* flush */ 1254 readl(phba->HCregaddr); /* flush */
@@ -4509,7 +4515,7 @@ static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
4509 * Description: 4515 * Description:
4510 * This function is called by the transport after the @fc_vport's symbolic name 4516 * This function is called by the transport after the @fc_vport's symbolic name
4511 * has been changed. This function re-registers the symbolic name with the 4517 * has been changed. This function re-registers the symbolic name with the
4512 * switch to propogate the change into the fabric if the vport is active. 4518 * switch to propagate the change into the fabric if the vport is active.
4513 **/ 4519 **/
4514static void 4520static void
4515lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) 4521lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 04fef038b1ff..3811ea9ce8e4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2010 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
348 dd_data->context_un.iocb.bmp = bmp; 348 dd_data->context_un.iocb.bmp = bmp;
349 349
350 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 350 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
351 creg_val = readl(phba->HCregaddr); 351 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
352 rc = -EIO ;
353 goto free_cmdiocbq;
354 }
352 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 355 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
353 writel(creg_val, phba->HCregaddr); 356 writel(creg_val, phba->HCregaddr);
354 readl(phba->HCregaddr); /* flush */ 357 readl(phba->HCregaddr); /* flush */
@@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
599 dd_data->context_un.iocb.ndlp = ndlp; 602 dd_data->context_un.iocb.ndlp = ndlp;
600 603
601 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 604 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
602 creg_val = readl(phba->HCregaddr); 605 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
606 rc = -EIO;
607 goto linkdown_err;
608 }
603 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 609 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
604 writel(creg_val, phba->HCregaddr); 610 writel(creg_val, phba->HCregaddr);
605 readl(phba->HCregaddr); /* flush */ 611 readl(phba->HCregaddr); /* flush */
@@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
613 else 619 else
614 rc = -EIO; 620 rc = -EIO;
615 621
622linkdown_err:
616 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 623 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
617 job->request_payload.sg_cnt, DMA_TO_DEVICE); 624 job->request_payload.sg_cnt, DMA_TO_DEVICE);
618 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 625 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
@@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1357 dd_data->context_un.iocb.ndlp = ndlp; 1364 dd_data->context_un.iocb.ndlp = ndlp;
1358 1365
1359 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1366 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1360 creg_val = readl(phba->HCregaddr); 1367 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1368 rc = -IOCB_ERROR;
1369 goto issue_ct_rsp_exit;
1370 }
1361 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 1371 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1362 writel(creg_val, phba->HCregaddr); 1372 writel(creg_val, phba->HCregaddr);
1363 readl(phba->HCregaddr); /* flush */ 1373 readl(phba->HCregaddr); /* flush */
@@ -1929,7 +1939,7 @@ out:
1929 * @rxxri: Receive exchange id 1939 * @rxxri: Receive exchange id
1930 * @len: Number of data bytes 1940 * @len: Number of data bytes
1931 * 1941 *
1932 * This function allocates and posts a data buffer of sufficient size to recieve 1942 * This function allocates and posts a data buffer of sufficient size to receive
1933 * an unsolicted CT command. 1943 * an unsolicted CT command.
1934 **/ 1944 **/
1935static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 1945static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
@@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2479 2489
2480 from = (uint8_t *)dd_data->context_un.mbox.mb; 2490 from = (uint8_t *)dd_data->context_un.mbox.mb;
2481 job = dd_data->context_un.mbox.set_job; 2491 job = dd_data->context_un.mbox.set_job;
2482 size = job->reply_payload.payload_len; 2492 if (job) {
2483 job->reply->reply_payload_rcv_len = 2493 size = job->reply_payload.payload_len;
2484 sg_copy_from_buffer(job->reply_payload.sg_list, 2494 job->reply->reply_payload_rcv_len =
2485 job->reply_payload.sg_cnt, 2495 sg_copy_from_buffer(job->reply_payload.sg_list,
2486 from, size); 2496 job->reply_payload.sg_cnt,
2487 job->reply->result = 0; 2497 from, size);
2498 job->reply->result = 0;
2488 2499
2500 job->dd_data = NULL;
2501 job->job_done(job);
2502 }
2489 dd_data->context_un.mbox.set_job = NULL; 2503 dd_data->context_un.mbox.set_job = NULL;
2490 job->dd_data = NULL;
2491 job->job_done(job);
2492 /* need to hold the lock until we call job done to hold off 2504 /* need to hold the lock until we call job done to hold off
2493 * the timeout handler returning to the midlayer while 2505 * the timeout handler returning to the midlayer while
2494 * we are stillprocessing the job 2506 * we are stillprocessing the job
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 3d40023f4804..f0b332f4eedb 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2010 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
254void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, 254void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
255 uint32_t); 255 uint32_t);
256void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); 256void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
257 257int lpfc_selective_reset(struct lpfc_hba *);
258void lpfc_reset_barrier(struct lpfc_hba * phba); 258void lpfc_reset_barrier(struct lpfc_hba *);
259int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 259int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
260int lpfc_sli_brdkill(struct lpfc_hba *); 260int lpfc_sli_brdkill(struct lpfc_hba *);
261int lpfc_sli_brdreset(struct lpfc_hba *); 261int lpfc_sli_brdreset(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a753581509d6..3d967741c708 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -908,7 +908,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
908 if (!debug) 908 if (!debug)
909 goto out; 909 goto out;
910 910
911 /* Round to page boundry */ 911 /* Round to page boundary */
912 printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n", 912 printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
913 __func__, _dump_buf_data); 913 __func__, _dump_buf_data);
914 debug->buffer = _dump_buf_data; 914 debug->buffer = _dump_buf_data;
@@ -938,7 +938,7 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
938 if (!debug) 938 if (!debug)
939 goto out; 939 goto out;
940 940
941 /* Round to page boundry */ 941 /* Round to page boundary */
942 printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n", 942 printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
943 __func__, _dump_buf_dif, file->f_dentry->d_name.name); 943 __func__, _dump_buf_dif, file->f_dentry->d_name.name);
944 debug->buffer = _dump_buf_dif; 944 debug->buffer = _dump_buf_dif;
@@ -2158,7 +2158,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2158 debugfs_create_dir(name, phba->hba_debugfs_root); 2158 debugfs_create_dir(name, phba->hba_debugfs_root);
2159 if (!vport->vport_debugfs_root) { 2159 if (!vport->vport_debugfs_root) {
2160 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2160 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2161 "0417 Cant create debugfs\n"); 2161 "0417 Can't create debugfs\n");
2162 goto debug_failed; 2162 goto debug_failed;
2163 } 2163 }
2164 atomic_inc(&phba->debugfs_vport_count); 2164 atomic_inc(&phba->debugfs_vport_count);
@@ -2211,7 +2211,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2211 vport, &lpfc_debugfs_op_nodelist); 2211 vport, &lpfc_debugfs_op_nodelist);
2212 if (!vport->debug_nodelist) { 2212 if (!vport->debug_nodelist) {
2213 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2213 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2214 "0409 Cant create debugfs nodelist\n"); 2214 "0409 Can't create debugfs nodelist\n");
2215 goto debug_failed; 2215 goto debug_failed;
2216 } 2216 }
2217 2217
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8e28edf9801e..d34b69f9cdb1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
89 return 0; 89 return 0;
90 90
91 /* Read the HBA Host Attention Register */ 91 /* Read the HBA Host Attention Register */
92 ha_copy = readl(phba->HAregaddr); 92 if (lpfc_readl(phba->HAregaddr, &ha_copy))
93 return 1;
93 94
94 if (!(ha_copy & HA_LATT)) 95 if (!(ha_copy & HA_LATT))
95 return 0; 96 return 0;
@@ -101,7 +102,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
101 phba->pport->port_state); 102 phba->pport->port_state);
102 103
103 /* CLEAR_LA should re-enable link attention events and 104 /* CLEAR_LA should re-enable link attention events and
104 * we should then imediately take a LATT event. The 105 * we should then immediately take a LATT event. The
105 * LATT processing should call lpfc_linkdown() which 106 * LATT processing should call lpfc_linkdown() which
106 * will cleanup any left over in-progress discovery 107 * will cleanup any left over in-progress discovery
107 * events. 108 * events.
@@ -1598,7 +1599,7 @@ out:
1598 * This routine is the completion callback function for issuing the Port 1599 * This routine is the completion callback function for issuing the Port
1599 * Login (PLOGI) command. For PLOGI completion, there must be an active 1600 * Login (PLOGI) command. For PLOGI completion, there must be an active
1600 * ndlp on the vport node list that matches the remote node ID from the 1601 * ndlp on the vport node list that matches the remote node ID from the
1601 * PLOGI reponse IOCB. If such ndlp does not exist, the PLOGI is simply 1602 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1602 * ignored and command IOCB released. The PLOGI response IOCB status is 1603 * ignored and command IOCB released. The PLOGI response IOCB status is
1603 * checked for error conditons. If there is error status reported, PLOGI 1604 * checked for error conditons. If there is error status reported, PLOGI
1604 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1605 * retry shall be attempted by invoking the lpfc_els_retry() routine.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 154c715fb3af..301498301a8f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -739,7 +739,7 @@ lpfc_do_work(void *p)
739 739
740/* 740/*
741 * This is only called to handle FC worker events. Since this a rare 741 * This is only called to handle FC worker events. Since this a rare
742 * occurance, we allocate a struct lpfc_work_evt structure here instead of 742 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
743 * embedding it in the IOCB. 743 * embedding it in the IOCB.
744 */ 744 */
745int 745int
@@ -1348,7 +1348,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1348 int rc; 1348 int rc;
1349 1349
1350 spin_lock_irq(&phba->hbalock); 1350 spin_lock_irq(&phba->hbalock);
1351 /* If the FCF is not availabe do nothing. */ 1351 /* If the FCF is not available do nothing. */
1352 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1352 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1353 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1353 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1354 spin_unlock_irq(&phba->hbalock); 1354 spin_unlock_irq(&phba->hbalock);
@@ -1538,7 +1538,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1538 1538
1539 /* 1539 /*
1540 * If user did not specify any addressing mode, or if the 1540 * If user did not specify any addressing mode, or if the
1541 * prefered addressing mode specified by user is not supported 1541 * preferred addressing mode specified by user is not supported
1542 * by FCF, allow fabric to pick the addressing mode. 1542 * by FCF, allow fabric to pick the addressing mode.
1543 */ 1543 */
1544 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1544 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
@@ -1553,7 +1553,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1553 FCFCNCT_AM_SPMA) ? 1553 FCFCNCT_AM_SPMA) ?
1554 LPFC_FCF_SPMA : LPFC_FCF_FPMA; 1554 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1555 /* 1555 /*
1556 * If the user specified a prefered address mode, use the 1556 * If the user specified a preferred address mode, use the
1557 * addr mode only if FCF support the addr_mode. 1557 * addr mode only if FCF support the addr_mode.
1558 */ 1558 */
1559 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 1559 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
@@ -3117,7 +3117,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3117 * back at reg login state so this 3117 * back at reg login state so this
3118 * mbox needs to be ignored becase 3118 * mbox needs to be ignored becase
3119 * there is another reg login in 3119 * there is another reg login in
3120 * proccess. 3120 * process.
3121 */ 3121 */
3122 spin_lock_irq(shost->host_lock); 3122 spin_lock_irq(shost->host_lock);
3123 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 3123 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
@@ -4477,7 +4477,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
4477 if ((vport->fc_flag & FC_RSCN_MODE) && 4477 if ((vport->fc_flag & FC_RSCN_MODE) &&
4478 !(vport->fc_flag & FC_NDISC_ACTIVE)) { 4478 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
4479 if (lpfc_rscn_payload_check(vport, did)) { 4479 if (lpfc_rscn_payload_check(vport, did)) {
4480 /* If we've already recieved a PLOGI from this NPort 4480 /* If we've already received a PLOGI from this NPort
4481 * we don't need to try to discover it again. 4481 * we don't need to try to discover it again.
4482 */ 4482 */
4483 if (ndlp->nlp_flag & NLP_RCV_PLOGI) 4483 if (ndlp->nlp_flag & NLP_RCV_PLOGI)
@@ -4493,7 +4493,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
4493 } else 4493 } else
4494 ndlp = NULL; 4494 ndlp = NULL;
4495 } else { 4495 } else {
4496 /* If we've already recieved a PLOGI from this NPort, 4496 /* If we've already received a PLOGI from this NPort,
4497 * or we are already in the process of discovery on it, 4497 * or we are already in the process of discovery on it,
4498 * we don't need to try to discover it again. 4498 * we don't need to try to discover it again.
4499 */ 4499 */
@@ -5756,7 +5756,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
5756 * @size: Size of the data buffer. 5756 * @size: Size of the data buffer.
5757 * @rec_type: Record type to be searched. 5757 * @rec_type: Record type to be searched.
5758 * 5758 *
5759 * This function searches config region data to find the begining 5759 * This function searches config region data to find the beginning
5760 * of the record specified by record_type. If record found, this 5760 * of the record specified by record_type. If record found, this
5761 * function return pointer to the record else return NULL. 5761 * function return pointer to the record else return NULL.
5762 */ 5762 */
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 94ae37c5111a..95f11ed79463 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1344,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */
1344#define HS_FFER1 0x80000000 /* Bit 31 */ 1344#define HS_FFER1 0x80000000 /* Bit 31 */
1345#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ 1345#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */
1346#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ 1346#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */
1347 1347#define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */
1348/* Host Control Register */ 1348/* Host Control Register */
1349 1349
1350#define HC_REG_OFFSET 12 /* Byte offset from register base address */ 1350#define HC_REG_OFFSET 12 /* Byte offset from register base address */
@@ -1713,6 +1713,17 @@ struct lpfc_pde6 {
1713#define pde6_apptagval_WORD word2 1713#define pde6_apptagval_WORD word2
1714}; 1714};
1715 1715
1716struct lpfc_pde7 {
1717 uint32_t word0;
1718#define pde7_type_SHIFT 24
1719#define pde7_type_MASK 0x000000ff
1720#define pde7_type_WORD word0
1721#define pde7_rsvd0_SHIFT 0
1722#define pde7_rsvd0_MASK 0x00ffffff
1723#define pde7_rsvd0_WORD word0
1724 uint32_t addrHigh;
1725 uint32_t addrLow;
1726};
1716 1727
1717/* Structure for MB Command LOAD_SM and DOWN_LOAD */ 1728/* Structure for MB Command LOAD_SM and DOWN_LOAD */
1718 1729
@@ -3621,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */
3621 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ 3632 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
3622 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ 3633 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
3623 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ 3634 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
3624 struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ 3635 struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */
3625 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ 3636 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
3626 } un; 3637 } un;
3627 union { 3638 union {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c7178d60c7bf..8433ac0d9fb4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -215,7 +215,7 @@ struct lpfc_sli4_flags {
215#define lpfc_fip_flag_WORD word0 215#define lpfc_fip_flag_WORD word0
216}; 216};
217 217
218struct sli4_bls_acc { 218struct sli4_bls_rsp {
219 uint32_t word0_rsvd; /* Word0 must be reserved */ 219 uint32_t word0_rsvd; /* Word0 must be reserved */
220 uint32_t word1; 220 uint32_t word1;
221#define lpfc_abts_orig_SHIFT 0 221#define lpfc_abts_orig_SHIFT 0
@@ -231,6 +231,16 @@ struct sli4_bls_acc {
231#define lpfc_abts_oxid_MASK 0x0000FFFF 231#define lpfc_abts_oxid_MASK 0x0000FFFF
232#define lpfc_abts_oxid_WORD word2 232#define lpfc_abts_oxid_WORD word2
233 uint32_t word3; 233 uint32_t word3;
234#define lpfc_vndr_code_SHIFT 0
235#define lpfc_vndr_code_MASK 0x000000FF
236#define lpfc_vndr_code_WORD word3
237#define lpfc_rsn_expln_SHIFT 8
238#define lpfc_rsn_expln_MASK 0x000000FF
239#define lpfc_rsn_expln_WORD word3
240#define lpfc_rsn_code_SHIFT 16
241#define lpfc_rsn_code_MASK 0x000000FF
242#define lpfc_rsn_code_WORD word3
243
234 uint32_t word4; 244 uint32_t word4;
235 uint32_t word5_rsvd; /* Word5 must be reserved */ 245 uint32_t word5_rsvd; /* Word5 must be reserved */
236}; 246};
@@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr {
711union lpfc_sli4_cfg_shdr { 721union lpfc_sli4_cfg_shdr {
712 struct { 722 struct {
713 uint32_t word6; 723 uint32_t word6;
714#define lpfc_mbox_hdr_opcode_SHIFT 0 724#define lpfc_mbox_hdr_opcode_SHIFT 0
715#define lpfc_mbox_hdr_opcode_MASK 0x000000FF 725#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
716#define lpfc_mbox_hdr_opcode_WORD word6 726#define lpfc_mbox_hdr_opcode_WORD word6
717#define lpfc_mbox_hdr_subsystem_SHIFT 8 727#define lpfc_mbox_hdr_subsystem_SHIFT 8
718#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF 728#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
719#define lpfc_mbox_hdr_subsystem_WORD word6 729#define lpfc_mbox_hdr_subsystem_WORD word6
720#define lpfc_mbox_hdr_port_number_SHIFT 16 730#define lpfc_mbox_hdr_port_number_SHIFT 16
721#define lpfc_mbox_hdr_port_number_MASK 0x000000FF 731#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
722#define lpfc_mbox_hdr_port_number_WORD word6 732#define lpfc_mbox_hdr_port_number_WORD word6
723#define lpfc_mbox_hdr_domain_SHIFT 24 733#define lpfc_mbox_hdr_domain_SHIFT 24
724#define lpfc_mbox_hdr_domain_MASK 0x000000FF 734#define lpfc_mbox_hdr_domain_MASK 0x000000FF
725#define lpfc_mbox_hdr_domain_WORD word6 735#define lpfc_mbox_hdr_domain_WORD word6
726 uint32_t timeout; 736 uint32_t timeout;
727 uint32_t request_length; 737 uint32_t request_length;
728 uint32_t reserved9; 738 uint32_t word9;
739#define lpfc_mbox_hdr_version_SHIFT 0
740#define lpfc_mbox_hdr_version_MASK 0x000000FF
741#define lpfc_mbox_hdr_version_WORD word9
742#define LPFC_Q_CREATE_VERSION_2 2
743#define LPFC_Q_CREATE_VERSION_1 1
744#define LPFC_Q_CREATE_VERSION_0 0
729 } request; 745 } request;
730 struct { 746 struct {
731 uint32_t word6; 747 uint32_t word6;
@@ -917,9 +933,12 @@ struct cq_context {
917#define LPFC_CQ_CNT_512 0x1 933#define LPFC_CQ_CNT_512 0x1
918#define LPFC_CQ_CNT_1024 0x2 934#define LPFC_CQ_CNT_1024 0x2
919 uint32_t word1; 935 uint32_t word1;
920#define lpfc_cq_eq_id_SHIFT 22 936#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
921#define lpfc_cq_eq_id_MASK 0x000000FF 937#define lpfc_cq_eq_id_MASK 0x000000FF
922#define lpfc_cq_eq_id_WORD word1 938#define lpfc_cq_eq_id_WORD word1
939#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
940#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
941#define lpfc_cq_eq_id_2_WORD word1
923 uint32_t reserved0; 942 uint32_t reserved0;
924 uint32_t reserved1; 943 uint32_t reserved1;
925}; 944};
@@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create {
929 union { 948 union {
930 struct { 949 struct {
931 uint32_t word0; 950 uint32_t word0;
951#define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */
952#define lpfc_mbx_cq_create_page_size_MASK 0x000000FF
953#define lpfc_mbx_cq_create_page_size_WORD word0
932#define lpfc_mbx_cq_create_num_pages_SHIFT 0 954#define lpfc_mbx_cq_create_num_pages_SHIFT 0
933#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF 955#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
934#define lpfc_mbx_cq_create_num_pages_WORD word0 956#define lpfc_mbx_cq_create_num_pages_WORD word0
@@ -969,7 +991,7 @@ struct wq_context {
969struct lpfc_mbx_wq_create { 991struct lpfc_mbx_wq_create {
970 struct mbox_header header; 992 struct mbox_header header;
971 union { 993 union {
972 struct { 994 struct { /* Version 0 Request */
973 uint32_t word0; 995 uint32_t word0;
974#define lpfc_mbx_wq_create_num_pages_SHIFT 0 996#define lpfc_mbx_wq_create_num_pages_SHIFT 0
975#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF 997#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
@@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create {
979#define lpfc_mbx_wq_create_cq_id_WORD word0 1001#define lpfc_mbx_wq_create_cq_id_WORD word0
980 struct dma_address page[LPFC_MAX_WQ_PAGE]; 1002 struct dma_address page[LPFC_MAX_WQ_PAGE];
981 } request; 1003 } request;
1004 struct { /* Version 1 Request */
1005 uint32_t word0; /* Word 0 is the same as in v0 */
1006 uint32_t word1;
1007#define lpfc_mbx_wq_create_page_size_SHIFT 0
1008#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF
1009#define lpfc_mbx_wq_create_page_size_WORD word1
1010#define lpfc_mbx_wq_create_wqe_size_SHIFT 8
1011#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F
1012#define lpfc_mbx_wq_create_wqe_size_WORD word1
1013#define LPFC_WQ_WQE_SIZE_64 0x5
1014#define LPFC_WQ_WQE_SIZE_128 0x6
1015#define lpfc_mbx_wq_create_wqe_count_SHIFT 16
1016#define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF
1017#define lpfc_mbx_wq_create_wqe_count_WORD word1
1018 uint32_t word2;
1019 struct dma_address page[LPFC_MAX_WQ_PAGE-1];
1020 } request_1;
982 struct { 1021 struct {
983 uint32_t word0; 1022 uint32_t word0;
984#define lpfc_mbx_wq_create_q_id_SHIFT 0 1023#define lpfc_mbx_wq_create_q_id_SHIFT 0
@@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy {
1007#define LPFC_DATA_BUF_SIZE 2048 1046#define LPFC_DATA_BUF_SIZE 2048
1008struct rq_context { 1047struct rq_context {
1009 uint32_t word0; 1048 uint32_t word0;
1010#define lpfc_rq_context_rq_size_SHIFT 16 1049#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */
1011#define lpfc_rq_context_rq_size_MASK 0x0000000F 1050#define lpfc_rq_context_rqe_count_MASK 0x0000000F
1012#define lpfc_rq_context_rq_size_WORD word0 1051#define lpfc_rq_context_rqe_count_WORD word0
1013#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ 1052#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
1014#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ 1053#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
1015#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ 1054#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
1016#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ 1055#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
1056#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */
1057#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF
1058#define lpfc_rq_context_rqe_count_1_WORD word0
1059#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
1060#define lpfc_rq_context_rqe_size_MASK 0x0000000F
1061#define lpfc_rq_context_rqe_size_WORD word0
1062#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
1063#define lpfc_rq_context_page_size_MASK 0x000000FF
1064#define lpfc_rq_context_page_size_WORD word0
1017 uint32_t reserved1; 1065 uint32_t reserved1;
1018 uint32_t word2; 1066 uint32_t word2;
1019#define lpfc_rq_context_cq_id_SHIFT 16 1067#define lpfc_rq_context_cq_id_SHIFT 16
@@ -1022,7 +1070,7 @@ struct rq_context {
1022#define lpfc_rq_context_buf_size_SHIFT 0 1070#define lpfc_rq_context_buf_size_SHIFT 0
1023#define lpfc_rq_context_buf_size_MASK 0x0000FFFF 1071#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
1024#define lpfc_rq_context_buf_size_WORD word2 1072#define lpfc_rq_context_buf_size_WORD word2
1025 uint32_t reserved3; 1073 uint32_t buffer_size; /* Version 1 Only */
1026}; 1074};
1027 1075
1028struct lpfc_mbx_rq_create { 1076struct lpfc_mbx_rq_create {
@@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy {
1062 1110
1063struct mq_context { 1111struct mq_context {
1064 uint32_t word0; 1112 uint32_t word0;
1065#define lpfc_mq_context_cq_id_SHIFT 22 1113#define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */
1066#define lpfc_mq_context_cq_id_MASK 0x000003FF 1114#define lpfc_mq_context_cq_id_MASK 0x000003FF
1067#define lpfc_mq_context_cq_id_WORD word0 1115#define lpfc_mq_context_cq_id_WORD word0
1068#define lpfc_mq_context_count_SHIFT 16 1116#define lpfc_mq_context_ring_size_SHIFT 16
1069#define lpfc_mq_context_count_MASK 0x0000000F 1117#define lpfc_mq_context_ring_size_MASK 0x0000000F
1070#define lpfc_mq_context_count_WORD word0 1118#define lpfc_mq_context_ring_size_WORD word0
1071#define LPFC_MQ_CNT_16 0x5 1119#define LPFC_MQ_RING_SIZE_16 0x5
1072#define LPFC_MQ_CNT_32 0x6 1120#define LPFC_MQ_RING_SIZE_32 0x6
1073#define LPFC_MQ_CNT_64 0x7 1121#define LPFC_MQ_RING_SIZE_64 0x7
1074#define LPFC_MQ_CNT_128 0x8 1122#define LPFC_MQ_RING_SIZE_128 0x8
1075 uint32_t word1; 1123 uint32_t word1;
1076#define lpfc_mq_context_valid_SHIFT 31 1124#define lpfc_mq_context_valid_SHIFT 31
1077#define lpfc_mq_context_valid_MASK 0x00000001 1125#define lpfc_mq_context_valid_MASK 0x00000001
@@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext {
1105 union { 1153 union {
1106 struct { 1154 struct {
1107 uint32_t word0; 1155 uint32_t word0;
1108#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 1156#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
1109#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF 1157#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
1110#define lpfc_mbx_mq_create_ext_num_pages_WORD word0 1158#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
1159#define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */
1160#define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF
1161#define lpfc_mbx_mq_create_ext_cq_id_WORD word0
1111 uint32_t async_evt_bmap; 1162 uint32_t async_evt_bmap;
1112#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK 1163#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
1113#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 1164#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 35665cfb5689..505f88443b5c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2010 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
507 phba->hba_flag &= ~HBA_ERATT_HANDLED; 507 phba->hba_flag &= ~HBA_ERATT_HANDLED;
508 508
509 /* Enable appropriate host interrupts */ 509 /* Enable appropriate host interrupts */
510 status = readl(phba->HCregaddr); 510 if (lpfc_readl(phba->HCregaddr, &status)) {
511 spin_unlock_irq(&phba->hbalock);
512 return -EIO;
513 }
511 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 514 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
512 if (psli->num_rings > 0) 515 if (psli->num_rings > 0)
513 status |= HC_R0INT_ENA; 516 status |= HC_R0INT_ENA;
@@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1222 /* Wait for the ER1 bit to clear.*/ 1225 /* Wait for the ER1 bit to clear.*/
1223 while (phba->work_hs & HS_FFER1) { 1226 while (phba->work_hs & HS_FFER1) {
1224 msleep(100); 1227 msleep(100);
1225 phba->work_hs = readl(phba->HSregaddr); 1228 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1229 phba->work_hs = UNPLUG_ERR ;
1230 break;
1231 }
1226 /* If driver is unloading let the worker thread continue */ 1232 /* If driver is unloading let the worker thread continue */
1227 if (phba->pport->load_flag & FC_UNLOADING) { 1233 if (phba->pport->load_flag & FC_UNLOADING) {
1228 phba->work_hs = 0; 1234 phba->work_hs = 0;
@@ -4460,7 +4466,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4460} 4466}
4461 4467
4462/** 4468/**
4463 * lpfc_init_api_table_setup - Set up init api fucntion jump table 4469 * lpfc_init_api_table_setup - Set up init api function jump table
4464 * @phba: The hba struct for which this call is being executed. 4470 * @phba: The hba struct for which this call is being executed.
4465 * @dev_grp: The HBA PCI-Device group number. 4471 * @dev_grp: The HBA PCI-Device group number.
4466 * 4472 *
@@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4474{ 4480{
4475 phba->lpfc_hba_init_link = lpfc_hba_init_link; 4481 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4476 phba->lpfc_hba_down_link = lpfc_hba_down_link; 4482 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4483 phba->lpfc_selective_reset = lpfc_selective_reset;
4477 switch (dev_grp) { 4484 switch (dev_grp) {
4478 case LPFC_PCI_DEV_LP: 4485 case LPFC_PCI_DEV_LP:
4479 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4486 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -4843,7 +4850,7 @@ out_free_mem:
4843 * 4850 *
4844 * Return codes 4851 * Return codes
4845 * 0 - successful 4852 * 0 - successful
4846 * -ENOMEM - No availble memory 4853 * -ENOMEM - No available memory
4847 * -EIO - The mailbox failed to complete successfully. 4854 * -EIO - The mailbox failed to complete successfully.
4848 **/ 4855 **/
4849int 4856int
@@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5385 int i, port_error = 0; 5392 int i, port_error = 0;
5386 uint32_t if_type; 5393 uint32_t if_type;
5387 5394
5395 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5396 memset(&reg_data, 0, sizeof(reg_data));
5388 if (!phba->sli4_hba.PSMPHRregaddr) 5397 if (!phba->sli4_hba.PSMPHRregaddr)
5389 return -ENODEV; 5398 return -ENODEV;
5390 5399
5391 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5400 /* Wait up to 30 seconds for the SLI Port POST done and ready */
5392 for (i = 0; i < 3000; i++) { 5401 for (i = 0; i < 3000; i++) {
5393 portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); 5402 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5394 if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { 5403 &portsmphr_reg.word0) ||
5404 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5395 /* Port has a fatal POST error, break out */ 5405 /* Port has a fatal POST error, break out */
5396 port_error = -ENODEV; 5406 port_error = -ENODEV;
5397 break; 5407 break;
@@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5472 break; 5482 break;
5473 case LPFC_SLI_INTF_IF_TYPE_2: 5483 case LPFC_SLI_INTF_IF_TYPE_2:
5474 /* Final checks. The port status should be clean. */ 5484 /* Final checks. The port status should be clean. */
5475 reg_data.word0 = 5485 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5476 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 5486 &reg_data.word0) ||
5477 if (bf_get(lpfc_sliport_status_err, &reg_data)) { 5487 bf_get(lpfc_sliport_status_err, &reg_data)) {
5478 phba->work_status[0] = 5488 phba->work_status[0] =
5479 readl(phba->sli4_hba.u.if_type2. 5489 readl(phba->sli4_hba.u.if_type2.
5480 ERR1regaddr); 5490 ERR1regaddr);
@@ -5720,7 +5730,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5720 * 5730 *
5721 * Return codes 5731 * Return codes
5722 * 0 - successful 5732 * 0 - successful
5723 * -ENOMEM - No availble memory 5733 * -ENOMEM - No available memory
5724 * -EIO - The mailbox failed to complete successfully. 5734 * -EIO - The mailbox failed to complete successfully.
5725 **/ 5735 **/
5726static int 5736static int
@@ -5825,7 +5835,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5825 * 5835 *
5826 * Return codes 5836 * Return codes
5827 * 0 - successful 5837 * 0 - successful
5828 * -ENOMEM - No availble memory 5838 * -ENOMEM - No available memory
5829 * -EIO - The mailbox failed to complete successfully. 5839 * -EIO - The mailbox failed to complete successfully.
5830 **/ 5840 **/
5831static int 5841static int
@@ -5884,7 +5894,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
5884 * 5894 *
5885 * Return codes 5895 * Return codes
5886 * 0 - successful 5896 * 0 - successful
5887 * -ENOMEM - No availble memory 5897 * -ENOMEM - No available memory
5888 * -EIO - The mailbox failed to complete successfully. 5898 * -EIO - The mailbox failed to complete successfully.
5889 **/ 5899 **/
5890static int 5900static int
@@ -6179,7 +6189,7 @@ out_error:
6179 * 6189 *
6180 * Return codes 6190 * Return codes
6181 * 0 - successful 6191 * 0 - successful
6182 * -ENOMEM - No availble memory 6192 * -ENOMEM - No available memory
6183 * -EIO - The mailbox failed to complete successfully. 6193 * -EIO - The mailbox failed to complete successfully.
6184 **/ 6194 **/
6185static void 6195static void
@@ -6243,7 +6253,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6243 * 6253 *
6244 * Return codes 6254 * Return codes
6245 * 0 - successful 6255 * 0 - successful
6246 * -ENOMEM - No availble memory 6256 * -ENOMEM - No available memory
6247 * -EIO - The mailbox failed to complete successfully. 6257 * -EIO - The mailbox failed to complete successfully.
6248 **/ 6258 **/
6249int 6259int
@@ -6488,7 +6498,7 @@ out_error:
6488 * 6498 *
6489 * Return codes 6499 * Return codes
6490 * 0 - successful 6500 * 0 - successful
6491 * -ENOMEM - No availble memory 6501 * -ENOMEM - No available memory
6492 * -EIO - The mailbox failed to complete successfully. 6502 * -EIO - The mailbox failed to complete successfully.
6493 **/ 6503 **/
6494void 6504void
@@ -6533,7 +6543,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6533 * 6543 *
6534 * Return codes 6544 * Return codes
6535 * 0 - successful 6545 * 0 - successful
6536 * -ENOMEM - No availble memory 6546 * -ENOMEM - No available memory
6537 **/ 6547 **/
6538static int 6548static int
6539lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6549lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
@@ -6694,7 +6704,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6694 * 6704 *
6695 * Return codes 6705 * Return codes
6696 * 0 - successful 6706 * 0 - successful
6697 * -ENOMEM - No availble memory 6707 * -ENOMEM - No available memory
6698 * -EIO - The mailbox failed to complete successfully. 6708 * -EIO - The mailbox failed to complete successfully.
6699 **/ 6709 **/
6700int 6710int
@@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
6760 * the loop again. 6770 * the loop again.
6761 */ 6771 */
6762 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { 6772 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
6763 reg_data.word0 = 6773 if (lpfc_readl(phba->sli4_hba.u.if_type2.
6764 readl(phba->sli4_hba.u.if_type2. 6774 STATUSregaddr, &reg_data.word0)) {
6765 STATUSregaddr); 6775 rc = -ENODEV;
6776 break;
6777 }
6766 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 6778 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
6767 break; 6779 break;
6768 if (bf_get(lpfc_sliport_status_rn, &reg_data)) { 6780 if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
@@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
6783 } 6795 }
6784 6796
6785 /* Detect any port errors. */ 6797 /* Detect any port errors. */
6786 reg_data.word0 = readl(phba->sli4_hba.u.if_type2. 6798 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6787 STATUSregaddr); 6799 &reg_data.word0)) {
6800 rc = -ENODEV;
6801 break;
6802 }
6788 if ((bf_get(lpfc_sliport_status_err, &reg_data)) || 6803 if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
6789 (rdy_chk >= 1000)) { 6804 (rdy_chk >= 1000)) {
6790 phba->work_status[0] = readl( 6805 phba->work_status[0] = readl(
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index dba32dfdb59b..fbab9734e9b4 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1834,7 +1834,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1834 * @fcf_index: index to fcf table. 1834 * @fcf_index: index to fcf table.
1835 * 1835 *
1836 * This routine routine allocates and constructs non-embedded mailbox command 1836 * This routine routine allocates and constructs non-embedded mailbox command
1837 * for reading a FCF table entry refered by @fcf_index. 1837 * for reading a FCF table entry referred by @fcf_index.
1838 * 1838 *
1839 * Return: pointer to the mailbox command constructed if successful, otherwise 1839 * Return: pointer to the mailbox command constructed if successful, otherwise
1840 * NULL. 1840 * NULL.
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index f3cfbe2ce986..f2b1bbcb196f 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -50,7 +50,7 @@
50 * and subcategory. The event type must come first. 50 * and subcategory. The event type must come first.
51 * The subcategory further defines the data that follows in the rest 51 * The subcategory further defines the data that follows in the rest
52 * of the payload. Each category will have its own unique header plus 52 * of the payload. Each category will have its own unique header plus
53 * any addtional data unique to the subcategory. 53 * any additional data unique to the subcategory.
54 * The payload sent via the fc transport is one-way driver->application. 54 * The payload sent via the fc transport is one-way driver->application.
55 */ 55 */
56 56
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 52b35159fc35..0d92d4205ea6 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -658,7 +658,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
658 return 0; 658 return 0;
659} 659}
660/** 660/**
661 * lpfc_release_rpi - Release a RPI by issueing unreg_login mailbox cmd. 661 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
662 * @phba : Pointer to lpfc_hba structure. 662 * @phba : Pointer to lpfc_hba structure.
663 * @vport: Pointer to lpfc_vport structure. 663 * @vport: Pointer to lpfc_vport structure.
664 * @rpi : rpi to be release. 664 * @rpi : rpi to be release.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bf34178b80bf..fe7cc84e773b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -577,7 +577,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
577 iocb->un.fcpi64.bdl.addrHigh = 0; 577 iocb->un.fcpi64.bdl.addrHigh = 0;
578 iocb->ulpBdeCount = 0; 578 iocb->ulpBdeCount = 0;
579 iocb->ulpLe = 0; 579 iocb->ulpLe = 0;
580 /* fill in responce BDE */ 580 /* fill in response BDE */
581 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = 581 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
582 BUFF_TYPE_BDE_64; 582 BUFF_TYPE_BDE_64;
583 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = 583 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
@@ -1217,10 +1217,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1217 (2 * sizeof(struct ulp_bde64))); 1217 (2 * sizeof(struct ulp_bde64)));
1218 data_bde->addrHigh = putPaddrHigh(physaddr); 1218 data_bde->addrHigh = putPaddrHigh(physaddr);
1219 data_bde->addrLow = putPaddrLow(physaddr); 1219 data_bde->addrLow = putPaddrLow(physaddr);
1220 /* ebde count includes the responce bde and data bpl */ 1220 /* ebde count includes the response bde and data bpl */
1221 iocb_cmd->unsli3.fcp_ext.ebde_count = 2; 1221 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1222 } else { 1222 } else {
1223 /* ebde count includes the responce bde and data bdes */ 1223 /* ebde count includes the response bde and data bdes */
1224 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 1224 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1225 } 1225 }
1226 } else { 1226 } else {
@@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1514 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 1514 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1515 struct lpfc_pde5 *pde5 = NULL; 1515 struct lpfc_pde5 *pde5 = NULL;
1516 struct lpfc_pde6 *pde6 = NULL; 1516 struct lpfc_pde6 *pde6 = NULL;
1517 struct ulp_bde64 *prot_bde = NULL; 1517 struct lpfc_pde7 *pde7 = NULL;
1518 dma_addr_t dataphysaddr, protphysaddr; 1518 dma_addr_t dataphysaddr, protphysaddr;
1519 unsigned short curr_data = 0, curr_prot = 0; 1519 unsigned short curr_data = 0, curr_prot = 0;
1520 unsigned int split_offset, protgroup_len; 1520 unsigned int split_offset;
1521 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1521 unsigned int protgrp_blks, protgrp_bytes; 1522 unsigned int protgrp_blks, protgrp_bytes;
1522 unsigned int remainder, subtotal; 1523 unsigned int remainder, subtotal;
1523 int status; 1524 int status;
@@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1585 bpl++; 1586 bpl++;
1586 1587
1587 /* setup the first BDE that points to protection buffer */ 1588 /* setup the first BDE that points to protection buffer */
1588 prot_bde = (struct ulp_bde64 *) bpl; 1589 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1589 protphysaddr = sg_dma_address(sgpe); 1590 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1590 prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
1591 prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
1592 protgroup_len = sg_dma_len(sgpe);
1593 1591
1594 /* must be integer multiple of the DIF block length */ 1592 /* must be integer multiple of the DIF block length */
1595 BUG_ON(protgroup_len % 8); 1593 BUG_ON(protgroup_len % 8);
1596 1594
1595 pde7 = (struct lpfc_pde7 *) bpl;
1596 memset(pde7, 0, sizeof(struct lpfc_pde7));
1597 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1598
1599 pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
1600 pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
1601
1597 protgrp_blks = protgroup_len / 8; 1602 protgrp_blks = protgroup_len / 8;
1598 protgrp_bytes = protgrp_blks * blksize; 1603 protgrp_bytes = protgrp_blks * blksize;
1599 1604
1600 prot_bde->tus.f.bdeSize = protgroup_len; 1605 /* check if this pde is crossing the 4K boundary; if so split */
1601 prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; 1606 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1602 prot_bde->tus.w = le32_to_cpu(bpl->tus.w); 1607 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1608 protgroup_offset += protgroup_remainder;
1609 protgrp_blks = protgroup_remainder / 8;
1610 protgrp_bytes = protgroup_remainder * blksize;
1611 } else {
1612 protgroup_offset = 0;
1613 curr_prot++;
1614 }
1603 1615
1604 curr_prot++;
1605 num_bde++; 1616 num_bde++;
1606 1617
1607 /* setup BDE's for data blocks associated with DIF data */ 1618 /* setup BDE's for data blocks associated with DIF data */
@@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1653 1664
1654 } 1665 }
1655 1666
1667 if (protgroup_offset) {
1668 /* update the reference tag */
1669 reftag += protgrp_blks;
1670 bpl++;
1671 continue;
1672 }
1673
1656 /* are we done ? */ 1674 /* are we done ? */
1657 if (curr_prot == protcnt) { 1675 if (curr_prot == protcnt) {
1658 alldone = 1; 1676 alldone = 1;
@@ -1675,6 +1693,7 @@ out:
1675 1693
1676 return num_bde; 1694 return num_bde;
1677} 1695}
1696
1678/* 1697/*
1679 * Given a SCSI command that supports DIF, determine composition of protection 1698 * Given a SCSI command that supports DIF, determine composition of protection
1680 * groups involved in setting up buffer lists 1699 * groups involved in setting up buffer lists
@@ -2361,7 +2380,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2361 } 2380 }
2362 /* 2381 /*
2363 * The cmnd->underflow is the minimum number of bytes that must 2382 * The cmnd->underflow is the minimum number of bytes that must
2364 * be transfered for this command. Provided a sense condition 2383 * be transferred for this command. Provided a sense condition
2365 * is not present, make sure the actual amount transferred is at 2384 * is not present, make sure the actual amount transferred is at
2366 * least the underflow value or fail. 2385 * least the underflow value or fail.
2367 */ 2386 */
@@ -2854,7 +2873,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2854} 2873}
2855 2874
2856/** 2875/**
2857 * lpfc_scsi_api_table_setup - Set up scsi api fucntion jump table 2876 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2858 * @phba: The hba struct for which this call is being executed. 2877 * @phba: The hba struct for which this call is being executed.
2859 * @dev_grp: The HBA PCI-Device group number. 2878 * @dev_grp: The HBA PCI-Device group number.
2860 * 2879 *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 5932273870a5..ce645b20a6ad 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -130,7 +130,7 @@ struct lpfc_scsi_buf {
130 dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ 130 dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
131 131
132 /* 132 /*
133 * data and dma_handle are the kernel virutal and bus address of the 133 * data and dma_handle are the kernel virtual and bus address of the
134 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter 134 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
135 * gather bde list that supports the sg_tablesize value. 135 * gather bde list that supports the sg_tablesize value.
136 */ 136 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2ee0374a9908..dacabbe0a586 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2817,7 +2817,7 @@ void lpfc_poll_eratt(unsigned long ptr)
2817 * This function is called from the interrupt context when there is a ring 2817 * This function is called from the interrupt context when there is a ring
2818 * event for the fcp ring. The caller does not hold any lock. 2818 * event for the fcp ring. The caller does not hold any lock.
2819 * The function processes each response iocb in the response ring until it 2819 * The function processes each response iocb in the response ring until it
2820 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with 2820 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
2821 * LE bit set. The function will call the completion handler of the command iocb 2821 * LE bit set. The function will call the completion handler of the command iocb
2822 * if the response iocb indicates a completion for a command iocb or it is 2822 * if the response iocb indicates a completion for a command iocb or it is
2823 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2823 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3477 int retval = 0; 3477 int retval = 0;
3478 3478
3479 /* Read the HBA Host Status Register */ 3479 /* Read the HBA Host Status Register */
3480 status = readl(phba->HSregaddr); 3480 if (lpfc_readl(phba->HSregaddr, &status))
3481 return 1;
3481 3482
3482 /* 3483 /*
3483 * Check status register every 100ms for 5 retries, then every 3484 * Check status register every 100ms for 5 retries, then every
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3502 lpfc_sli_brdrestart(phba); 3503 lpfc_sli_brdrestart(phba);
3503 } 3504 }
3504 /* Read the HBA Host Status Register */ 3505 /* Read the HBA Host Status Register */
3505 status = readl(phba->HSregaddr); 3506 if (lpfc_readl(phba->HSregaddr, &status)) {
3507 retval = 1;
3508 break;
3509 }
3506 } 3510 }
3507 3511
3508 /* Check to see if any errors occurred during init */ 3512 /* Check to see if any errors occurred during init */
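From here on the patch replaces bare readl() calls with lpfc_readl(), whose return value lets each caller bail out instead of acting on a bad read. lpfc_readl() itself is not shown in this diff; the sketch below assumes the usual convention that an all-ones value means the adapter was surprise-removed, purely to illustrate the calling pattern:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the guarded read: report failure instead of
 * handing the caller 0xffffffff from a pulled adapter. */
static int guarded_read32(const volatile uint32_t *reg, uint32_t *val)
{
    uint32_t v = *reg;                 /* stands in for readl() */
    if (v == 0xffffffffU)
        return -1;                     /* device likely gone */
    *val = v;
    return 0;
}

int main(void)
{
    volatile uint32_t fake_hs = 0x12345678;   /* pretend Host Status register */
    uint32_t status;

    if (guarded_read32(&fake_hs, &status))
        return 1;                      /* mirrors the early "return 1" above */
    printf("HS = 0x%08x\n", status);
    return 0;
}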
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
3584 uint32_t __iomem *resp_buf; 3588 uint32_t __iomem *resp_buf;
3585 uint32_t __iomem *mbox_buf; 3589 uint32_t __iomem *mbox_buf;
3586 volatile uint32_t mbox; 3590 volatile uint32_t mbox;
3587 uint32_t hc_copy; 3591 uint32_t hc_copy, ha_copy, resp_data;
3588 int i; 3592 int i;
3589 uint8_t hdrtype; 3593 uint8_t hdrtype;
3590 3594
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
3601 resp_buf = phba->MBslimaddr; 3605 resp_buf = phba->MBslimaddr;
3602 3606
3603 /* Disable the error attention */ 3607 /* Disable the error attention */
3604 hc_copy = readl(phba->HCregaddr); 3608 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3609 return;
3605 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3610 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3606 readl(phba->HCregaddr); /* flush */ 3611 readl(phba->HCregaddr); /* flush */
3607 phba->link_flag |= LS_IGNORE_ERATT; 3612 phba->link_flag |= LS_IGNORE_ERATT;
3608 3613
3609 if (readl(phba->HAregaddr) & HA_ERATT) { 3614 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3615 return;
3616 if (ha_copy & HA_ERATT) {
3610 /* Clear Chip error bit */ 3617 /* Clear Chip error bit */
3611 writel(HA_ERATT, phba->HAregaddr); 3618 writel(HA_ERATT, phba->HAregaddr);
3612 phba->pport->stopped = 1; 3619 phba->pport->stopped = 1;
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
3620 mbox_buf = phba->MBslimaddr; 3627 mbox_buf = phba->MBslimaddr;
3621 writel(mbox, mbox_buf); 3628 writel(mbox, mbox_buf);
3622 3629
3623 for (i = 0; 3630 for (i = 0; i < 50; i++) {
3624 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 3631 if (lpfc_readl((resp_buf + 1), &resp_data))
3625 mdelay(1); 3632 return;
3626 3633 if (resp_data != ~(BARRIER_TEST_PATTERN))
3627 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3634 mdelay(1);
3635 else
3636 break;
3637 }
3638 resp_data = 0;
3639 if (lpfc_readl((resp_buf + 1), &resp_data))
3640 return;
3641 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
3628 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3642 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3629 phba->pport->stopped) 3643 phba->pport->stopped)
3630 goto restore_hc; 3644 goto restore_hc;
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
3633 } 3647 }
3634 3648
3635 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3649 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3636 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 3650 resp_data = 0;
3637 mdelay(1); 3651 for (i = 0; i < 500; i++) {
3652 if (lpfc_readl(resp_buf, &resp_data))
3653 return;
3654 if (resp_data != mbox)
3655 mdelay(1);
3656 else
3657 break;
3658 }
3638 3659
3639clear_errat: 3660clear_errat:
3640 3661
3641 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 3662 while (++i < 500) {
3642 mdelay(1); 3663 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3664 return;
3665 if (!(ha_copy & HA_ERATT))
3666 mdelay(1);
3667 else
3668 break;
3669 }
3643 3670
3644 if (readl(phba->HAregaddr) & HA_ERATT) { 3671 if (readl(phba->HAregaddr) & HA_ERATT) {
3645 writel(HA_ERATT, phba->HAregaddr); 3672 writel(HA_ERATT, phba->HAregaddr);
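The rewritten barrier waits above move the register read into the loop body so a failed read aborts the wait instead of spinning on garbage. A compact userspace sketch of that bounded-poll shape, with usleep() standing in for mdelay() and a plain variable standing in for the response register (all names invented):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static volatile uint32_t fake_resp;            /* stands in for resp_buf + 1 */

static int read_reg(const volatile uint32_t *reg, uint32_t *val)
{
    *val = *reg;
    return (*val == 0xffffffffU) ? -1 : 0;     /* assumed failure check */
}

int main(void)
{
    const uint32_t pattern = ~0x7bcdef00U;     /* stands in for ~BARRIER_TEST_PATTERN */
    uint32_t resp = 0;
    int i;

    for (i = 0; i < 50; i++) {
        if (read_reg(&fake_resp, &resp))
            return 1;                          /* bail out on a dead read */
        if (resp != pattern)
            usleep(1000);                      /* userspace stand-in for mdelay(1) */
        else
            break;
    }
    printf("stopped after %d poll(s), last value 0x%08x\n", i, resp);
    return 0;
}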
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
3686 3713
3687 /* Disable the error attention */ 3714 /* Disable the error attention */
3688 spin_lock_irq(&phba->hbalock); 3715 spin_lock_irq(&phba->hbalock);
3689 status = readl(phba->HCregaddr); 3716 if (lpfc_readl(phba->HCregaddr, &status)) {
3717 spin_unlock_irq(&phba->hbalock);
3718 mempool_free(pmb, phba->mbox_mem_pool);
3719 return 1;
3720 }
3690 status &= ~HC_ERINT_ENA; 3721 status &= ~HC_ERINT_ENA;
3691 writel(status, phba->HCregaddr); 3722 writel(status, phba->HCregaddr);
3692 readl(phba->HCregaddr); /* flush */ 3723 readl(phba->HCregaddr); /* flush */
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
3720 * 3 seconds we still set HBA_ERROR state because the status of the 3751 * 3 seconds we still set HBA_ERROR state because the status of the
3721 * board is now undefined. 3752 * board is now undefined.
3722 */ 3753 */
3723 ha_copy = readl(phba->HAregaddr); 3754 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3724 3755 return 1;
3725 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3756 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3726 mdelay(100); 3757 mdelay(100);
3727 ha_copy = readl(phba->HAregaddr); 3758 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3759 return 1;
3728 } 3760 }
3729 3761
3730 del_timer_sync(&psli->mbox_tmo); 3762 del_timer_sync(&psli->mbox_tmo);
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
4018 uint32_t status, i = 0; 4050 uint32_t status, i = 0;
4019 4051
4020 /* Read the HBA Host Status Register */ 4052 /* Read the HBA Host Status Register */
4021 status = readl(phba->HSregaddr); 4053 if (lpfc_readl(phba->HSregaddr, &status))
4054 return -EIO;
4022 4055
4023 /* Check status register to see what current state is */ 4056 /* Check status register to see what current state is */
4024 i = 0; 4057 i = 0;
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
4073 lpfc_sli_brdrestart(phba); 4106 lpfc_sli_brdrestart(phba);
4074 } 4107 }
4075 /* Read the HBA Host Status Register */ 4108 /* Read the HBA Host Status Register */
4076 status = readl(phba->HSregaddr); 4109 if (lpfc_readl(phba->HSregaddr, &status))
4110 return -EIO;
4077 } 4111 }
4078 4112
4079 /* Check to see if any errors occurred during init */ 4113 /* Check to see if any errors occurred during init */
@@ -5083,7 +5117,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
5083 5117
5084 /* Setting state unknown so lpfc_sli_abort_iocb_ring 5118 /* Setting state unknown so lpfc_sli_abort_iocb_ring
5085 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 5119 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
5086 * it to fail all oustanding SCSI IO. 5120 * it to fail all outstanding SCSI IO.
5087 */ 5121 */
5088 spin_lock_irq(&phba->pport->work_port_lock); 5122 spin_lock_irq(&phba->pport->work_port_lock);
5089 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 5123 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
5136 MAILBOX_t *mb; 5170 MAILBOX_t *mb;
5137 struct lpfc_sli *psli = &phba->sli; 5171 struct lpfc_sli *psli = &phba->sli;
5138 uint32_t status, evtctr; 5172 uint32_t status, evtctr;
5139 uint32_t ha_copy; 5173 uint32_t ha_copy, hc_copy;
5140 int i; 5174 int i;
5141 unsigned long timeout; 5175 unsigned long timeout;
5142 unsigned long drvr_flag = 0; 5176 unsigned long drvr_flag = 0;
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
5202 goto out_not_finished; 5236 goto out_not_finished;
5203 } 5237 }
5204 5238
5205 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 5239 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
5206 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 5240 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
5207 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5241 !(hc_copy & HC_MBINT_ENA)) {
5208 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5242 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
5243 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5209 "(%d):2528 Mailbox command x%x cannot " 5244 "(%d):2528 Mailbox command x%x cannot "
5210 "issue Data: x%x x%x\n", 5245 "issue Data: x%x x%x\n",
5211 pmbox->vport ? pmbox->vport->vpi : 0, 5246 pmbox->vport ? pmbox->vport->vpi : 0,
5212 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 5247 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
5213 goto out_not_finished; 5248 goto out_not_finished;
5249 }
5214 } 5250 }
5215 5251
5216 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5252 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
5408 word0 = le32_to_cpu(word0); 5444 word0 = le32_to_cpu(word0);
5409 } else { 5445 } else {
5410 /* First read mbox status word */ 5446 /* First read mbox status word */
5411 word0 = readl(phba->MBslimaddr); 5447 if (lpfc_readl(phba->MBslimaddr, &word0)) {
5448 spin_unlock_irqrestore(&phba->hbalock,
5449 drvr_flag);
5450 goto out_not_finished;
5451 }
5412 } 5452 }
5413 5453
5414 /* Read the HBA Host Attention Register */ 5454 /* Read the HBA Host Attention Register */
5415 ha_copy = readl(phba->HAregaddr); 5455 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
5456 spin_unlock_irqrestore(&phba->hbalock,
5457 drvr_flag);
5458 goto out_not_finished;
5459 }
5416 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 5460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
5417 mb->mbxCommand) * 5461 mb->mbxCommand) *
5418 1000) + jiffies; 5462 1000) + jiffies;
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
5463 word0 = readl(phba->MBslimaddr); 5507 word0 = readl(phba->MBslimaddr);
5464 } 5508 }
5465 /* Read the HBA Host Attention Register */ 5509 /* Read the HBA Host Attention Register */
5466 ha_copy = readl(phba->HAregaddr); 5510 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
5511 spin_unlock_irqrestore(&phba->hbalock,
5512 drvr_flag);
5513 goto out_not_finished;
5514 }
5467 } 5515 }
5468 5516
5469 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5517 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
@@ -5983,7 +6031,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5983} 6031}
5984 6032
5985/** 6033/**
5986 * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table 6034 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5987 * @phba: The hba struct for which this call is being executed. 6035 * @phba: The hba struct for which this call is being executed.
5988 * @dev_grp: The HBA PCI-Device group number. 6036 * @dev_grp: The HBA PCI-Device group number.
5989 * 6037 *
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6263 bf_set(lpfc_sli4_sge_last, sgl, 1); 6311 bf_set(lpfc_sli4_sge_last, sgl, 1);
6264 else 6312 else
6265 bf_set(lpfc_sli4_sge_last, sgl, 0); 6313 bf_set(lpfc_sli4_sge_last, sgl, 0);
6266 sgl->word2 = cpu_to_le32(sgl->word2);
6267 /* swap the size field back to the cpu so we 6314 /* swap the size field back to the cpu so we
6268 * can assign it to the sgl. 6315 * can assign it to the sgl.
6269 */ 6316 */
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6283 bf_set(lpfc_sli4_sge_offset, sgl, offset); 6330 bf_set(lpfc_sli4_sge_offset, sgl, offset);
6284 offset += bde.tus.f.bdeSize; 6331 offset += bde.tus.f.bdeSize;
6285 } 6332 }
6333 sgl->word2 = cpu_to_le32(sgl->word2);
6286 bpl++; 6334 bpl++;
6287 sgl++; 6335 sgl++;
6288 } 6336 }
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6528 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 6576 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
6529 sizeof(struct ulp_bde64); 6577 sizeof(struct ulp_bde64);
6530 for (i = 0; i < numBdes; i++) { 6578 for (i = 0; i < numBdes; i++) {
6531 if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
6532 break;
6533 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 6579 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
6580 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
6581 break;
6534 xmit_len += bde.tus.f.bdeSize; 6582 xmit_len += bde.tus.f.bdeSize;
6535 } 6583 }
6536 /* word3 iocb=IO_TAG wqe=request_payload_len */ 6584 /* word3 iocb=IO_TAG wqe=request_payload_len */
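The hunk above byte-swaps bpl[i].tus.w to CPU order before testing bdeFlags; otherwise, on a big-endian host the flag bits of the raw little-endian word sit in the wrong byte lane. A small stand-alone sketch of that ordering, using an illustrative 8-bit-flags/24-bit-size layout rather than the driver's real ulp_bde64:

#include <stdio.h>
#include <endian.h>        /* htole32()/le32toh(); glibc/BSD assumption of this sketch */

int main(void)
{
    /* A 32-bit descriptor word as the hardware stores it: little endian,
     * with a type/flags byte on top of a 24-bit size field. */
    unsigned int raw_le = htole32((0x40u << 24) | 0x200u);

    unsigned int w     = le32toh(raw_le);      /* swap first ... */
    unsigned int flags = w >> 24;              /* ... then pick the fields apart */
    unsigned int size  = w & 0x00ffffffu;

    printf("flags 0x%02x, size 0x%06x\n", flags, size);
    return 0;
}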
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6620 xritag = 0; 6668 xritag = 0;
6621 break; 6669 break;
6622 case CMD_XMIT_BLS_RSP64_CX: 6670 case CMD_XMIT_BLS_RSP64_CX:
6623 /* As BLS ABTS-ACC WQE is very different from other WQEs, 6671 /* As BLS ABTS RSP WQE is very different from other WQEs,
6624 * we re-construct this WQE here based on information in 6672 * we re-construct this WQE here based on information in
6625 * iocbq from scratch. 6673 * iocbq from scratch.
6626 */ 6674 */
6627 memset(wqe, 0, sizeof(union lpfc_wqe)); 6675 memset(wqe, 0, sizeof(union lpfc_wqe));
6628 /* OX_ID is invariable to who sent ABTS to CT exchange */ 6676 /* OX_ID is invariable to who sent ABTS to CT exchange */
6629 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 6677 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
6630 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); 6678 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
6631 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == 6679 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
6632 LPFC_ABTS_UNSOL_INT) { 6680 LPFC_ABTS_UNSOL_INT) {
6633 /* ABTS sent by initiator to CT exchange, the 6681 /* ABTS sent by initiator to CT exchange, the
6634 * RX_ID field will be filled with the newly 6682 * RX_ID field will be filled with the newly
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6642 * RX_ID from ABTS. 6690 * RX_ID from ABTS.
6643 */ 6691 */
6644 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 6692 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6645 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); 6693 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
6646 } 6694 }
6647 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 6695 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
6648 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 6696 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6653 LPFC_WQE_LENLOC_NONE); 6701 LPFC_WQE_LENLOC_NONE);
6654 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 6702 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
6655 command_type = OTHER_COMMAND; 6703 command_type = OTHER_COMMAND;
6704 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
6705 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
6706 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
6707 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
6708 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
6709 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
6710 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
6711 }
6712
6656 break; 6713 break;
6657 case CMD_XRI_ABORTED_CX: 6714 case CMD_XRI_ABORTED_CX:
6658 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6715 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6701 6758
6702 if (piocb->sli4_xritag == NO_XRI) { 6759 if (piocb->sli4_xritag == NO_XRI) {
6703 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6760 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6704 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6761 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
6762 piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
6705 sglq = NULL; 6763 sglq = NULL;
6706 else { 6764 else {
6707 if (pring->txq_cnt) { 6765 if (pring->txq_cnt) {
@@ -6789,7 +6847,7 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6789} 6847}
6790 6848
6791/** 6849/**
6792 * lpfc_sli_api_table_setup - Set up sli api fucntion jump table 6850 * lpfc_sli_api_table_setup - Set up sli api function jump table
6793 * @phba: The hba struct for which this call is being executed. 6851 * @phba: The hba struct for which this call is being executed.
6794 * @dev_grp: The HBA PCI-Device group number. 6852 * @dev_grp: The HBA PCI-Device group number.
6795 * 6853 *
@@ -7463,7 +7521,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7463 struct lpfc_dmabuf *mp, *next_mp; 7521 struct lpfc_dmabuf *mp, *next_mp;
7464 struct list_head *slp = &pring->postbufq; 7522 struct list_head *slp = &pring->postbufq;
7465 7523
7466 /* Search postbufq, from the begining, looking for a match on tag */ 7524 /* Search postbufq, from the beginning, looking for a match on tag */
7467 spin_lock_irq(&phba->hbalock); 7525 spin_lock_irq(&phba->hbalock);
7468 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7526 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
7469 if (mp->buffer_tag == tag) { 7527 if (mp->buffer_tag == tag) {
@@ -7507,7 +7565,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7507 struct lpfc_dmabuf *mp, *next_mp; 7565 struct lpfc_dmabuf *mp, *next_mp;
7508 struct list_head *slp = &pring->postbufq; 7566 struct list_head *slp = &pring->postbufq;
7509 7567
7510 /* Search postbufq, from the begining, looking for a match on phys */ 7568 /* Search postbufq, from the beginning, looking for a match on phys */
7511 spin_lock_irq(&phba->hbalock); 7569 spin_lock_irq(&phba->hbalock);
7512 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7570 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
7513 if (mp->phys == phys) { 7571 if (mp->phys == phys) {
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
8194 piocb->iocb_flag &= ~LPFC_IO_WAKE; 8252 piocb->iocb_flag &= ~LPFC_IO_WAKE;
8195 8253
8196 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 8254 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
8197 creg_val = readl(phba->HCregaddr); 8255 if (lpfc_readl(phba->HCregaddr, &creg_val))
8256 return IOCB_ERROR;
8198 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 8257 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
8199 writel(creg_val, phba->HCregaddr); 8258 writel(creg_val, phba->HCregaddr);
8200 readl(phba->HCregaddr); /* flush */ 8259 readl(phba->HCregaddr); /* flush */
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
8236 } 8295 }
8237 8296
8238 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 8297 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
8239 creg_val = readl(phba->HCregaddr); 8298 if (lpfc_readl(phba->HCregaddr, &creg_val))
8299 return IOCB_ERROR;
8240 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 8300 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
8241 writel(creg_val, phba->HCregaddr); 8301 writel(creg_val, phba->HCregaddr);
8242 readl(phba->HCregaddr); /* flush */ 8302 readl(phba->HCregaddr); /* flush */
@@ -8378,7 +8438,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
8378 * for possible error attention events. The caller must hold the hostlock 8438 * for possible error attention events. The caller must hold the hostlock
8379 * with spin_lock_irq(). 8439 * with spin_lock_irq().
8380 * 8440 *
8381 * This fucntion returns 1 when there is Error Attention in the Host Attention 8441 * This function returns 1 when there is Error Attention in the Host Attention
8382 * Register and returns 0 otherwise. 8442 * Register and returns 0 otherwise.
8383 **/ 8443 **/
8384static int 8444static int
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
8387 uint32_t ha_copy; 8447 uint32_t ha_copy;
8388 8448
8389 /* Read chip Host Attention (HA) register */ 8449 /* Read chip Host Attention (HA) register */
8390 ha_copy = readl(phba->HAregaddr); 8450 if (lpfc_readl(phba->HAregaddr, &ha_copy))
8451 goto unplug_err;
8452
8391 if (ha_copy & HA_ERATT) { 8453 if (ha_copy & HA_ERATT) {
8392 /* Read host status register to retrieve error event */ 8454 /* Read host status register to retrieve error event */
8393 lpfc_sli_read_hs(phba); 8455 if (lpfc_sli_read_hs(phba))
8456 goto unplug_err;
8394 8457
8395 /* Check if there is a deferred error condition is active */ 8458 /* Check if there is a deferred error condition is active */
8396 if ((HS_FFER1 & phba->work_hs) && 8459 if ((HS_FFER1 & phba->work_hs) &&
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
8409 return 1; 8472 return 1;
8410 } 8473 }
8411 return 0; 8474 return 0;
8475
8476unplug_err:
8477 /* Set the driver HS work bitmap */
8478 phba->work_hs |= UNPLUG_ERR;
8479 /* Set the driver HA work bitmap */
8480 phba->work_ha |= HA_ERATT;
8481 /* Indicate polling handles this ERATT */
8482 phba->hba_flag |= HBA_ERATT_HANDLED;
8483 return 1;
8412} 8484}
8413 8485
8414/** 8486/**
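The new unplug_err exit records the failed read in the driver's work bitmaps so the polling path treats it as a handled error attention. A tiny sketch of that bookkeeping, with invented bit values standing in for UNPLUG_ERR, HA_ERATT and HBA_ERATT_HANDLED:

#include <stdio.h>

#define SK_UNPLUG_ERR      (1u << 0)   /* illustrative values only */
#define SK_HA_ERATT        (1u << 1)
#define SK_ERATT_HANDLED   (1u << 2)

int main(void)
{
    unsigned int work_hs = 0, work_ha = 0, hba_flag = 0;

    /* what the unplug_err label does: record the event for the worker thread */
    work_hs  |= SK_UNPLUG_ERR;
    work_ha  |= SK_HA_ERATT;
    hba_flag |= SK_ERATT_HANDLED;      /* the polling path owns this ERATT now */

    printf("hs=0x%x ha=0x%x flag=0x%x\n", work_hs, work_ha, hba_flag);
    return 0;
}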
@@ -8419,7 +8491,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
8419 * for possible error attention events. The caller must hold the hostlock 8491 * for possible error attention events. The caller must hold the hostlock
8420 * with spin_lock_irq(). 8492 * with spin_lock_irq().
8421 * 8493 *
8422 * This fucntion returns 1 when there is Error Attention in the Host Attention 8494 * This function returns 1 when there is Error Attention in the Host Attention
8423 * Register and returns 0 otherwise. 8495 * Register and returns 0 otherwise.
8424 **/ 8496 **/
8425static int 8497static int
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
8436 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8508 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8437 switch (if_type) { 8509 switch (if_type) {
8438 case LPFC_SLI_INTF_IF_TYPE_0: 8510 case LPFC_SLI_INTF_IF_TYPE_0:
8439 uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 8511 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
8440 uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 8512 &uerr_sta_lo) ||
8513 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
8514 &uerr_sta_hi)) {
8515 phba->work_hs |= UNPLUG_ERR;
8516 phba->work_ha |= HA_ERATT;
8517 phba->hba_flag |= HBA_ERATT_HANDLED;
8518 return 1;
8519 }
8441 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 8520 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
8442 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 8521 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
8443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
8456 } 8535 }
8457 break; 8536 break;
8458 case LPFC_SLI_INTF_IF_TYPE_2: 8537 case LPFC_SLI_INTF_IF_TYPE_2:
8459 portstat_reg.word0 = 8538 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8460 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 8539 &portstat_reg.word0) ||
8461 portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); 8540 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
8541 &portsmphr)){
8542 phba->work_hs |= UNPLUG_ERR;
8543 phba->work_ha |= HA_ERATT;
8544 phba->hba_flag |= HBA_ERATT_HANDLED;
8545 return 1;
8546 }
8462 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 8547 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
8463 phba->work_status[0] = 8548 phba->work_status[0] =
8464 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 8549 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -8496,7 +8581,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
8496 * This function is called from timer soft interrupt context to check HBA's 8581 * This function is called from timer soft interrupt context to check HBA's
8497 * error attention register bit for error attention events. 8582 * error attention register bit for error attention events.
8498 * 8583 *
8499 * This fucntion returns 1 when there is Error Attention in the Host Attention 8584 * This function returns 1 when there is Error Attention in the Host Attention
8500 * Register and returns 0 otherwise. 8585 * Register and returns 0 otherwise.
8501 **/ 8586 **/
8502int 8587int
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8639 return IRQ_NONE; 8724 return IRQ_NONE;
8640 /* Need to read HA REG for slow-path events */ 8725 /* Need to read HA REG for slow-path events */
8641 spin_lock_irqsave(&phba->hbalock, iflag); 8726 spin_lock_irqsave(&phba->hbalock, iflag);
8642 ha_copy = readl(phba->HAregaddr); 8727 if (lpfc_readl(phba->HAregaddr, &ha_copy))
8728 goto unplug_error;
8643 /* If somebody is waiting to handle an eratt don't process it 8729 /* If somebody is waiting to handle an eratt don't process it
8644 * here. The brdkill function will do this. 8730 * here. The brdkill function will do this.
8645 */ 8731 */
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8665 } 8751 }
8666 8752
8667 /* Clear up only attention source related to slow-path */ 8753 /* Clear up only attention source related to slow-path */
8668 hc_copy = readl(phba->HCregaddr); 8754 if (lpfc_readl(phba->HCregaddr, &hc_copy))
8755 goto unplug_error;
8756
8669 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 8757 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
8670 HC_LAINT_ENA | HC_ERINT_ENA), 8758 HC_LAINT_ENA | HC_ERINT_ENA),
8671 phba->HCregaddr); 8759 phba->HCregaddr);
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8688 */ 8776 */
8689 spin_lock_irqsave(&phba->hbalock, iflag); 8777 spin_lock_irqsave(&phba->hbalock, iflag);
8690 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 8778 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
8691 control = readl(phba->HCregaddr); 8779 if (lpfc_readl(phba->HCregaddr, &control))
8780 goto unplug_error;
8692 control &= ~HC_LAINT_ENA; 8781 control &= ~HC_LAINT_ENA;
8693 writel(control, phba->HCregaddr); 8782 writel(control, phba->HCregaddr);
8694 readl(phba->HCregaddr); /* flush */ 8783 readl(phba->HCregaddr); /* flush */
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8708 status >>= (4*LPFC_ELS_RING); 8797 status >>= (4*LPFC_ELS_RING);
8709 if (status & HA_RXMASK) { 8798 if (status & HA_RXMASK) {
8710 spin_lock_irqsave(&phba->hbalock, iflag); 8799 spin_lock_irqsave(&phba->hbalock, iflag);
8711 control = readl(phba->HCregaddr); 8800 if (lpfc_readl(phba->HCregaddr, &control))
8801 goto unplug_error;
8712 8802
8713 lpfc_debugfs_slow_ring_trc(phba, 8803 lpfc_debugfs_slow_ring_trc(phba,
8714 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 8804 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8741 } 8831 }
8742 spin_lock_irqsave(&phba->hbalock, iflag); 8832 spin_lock_irqsave(&phba->hbalock, iflag);
8743 if (work_ha_copy & HA_ERATT) { 8833 if (work_ha_copy & HA_ERATT) {
8744 lpfc_sli_read_hs(phba); 8834 if (lpfc_sli_read_hs(phba))
8835 goto unplug_error;
8745 /* 8836 /*
8746 * Check if there is a deferred error condition 8837 * Check if there is a deferred error condition
8747 * is active 8838 * is active
@@ -8872,6 +8963,9 @@ send_current_mbox:
8872 lpfc_worker_wake_up(phba); 8963 lpfc_worker_wake_up(phba);
8873 } 8964 }
8874 return IRQ_HANDLED; 8965 return IRQ_HANDLED;
8966unplug_error:
8967 spin_unlock_irqrestore(&phba->hbalock, iflag);
8968 return IRQ_HANDLED;
8875 8969
8876} /* lpfc_sli_sp_intr_handler */ 8970} /* lpfc_sli_sp_intr_handler */
8877 8971
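The added unplug_error label in the slow-path interrupt handler drops hbalock before returning IRQ_HANDLED, so no early exit leaves the lock held. A minimal pthread sketch of that unlock-before-return shape (the kernel code uses spin_lock_irqsave, not a mutex; this is only an illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int read_reg(unsigned int *val)
{
    *val = 0xffffffffu;                /* simulate a dead read */
    return -1;
}

static int handler(void)
{
    unsigned int ha;

    pthread_mutex_lock(&lock);
    if (read_reg(&ha))
        goto unplug_error;
    /* ... normal processing under the lock would go here ... */
    pthread_mutex_unlock(&lock);
    return 1;                          /* IRQ_HANDLED */

unplug_error:
    pthread_mutex_unlock(&lock);       /* never return with the lock held */
    return 1;                          /* still IRQ_HANDLED */
}

int main(void)
{
    printf("handler returned %d\n", handler());
    return 0;
}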
@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
8919 if (lpfc_intr_state_check(phba)) 9013 if (lpfc_intr_state_check(phba))
8920 return IRQ_NONE; 9014 return IRQ_NONE;
8921 /* Need to read HA REG for FCP ring and other ring events */ 9015 /* Need to read HA REG for FCP ring and other ring events */
8922 ha_copy = readl(phba->HAregaddr); 9016 if (lpfc_readl(phba->HAregaddr, &ha_copy))
9017 return IRQ_HANDLED;
8923 /* Clear up only attention source related to fast-path */ 9018 /* Clear up only attention source related to fast-path */
8924 spin_lock_irqsave(&phba->hbalock, iflag); 9019 spin_lock_irqsave(&phba->hbalock, iflag);
8925 /* 9020 /*
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
9004 return IRQ_NONE; 9099 return IRQ_NONE;
9005 9100
9006 spin_lock(&phba->hbalock); 9101 spin_lock(&phba->hbalock);
9007 phba->ha_copy = readl(phba->HAregaddr); 9102 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
9103 spin_unlock(&phba->hbalock);
9104 return IRQ_HANDLED;
9105 }
9106
9008 if (unlikely(!phba->ha_copy)) { 9107 if (unlikely(!phba->ha_copy)) {
9009 spin_unlock(&phba->hbalock); 9108 spin_unlock(&phba->hbalock);
9010 return IRQ_NONE; 9109 return IRQ_NONE;
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
9026 } 9125 }
9027 9126
9028 /* Clear attention sources except link and error attentions */ 9127 /* Clear attention sources except link and error attentions */
9029 hc_copy = readl(phba->HCregaddr); 9128 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
9129 spin_unlock(&phba->hbalock);
9130 return IRQ_HANDLED;
9131 }
9030 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 9132 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
9031 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 9133 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
9032 phba->HCregaddr); 9134 phba->HCregaddr);
@@ -9582,7 +9684,7 @@ out:
9582 * @cq: Pointer to the completion queue. 9684 * @cq: Pointer to the completion queue.
9583 * @wcqe: Pointer to a completion queue entry. 9685 * @wcqe: Pointer to a completion queue entry.
9584 * 9686 *
9585 * This routine process a slow-path work-queue or recieve queue completion queue 9687 * This routine process a slow-path work-queue or receive queue completion queue
9586 * entry. 9688 * entry.
9587 * 9689 *
9588 * Return: true if work posted to worker thread, otherwise false. 9690 * Return: true if work posted to worker thread, otherwise false.
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
10403 if (!phba->sli4_hba.pc_sli4_params.supported) 10505 if (!phba->sli4_hba.pc_sli4_params.supported)
10404 hw_page_size = SLI4_PAGE_SIZE; 10506 hw_page_size = SLI4_PAGE_SIZE;
10405 10507
10406
10407 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10508 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10408 if (!mbox) 10509 if (!mbox)
10409 return -ENOMEM; 10510 return -ENOMEM;
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
10413 LPFC_MBOX_OPCODE_CQ_CREATE, 10514 LPFC_MBOX_OPCODE_CQ_CREATE,
10414 length, LPFC_SLI4_MBX_EMBED); 10515 length, LPFC_SLI4_MBX_EMBED);
10415 cq_create = &mbox->u.mqe.un.cq_create; 10516 cq_create = &mbox->u.mqe.un.cq_create;
10517 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
10416 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 10518 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
10417 cq->page_count); 10519 cq->page_count);
10418 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 10520 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
10419 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 10521 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
10420 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); 10522 bf_set(lpfc_mbox_hdr_version, &shdr->request,
10523 phba->sli4_hba.pc_sli4_params.cqv);
10524 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
10525 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
10526 (PAGE_SIZE/SLI4_PAGE_SIZE));
10527 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
10528 eq->queue_id);
10529 } else {
10530 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
10531 eq->queue_id);
10532 }
10421 switch (cq->entry_count) { 10533 switch (cq->entry_count) {
10422 default: 10534 default:
10423 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10535 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
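The CQ_CREATE changes above stamp the mailbox header with the firmware's queue-create version and, for version 2, also program a page size (in 4 KiB SLI4 pages) and the relocated EQ-id field. A schematic sketch of that version branch, with a made-up request structure in place of the real mailbox layout:

#include <stdio.h>

struct cq_req {                        /* illustrative, not the real mailbox */
    unsigned int version;
    unsigned int page_size;            /* in 4 KiB units, v2 only */
    unsigned int eq_id;
};

int main(void)
{
    unsigned long host_page = 4096, sli4_page = 4096;
    struct cq_req req = { 0 };
    unsigned int fw_cqv = 2;           /* pretend pc_sli4_params.cqv */
    unsigned int eq_id = 7;

    req.version = fw_cqv;              /* lpfc_mbox_hdr_version analogue */
    if (fw_cqv == 2) {
        req.page_size = host_page / sli4_page;   /* PAGE_SIZE / SLI4_PAGE_SIZE */
        req.eq_id     = eq_id;                   /* v2 field location */
    } else {
        req.eq_id     = eq_id;                   /* legacy field location */
    }
    printf("v%u page_size=%u eq=%u\n", req.version, req.page_size, req.eq_id);
    return 0;
}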
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
10449 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10561 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10450 10562
10451 /* The IOCTL status is embedded in the mailbox subheader. */ 10563 /* The IOCTL status is embedded in the mailbox subheader. */
10452 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
10453 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10564 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10454 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10565 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10455 if (shdr_status || shdr_add_status || rc) { 10566 if (shdr_status || shdr_add_status || rc) {
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
10515 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 10626 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
10516 switch (mq->entry_count) { 10627 switch (mq->entry_count) {
10517 case 16: 10628 case 16:
10518 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10629 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
10519 LPFC_MQ_CNT_16); 10630 LPFC_MQ_RING_SIZE_16);
10520 break; 10631 break;
10521 case 32: 10632 case 32:
10522 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10633 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
10523 LPFC_MQ_CNT_32); 10634 LPFC_MQ_RING_SIZE_32);
10524 break; 10635 break;
10525 case 64: 10636 case 64:
10526 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10637 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
10527 LPFC_MQ_CNT_64); 10638 LPFC_MQ_RING_SIZE_64);
10528 break; 10639 break;
10529 case 128: 10640 case 128:
10530 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10641 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
10531 LPFC_MQ_CNT_128); 10642 LPFC_MQ_RING_SIZE_128);
10532 break; 10643 break;
10533 } 10644 }
10534 list_for_each_entry(dmabuf, &mq->page_list, list) { 10645 list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
10586 length, LPFC_SLI4_MBX_EMBED); 10697 length, LPFC_SLI4_MBX_EMBED);
10587 10698
10588 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 10699 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
10700 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
10589 bf_set(lpfc_mbx_mq_create_ext_num_pages, 10701 bf_set(lpfc_mbx_mq_create_ext_num_pages,
10590 &mq_create_ext->u.request, mq->page_count); 10702 &mq_create_ext->u.request, mq->page_count);
10591 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 10703 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
10598 &mq_create_ext->u.request, 1); 10710 &mq_create_ext->u.request, 1);
10599 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 10711 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
10600 &mq_create_ext->u.request, 1); 10712 &mq_create_ext->u.request, 1);
10601 bf_set(lpfc_mq_context_cq_id,
10602 &mq_create_ext->u.request.context, cq->queue_id);
10603 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 10713 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
10714 bf_set(lpfc_mbox_hdr_version, &shdr->request,
10715 phba->sli4_hba.pc_sli4_params.mqv);
10716 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
10717 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
10718 cq->queue_id);
10719 else
10720 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
10721 cq->queue_id);
10604 switch (mq->entry_count) { 10722 switch (mq->entry_count) {
10605 default: 10723 default:
10606 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10724 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
10610 return -EINVAL; 10728 return -EINVAL;
10611 /* otherwise default to smallest count (drop through) */ 10729 /* otherwise default to smallest count (drop through) */
10612 case 16: 10730 case 16:
10613 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10731 bf_set(lpfc_mq_context_ring_size,
10614 LPFC_MQ_CNT_16); 10732 &mq_create_ext->u.request.context,
10733 LPFC_MQ_RING_SIZE_16);
10615 break; 10734 break;
10616 case 32: 10735 case 32:
10617 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10736 bf_set(lpfc_mq_context_ring_size,
10618 LPFC_MQ_CNT_32); 10737 &mq_create_ext->u.request.context,
10738 LPFC_MQ_RING_SIZE_32);
10619 break; 10739 break;
10620 case 64: 10740 case 64:
10621 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10741 bf_set(lpfc_mq_context_ring_size,
10622 LPFC_MQ_CNT_64); 10742 &mq_create_ext->u.request.context,
10743 LPFC_MQ_RING_SIZE_64);
10623 break; 10744 break;
10624 case 128: 10745 case 128:
10625 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10746 bf_set(lpfc_mq_context_ring_size,
10626 LPFC_MQ_CNT_128); 10747 &mq_create_ext->u.request.context,
10748 LPFC_MQ_RING_SIZE_128);
10627 break; 10749 break;
10628 } 10750 }
10629 list_for_each_entry(dmabuf, &mq->page_list, list) { 10751 list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
10634 putPaddrHigh(dmabuf->phys); 10756 putPaddrHigh(dmabuf->phys);
10635 } 10757 }
10636 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10758 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10637 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
10638 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10759 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
10639 &mq_create_ext->u.response); 10760 &mq_create_ext->u.response);
10640 if (rc != MBX_SUCCESS) { 10761 if (rc != MBX_SUCCESS) {
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
10711 uint32_t shdr_status, shdr_add_status; 10832 uint32_t shdr_status, shdr_add_status;
10712 union lpfc_sli4_cfg_shdr *shdr; 10833 union lpfc_sli4_cfg_shdr *shdr;
10713 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10834 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
10835 struct dma_address *page;
10714 10836
10715 if (!phba->sli4_hba.pc_sli4_params.supported) 10837 if (!phba->sli4_hba.pc_sli4_params.supported)
10716 hw_page_size = SLI4_PAGE_SIZE; 10838 hw_page_size = SLI4_PAGE_SIZE;
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
10724 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 10846 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
10725 length, LPFC_SLI4_MBX_EMBED); 10847 length, LPFC_SLI4_MBX_EMBED);
10726 wq_create = &mbox->u.mqe.un.wq_create; 10848 wq_create = &mbox->u.mqe.un.wq_create;
10849 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
10727 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 10850 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
10728 wq->page_count); 10851 wq->page_count);
10729 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 10852 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
10730 cq->queue_id); 10853 cq->queue_id);
10854 bf_set(lpfc_mbox_hdr_version, &shdr->request,
10855 phba->sli4_hba.pc_sli4_params.wqv);
10856 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
10857 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
10858 wq->entry_count);
10859 switch (wq->entry_size) {
10860 default:
10861 case 64:
10862 bf_set(lpfc_mbx_wq_create_wqe_size,
10863 &wq_create->u.request_1,
10864 LPFC_WQ_WQE_SIZE_64);
10865 break;
10866 case 128:
10867 bf_set(lpfc_mbx_wq_create_wqe_size,
10868 &wq_create->u.request_1,
10869 LPFC_WQ_WQE_SIZE_128);
10870 break;
10871 }
10872 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
10873 (PAGE_SIZE/SLI4_PAGE_SIZE));
10874 page = wq_create->u.request_1.page;
10875 } else {
10876 page = wq_create->u.request.page;
10877 }
10731 list_for_each_entry(dmabuf, &wq->page_list, list) { 10878 list_for_each_entry(dmabuf, &wq->page_list, list) {
10732 memset(dmabuf->virt, 0, hw_page_size); 10879 memset(dmabuf->virt, 0, hw_page_size);
10733 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10880 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
10734 putPaddrLow(dmabuf->phys); 10881 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
10735 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
10736 putPaddrHigh(dmabuf->phys);
10737 } 10882 }
10738 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10883 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10739 /* The IOCTL status is embedded in the mailbox subheader. */ 10884 /* The IOCTL status is embedded in the mailbox subheader. */
10740 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
10741 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10885 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10742 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10886 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10743 if (shdr_status || shdr_add_status || rc) { 10887 if (shdr_status || shdr_add_status || rc) {
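For version-1 WQ_CREATE the request now carries the WQE count, a WQE size selector (64 or 128 bytes) and the host page size expressed as PAGE_SIZE / SLI4_PAGE_SIZE. The short sketch below only illustrates that page-size encoding for a few hypothetical host page sizes:

#include <stdio.h>

/* Illustrative: the v1 request expresses the host page size in 4 KiB
 * SLI4 pages, i.e. PAGE_SIZE / SLI4_PAGE_SIZE from the hunk above. */
int main(void)
{
    unsigned long sli4_page = 4096;
    unsigned long host_pages[] = { 4096, 16384, 65536 };

    for (unsigned int i = 0; i < sizeof(host_pages) / sizeof(host_pages[0]); i++)
        printf("host PAGE_SIZE %6lu -> page_size field %lu\n",
               host_pages[i], host_pages[i] / sli4_page);
    return 0;
}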
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10815 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10959 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
10816 length, LPFC_SLI4_MBX_EMBED); 10960 length, LPFC_SLI4_MBX_EMBED);
10817 rq_create = &mbox->u.mqe.un.rq_create; 10961 rq_create = &mbox->u.mqe.un.rq_create;
10818 switch (hrq->entry_count) { 10962 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
10819 default: 10963 bf_set(lpfc_mbox_hdr_version, &shdr->request,
10820 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10964 phba->sli4_hba.pc_sli4_params.rqv);
10821 "2535 Unsupported RQ count. (%d)\n", 10965 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
10822 hrq->entry_count); 10966 bf_set(lpfc_rq_context_rqe_count_1,
10823 if (hrq->entry_count < 512) 10967 &rq_create->u.request.context,
10824 return -EINVAL; 10968 hrq->entry_count);
10825 /* otherwise default to smallest count (drop through) */ 10969 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
10826 case 512: 10970 } else {
10827 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10971 switch (hrq->entry_count) {
10828 LPFC_RQ_RING_SIZE_512); 10972 default:
10829 break; 10973 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10830 case 1024: 10974 "2535 Unsupported RQ count. (%d)\n",
10831 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10975 hrq->entry_count);
10832 LPFC_RQ_RING_SIZE_1024); 10976 if (hrq->entry_count < 512)
10833 break; 10977 return -EINVAL;
10834 case 2048: 10978 /* otherwise default to smallest count (drop through) */
10835 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10979 case 512:
10836 LPFC_RQ_RING_SIZE_2048); 10980 bf_set(lpfc_rq_context_rqe_count,
10837 break; 10981 &rq_create->u.request.context,
10838 case 4096: 10982 LPFC_RQ_RING_SIZE_512);
10839 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10983 break;
10840 LPFC_RQ_RING_SIZE_4096); 10984 case 1024:
10841 break; 10985 bf_set(lpfc_rq_context_rqe_count,
10986 &rq_create->u.request.context,
10987 LPFC_RQ_RING_SIZE_1024);
10988 break;
10989 case 2048:
10990 bf_set(lpfc_rq_context_rqe_count,
10991 &rq_create->u.request.context,
10992 LPFC_RQ_RING_SIZE_2048);
10993 break;
10994 case 4096:
10995 bf_set(lpfc_rq_context_rqe_count,
10996 &rq_create->u.request.context,
10997 LPFC_RQ_RING_SIZE_4096);
10998 break;
10999 }
11000 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
11001 LPFC_HDR_BUF_SIZE);
10842 } 11002 }
10843 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 11003 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
10844 cq->queue_id); 11004 cq->queue_id);
10845 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 11005 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
10846 hrq->page_count); 11006 hrq->page_count);
10847 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
10848 LPFC_HDR_BUF_SIZE);
10849 list_for_each_entry(dmabuf, &hrq->page_list, list) { 11007 list_for_each_entry(dmabuf, &hrq->page_list, list) {
10850 memset(dmabuf->virt, 0, hw_page_size); 11008 memset(dmabuf->virt, 0, hw_page_size);
10851 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11009 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10855 } 11013 }
10856 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11014 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10857 /* The IOCTL status is embedded in the mailbox subheader. */ 11015 /* The IOCTL status is embedded in the mailbox subheader. */
10858 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
10859 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11016 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10860 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11017 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10861 if (shdr_status || shdr_add_status || rc) { 11018 if (shdr_status || shdr_add_status || rc) {
@@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10881 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11038 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10882 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 11039 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
10883 length, LPFC_SLI4_MBX_EMBED); 11040 length, LPFC_SLI4_MBX_EMBED);
10884 switch (drq->entry_count) { 11041 bf_set(lpfc_mbox_hdr_version, &shdr->request,
10885 default: 11042 phba->sli4_hba.pc_sli4_params.rqv);
10886 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11043 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
10887 "2536 Unsupported RQ count. (%d)\n", 11044 bf_set(lpfc_rq_context_rqe_count_1,
10888 drq->entry_count); 11045 &rq_create->u.request.context,
10889 if (drq->entry_count < 512) 11046 hrq->entry_count);
10890 return -EINVAL; 11047 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
10891 /* otherwise default to smallest count (drop through) */ 11048 } else {
10892 case 512: 11049 switch (drq->entry_count) {
10893 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11050 default:
10894 LPFC_RQ_RING_SIZE_512); 11051 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10895 break; 11052 "2536 Unsupported RQ count. (%d)\n",
10896 case 1024: 11053 drq->entry_count);
10897 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11054 if (drq->entry_count < 512)
10898 LPFC_RQ_RING_SIZE_1024); 11055 return -EINVAL;
10899 break; 11056 /* otherwise default to smallest count (drop through) */
10900 case 2048: 11057 case 512:
10901 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11058 bf_set(lpfc_rq_context_rqe_count,
10902 LPFC_RQ_RING_SIZE_2048); 11059 &rq_create->u.request.context,
10903 break; 11060 LPFC_RQ_RING_SIZE_512);
10904 case 4096: 11061 break;
10905 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 11062 case 1024:
10906 LPFC_RQ_RING_SIZE_4096); 11063 bf_set(lpfc_rq_context_rqe_count,
10907 break; 11064 &rq_create->u.request.context,
11065 LPFC_RQ_RING_SIZE_1024);
11066 break;
11067 case 2048:
11068 bf_set(lpfc_rq_context_rqe_count,
11069 &rq_create->u.request.context,
11070 LPFC_RQ_RING_SIZE_2048);
11071 break;
11072 case 4096:
11073 bf_set(lpfc_rq_context_rqe_count,
11074 &rq_create->u.request.context,
11075 LPFC_RQ_RING_SIZE_4096);
11076 break;
11077 }
11078 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
11079 LPFC_DATA_BUF_SIZE);
10908 } 11080 }
10909 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 11081 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
10910 cq->queue_id); 11082 cq->queue_id);
10911 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 11083 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
10912 drq->page_count); 11084 drq->page_count);
10913 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
10914 LPFC_DATA_BUF_SIZE);
10915 list_for_each_entry(dmabuf, &drq->page_list, list) { 11085 list_for_each_entry(dmabuf, &drq->page_list, list) {
10916 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11086 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
10917 putPaddrLow(dmabuf->phys); 11087 putPaddrLow(dmabuf->phys);
@@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
11580 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 11750 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
11581 char *type_names[] = FC_TYPE_NAMES_INIT; 11751 char *type_names[] = FC_TYPE_NAMES_INIT;
11582 struct fc_vft_header *fc_vft_hdr; 11752 struct fc_vft_header *fc_vft_hdr;
11753 uint32_t *header = (uint32_t *) fc_hdr;
11583 11754
11584 switch (fc_hdr->fh_r_ctl) { 11755 switch (fc_hdr->fh_r_ctl) {
11585 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 11756 case FC_RCTL_DD_UNCAT: /* uncategorized information */
@@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
11628 default: 11799 default:
11629 goto drop; 11800 goto drop;
11630 } 11801 }
11802
11631 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11803 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11632 "2538 Received frame rctl:%s type:%s\n", 11804 "2538 Received frame rctl:%s type:%s "
11805 "Frame Data:%08x %08x %08x %08x %08x %08x\n",
11633 rctl_names[fc_hdr->fh_r_ctl], 11806 rctl_names[fc_hdr->fh_r_ctl],
11634 type_names[fc_hdr->fh_type]); 11807 type_names[fc_hdr->fh_type],
11808 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
11809 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
11810 be32_to_cpu(header[4]), be32_to_cpu(header[5]));
11635 return 0; 11811 return 0;
11636drop: 11812drop:
11637 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 11813 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
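The expanded 2538 log message now dumps the first six 32-bit words of the received FC frame header, each converted from big-endian wire order. A stand-alone sketch of that dump, with a fabricated 24-byte header and be32toh() from endian.h standing in for be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>                  /* be32toh(); glibc/BSD assumption */

int main(void)
{
    /* A fabricated 24-byte FC frame header in network (big-endian) order. */
    uint8_t hdr[24] = {
        0x81, 0x00, 0x01, 0x23,      /* R_CTL, D_ID                  */
        0x00, 0x45, 0x67, 0x89,      /* CS_CTL, S_ID                 */
        0x81, 0x00, 0x00, 0x00,      /* TYPE, F_CTL                  */
        0x00, 0x00, 0x00, 0x00,      /* SEQ_ID, DF_CTL, SEQ_CNT      */
        0xff, 0xff, 0x00, 0x01,      /* OX_ID, RX_ID                 */
        0x00, 0x00, 0x00, 0x00,      /* parameter                    */
    };
    uint32_t w[6];

    memcpy(w, hdr, sizeof(w));       /* avoid aliasing/alignment pitfalls */
    printf("Frame Data:");
    for (int i = 0; i < 6; i++)
        printf(" %08x", (unsigned int)be32toh(w[i]));
    printf("\n");
    return 0;
}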
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
11928} 12104}
11929 12105
11930/** 12106/**
11931 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler 12107 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
11932 * @phba: Pointer to HBA context object. 12108 * @phba: Pointer to HBA context object.
11933 * @cmd_iocbq: pointer to the command iocbq structure. 12109 * @cmd_iocbq: pointer to the command iocbq structure.
11934 * @rsp_iocbq: pointer to the response iocbq structure. 12110 * @rsp_iocbq: pointer to the response iocbq structure.
11935 * 12111 *
11936 * This function handles the sequence abort accept iocb command complete 12112 * This function handles the sequence abort response iocb command complete
11937 * event. It properly releases the memory allocated to the sequence abort 12113 * event. It properly releases the memory allocated to the sequence abort
11938 * accept iocb. 12114 * accept iocb.
11939 **/ 12115 **/
11940static void 12116static void
11941lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, 12117lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
11942 struct lpfc_iocbq *cmd_iocbq, 12118 struct lpfc_iocbq *cmd_iocbq,
11943 struct lpfc_iocbq *rsp_iocbq) 12119 struct lpfc_iocbq *rsp_iocbq)
11944{ 12120{
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
11947} 12123}
11948 12124
11949/** 12125/**
11950 * lpfc_sli4_seq_abort_acc - Accept sequence abort 12126 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
11951 * @phba: Pointer to HBA context object. 12127 * @phba: Pointer to HBA context object.
11952 * @fc_hdr: pointer to a FC frame header. 12128 * @fc_hdr: pointer to a FC frame header.
11953 * 12129 *
11954 * This function sends a basic accept to a previous unsol sequence abort 12130 * This function sends a basic response to a previous unsol sequence abort
11955 * event after aborting the sequence handling. 12131 * event after aborting the sequence handling.
11956 **/ 12132 **/
11957static void 12133static void
11958lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, 12134lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
11959 struct fc_frame_header *fc_hdr) 12135 struct fc_frame_header *fc_hdr)
11960{ 12136{
11961 struct lpfc_iocbq *ctiocb = NULL; 12137 struct lpfc_iocbq *ctiocb = NULL;
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11963 uint16_t oxid, rxid; 12139 uint16_t oxid, rxid;
11964 uint32_t sid, fctl; 12140 uint32_t sid, fctl;
11965 IOCB_t *icmd; 12141 IOCB_t *icmd;
12142 int rc;
11966 12143
11967 if (!lpfc_is_link_up(phba)) 12144 if (!lpfc_is_link_up(phba))
11968 return; 12145 return;
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11983 + phba->sli4_hba.max_cfg_param.xri_base)) 12160 + phba->sli4_hba.max_cfg_param.xri_base))
11984 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 12161 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
11985 12162
11986 /* Allocate buffer for acc iocb */ 12163 /* Allocate buffer for rsp iocb */
11987 ctiocb = lpfc_sli_get_iocbq(phba); 12164 ctiocb = lpfc_sli_get_iocbq(phba);
11988 if (!ctiocb) 12165 if (!ctiocb)
11989 return; 12166 return;
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
12008 12185
12009 ctiocb->iocb_cmpl = NULL; 12186 ctiocb->iocb_cmpl = NULL;
12010 ctiocb->vport = phba->pport; 12187 ctiocb->vport = phba->pport;
12011 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; 12188 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
12189 ctiocb->sli4_xritag = NO_XRI;
12190
12191 /* If the oxid maps to the FCP XRI range or if it is out of range,
12192 * send a BLS_RJT. The driver no longer has that exchange.
12193 * Override the IOCB for a BA_RJT.
12194 */
12195 if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
12196 phba->sli4_hba.max_cfg_param.xri_base) ||
12197 oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
12198 phba->sli4_hba.max_cfg_param.xri_base)) {
12199 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
12200 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
12201 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
12202 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
12203 }
12012 12204
12013 if (fctl & FC_FC_EX_CTX) { 12205 if (fctl & FC_FC_EX_CTX) {
12014 /* ABTS sent by responder to CT exchange, construction 12206 /* ABTS sent by responder to CT exchange, construction
12015 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 12207 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
12016 * field and RX_ID from ABTS for RX_ID field. 12208 * field and RX_ID from ABTS for RX_ID field.
12017 */ 12209 */
12018 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); 12210 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
12019 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); 12211 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
12020 ctiocb->sli4_xritag = oxid;
12021 } else { 12212 } else {
12022 /* ABTS sent by initiator to CT exchange, construction 12213 /* ABTS sent by initiator to CT exchange, construction
12023 * of BA_ACC will need to allocate a new XRI as for the 12214 * of BA_ACC will need to allocate a new XRI as for the
12024 * XRI_TAG and RX_ID fields. 12215 * XRI_TAG and RX_ID fields.
12025 */ 12216 */
12026 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); 12217 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
12027 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); 12218 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
12028 ctiocb->sli4_xritag = NO_XRI;
12029 } 12219 }
12030 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); 12220 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
12031 12221
12032 /* Xmit CT abts accept on exchange <xid> */ 12222 /* Xmit CT abts response on exchange <xid> */
12033 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 12223 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12034 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", 12224 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
12035 CMD_XMIT_BLS_RSP64_CX, phba->link_state); 12225 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
12036 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 12226
12227 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
12228 if (rc == IOCB_ERROR) {
12229 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
12230 "2925 Failed to issue CT ABTS RSP x%x on "
12231 "xri x%x, Data x%x\n",
12232 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
12233 phba->link_state);
12234 lpfc_sli_release_iocbq(phba, ctiocb);
12235 }
12037} 12236}
12038 12237
12039/** 12238/**
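The behavioural change in this hunk is that the driver no longer unconditionally accepts every ABTS: when the OX_ID falls outside the XRI range reserved for ELS exchanges, the prepared BA_ACC is overridden into a BA_RJT (reason "unable", explanation "invalid OX_ID/RX_ID"), and a failed lpfc_sli_issue_iocb now releases the iocbq instead of leaking it. A minimal user-space sketch of that accept/reject decision, with hypothetical names and a deliberately simpler range test than the two-part comparison in the patch, could be:

#include <stdint.h>
#include <stdio.h>

enum bls_reply { BLS_BA_ACC, BLS_BA_RJT };

/*
 * Hypothetical sketch: xri_base and els_xri_cnt stand in for the SLI4
 * configuration the driver reads at init time; only the idea of the
 * range check is kept here.
 */
static enum bls_reply pick_bls_reply(uint16_t oxid, uint16_t xri_base,
				     uint16_t els_xri_cnt)
{
	/* OX_ID outside the ELS XRI window: the driver does not (or no
	 * longer) own that exchange, so reject rather than accept. */
	if (oxid < xri_base || oxid >= (uint16_t)(xri_base + els_xri_cnt))
		return BLS_BA_RJT;
	return BLS_BA_ACC;
}

int main(void)
{
	printf("oxid 0x010 -> %s\n",
	       pick_bls_reply(0x010, 0x000, 0x040) == BLS_BA_ACC ? "BA_ACC" : "BA_RJT");
	printf("oxid 0x900 -> %s\n",
	       pick_bls_reply(0x900, 0x000, 0x040) == BLS_BA_ACC ? "BA_ACC" : "BA_RJT");
	return 0;
}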
@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
12081 lpfc_in_buf_free(phba, &dmabuf->dbuf); 12280 lpfc_in_buf_free(phba, &dmabuf->dbuf);
12082 } 12281 }
12083 /* Send basic accept (BA_ACC) to the abort requester */ 12282 /* Send basic accept (BA_ACC) to the abort requester */
12084 lpfc_sli4_seq_abort_acc(phba, &fc_hdr); 12283 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
12085} 12284}
12086 12285
12087/** 12286/**
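As the comments in the earlier hunk explain, how the BA_ACC sent from this handler is filled in depends on who issued the ABTS: the EX_CTX bit of F_CTL says whether it came from the exchange responder (reuse OX_ID and RX_ID from the ABTS) or from the originator (no RX_ID to echo yet). A small illustrative sketch of that split, using hypothetical stand-in fields rather than the driver's IOCB layout:

#include <stdint.h>
#include <stdio.h>

#define FC_FC_EX_CTX	(1u << 23)	/* F_CTL: sender is the exchange responder */
#define XRI_UNASSIGNED	0xffffu		/* illustrative "no XRI" marker */

struct ba_acc_fields {			/* hypothetical, not the driver's bls_rsp union */
	uint16_t oxid;
	uint16_t rxid;
	int from_responder;
};

static struct ba_acc_fields build_ba_acc(uint32_t fctl, uint16_t oxid,
					 uint16_t rxid)
{
	struct ba_acc_fields acc = { .oxid = oxid };

	if (fctl & FC_FC_EX_CTX) {
		acc.from_responder = 1;
		acc.rxid = rxid;		/* echo the responder's RX_ID */
	} else {
		acc.from_responder = 0;
		acc.rxid = XRI_UNASSIGNED;	/* no RX_ID assigned yet */
	}
	return acc;
}

int main(void)
{
	struct ba_acc_fields a = build_ba_acc(FC_FC_EX_CTX, 0x1234, 0x0042);
	struct ba_acc_fields b = build_ba_acc(0, 0x1234, 0xffff);

	printf("ABTS from responder:  oxid=0x%04x rxid=0x%04x\n", a.oxid, a.rxid);
	printf("ABTS from originator: oxid=0x%04x rxid=0x%04x\n", b.oxid, b.rxid);
	return 0;
}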
@@ -12772,7 +12971,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
12772 * record and processing it one at a time starting from the @fcf_index 12971 * record and processing it one at a time starting from the @fcf_index
12773 * for initial FCF discovery or fast FCF failover rediscovery. 12972 * for initial FCF discovery or fast FCF failover rediscovery.
12774 * 12973 *
12775 * Return 0 if the mailbox command is submitted sucessfully, none 0 12974 * Return 0 if the mailbox command is submitted successfully, none 0
12776 * otherwise. 12975 * otherwise.
12777 **/ 12976 **/
12778int 12977int
@@ -12833,7 +13032,7 @@ fail_fcf_scan:
12833 * This routine is invoked to read an FCF record indicated by @fcf_index 13032 * This routine is invoked to read an FCF record indicated by @fcf_index
12834 * and to use it for FLOGI roundrobin FCF failover. 13033 * and to use it for FLOGI roundrobin FCF failover.
12835 * 13034 *
12836 * Return 0 if the mailbox command is submitted sucessfully, none 0 13035 * Return 0 if the mailbox command is submitted successfully, none 0
12837 * otherwise. 13036 * otherwise.
12838 **/ 13037 **/
12839int 13038int
@@ -12879,7 +13078,7 @@ fail_fcf_read:
12879 * This routine is invoked to read an FCF record indicated by @fcf_index to 13078 * This routine is invoked to read an FCF record indicated by @fcf_index to
12880 * determine whether it's eligible for FLOGI roundrobin failover list. 13079 * determine whether it's eligible for FLOGI roundrobin failover list.
12881 * 13080 *
12882 * Return 0 if the mailbox command is submitted sucessfully, none 0 13081 * Return 0 if the mailbox command is submitted successfully, none 0
12883 * otherwise. 13082 * otherwise.
12884 **/ 13083 **/
12885int 13084int
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 595056b89608..1a3cbf88f2ce 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0a4d376dbca5..2404d1d65563 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.21" 21#define LPFC_DRIVER_VERSION "8.3.22"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
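The version bump above touches only LPFC_DRIVER_VERSION; anything that reports the driver revision picks the string up from this single macro, so one edit updates every report. A trivial stand-alone illustration (not driver code, banner text invented for the example):

#include <stdio.h>

#define LPFC_DRIVER_NAME	"lpfc"
#define LPFC_DRIVER_VERSION	"8.3.22"

int main(void)
{
	/* Any log banner built from the macro follows the bump automatically. */
	printf("%s driver version %s\n", LPFC_DRIVER_NAME, LPFC_DRIVER_VERSION);
	return 0;
}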