author     James Smart <james.smart@emulex.com>        2011-12-13 13:22:37 -0500
committer  James Bottomley <JBottomley@Parallels.com>  2011-12-15 01:57:45 -0500
commit     2e90f4b5a2a0ce5ab72c0c81c74269bd0a62522b (patch)
tree       8e131436d13de07d6ee771243384e1d91bbb68c4 /drivers/scsi/lpfc
parent     df9e1b59f9e4671930a7762b9518461df4ea85f5 (diff)
[SCSI] lpfc 8.3.28: Critical Miscellaneous fixes
- Make lpfc_sli4_pci_mem_unset interface type aware (CR 124390)
- Convert byte count to word count when calling __iowrite32_copy (CR 122550); see the sketch after this list
- Check the ERR1 and ERR2 registers for error attention due to SLI Port state affected by forced debug dump (CR 122986, 122426, 124859)
- Use the lpfc_readl routine instead of readl for the port status register read in lpfc_handle_eratt_s4 (CR 125403)
- Call lpfc_sli4_queue_destroy inside lpfc_sli4_brdreset before doing a PCI function reset (CR 125124, 125168, 125572, 125622)
- Zero out the HBQ when it is allocated (CR 125663)
- Alter port reset log messages to indicate error type (CR 125989)
- Add proper NULL pointer checking to all the places that access the queue memory (CR 125832)

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
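Context for the __iowrite32_copy bullet above: the kernel's __iowrite32_copy(to, from, count) takes its count argument as a number of 32-bit words, not bytes, so a caller holding a byte length must divide by sizeof(uint32_t) before calling it. A minimal sketch of the pattern follows; the helper name is illustrative only, the actual change is in lpfc_memcpy_to_slim() in the lpfc_compat.h hunk below.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper mirroring lpfc_memcpy_to_slim(): callers pass a
 * length in bytes, but __iowrite32_copy() expects a 32-bit word count.
 */
static inline void example_memcpy_to_slim(void __iomem *dest, void *src,
					  unsigned int bytes)
{
	/* Wrong: passing "bytes" would copy four times too much data. */
	/* __iowrite32_copy(dest, src, bytes); */

	/* Right: convert the byte count to a word count before copying. */
	__iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
}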
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc_compat.h     5
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  172
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     164
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c        4
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      135
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h       9
6 files changed, 350 insertions, 139 deletions
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 75e2e569dede..c88e556ea62e 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
82static inline void 82static inline void
83lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes) 83lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
84{ 84{
85 __iowrite32_copy(dest, src, bytes); 85 /* convert bytes in argument list to word count for copy function */
86 __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
86} 87}
87 88
88static inline void 89static inline void
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 28382596fb9a..3587a3fe8fcb 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1997 /* Get slow-path event queue information */ 1997 /* Get slow-path event queue information */
1998 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1998 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
1999 "Slow-path EQ information:\n"); 1999 "Slow-path EQ information:\n");
2000 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2000 if (phba->sli4_hba.sp_eq) {
2001 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2001 "\tEQID[%02d], " 2002 "\tEQID[%02d], "
2002 "QE-COUNT[%04d], QE-SIZE[%04d], " 2003 "QE-COUNT[%04d], QE-SIZE[%04d], "
2003 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2004 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2006 phba->sli4_hba.sp_eq->entry_size, 2007 phba->sli4_hba.sp_eq->entry_size,
2007 phba->sli4_hba.sp_eq->host_index, 2008 phba->sli4_hba.sp_eq->host_index,
2008 phba->sli4_hba.sp_eq->hba_index); 2009 phba->sli4_hba.sp_eq->hba_index);
2010 }
2009 2011
2010 /* Get fast-path event queue information */ 2012 /* Get fast-path event queue information */
2011 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2013 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2012 "Fast-path EQ information:\n"); 2014 "Fast-path EQ information:\n");
2013 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 2015 if (phba->sli4_hba.fp_eq) {
2014 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2016 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
2017 fcp_qidx++) {
2018 if (phba->sli4_hba.fp_eq[fcp_qidx]) {
2019 len += snprintf(pbuffer+len,
2020 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2015 "\tEQID[%02d], " 2021 "\tEQID[%02d], "
2016 "QE-COUNT[%04d], QE-SIZE[%04d], " 2022 "QE-COUNT[%04d], QE-SIZE[%04d], "
2017 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2023 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2020 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, 2026 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
2021 phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 2027 phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
2022 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); 2028 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
2029 }
2030 }
2023 } 2031 }
2024 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2032 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2025 2033
2026 /* Get mailbox complete queue information */ 2034 /* Get mailbox complete queue information */
2027 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2035 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2028 "Slow-path MBX CQ information:\n"); 2036 "Slow-path MBX CQ information:\n");
2029 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2037 if (phba->sli4_hba.mbx_cq) {
2038 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2030 "Associated EQID[%02d]:\n", 2039 "Associated EQID[%02d]:\n",
2031 phba->sli4_hba.mbx_cq->assoc_qid); 2040 phba->sli4_hba.mbx_cq->assoc_qid);
2032 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2041 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2033 "\tCQID[%02d], " 2042 "\tCQID[%02d], "
2034 "QE-COUNT[%04d], QE-SIZE[%04d], " 2043 "QE-COUNT[%04d], QE-SIZE[%04d], "
2035 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2044 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2038 phba->sli4_hba.mbx_cq->entry_size, 2047 phba->sli4_hba.mbx_cq->entry_size,
2039 phba->sli4_hba.mbx_cq->host_index, 2048 phba->sli4_hba.mbx_cq->host_index,
2040 phba->sli4_hba.mbx_cq->hba_index); 2049 phba->sli4_hba.mbx_cq->hba_index);
2050 }
2041 2051
2042 /* Get slow-path complete queue information */ 2052 /* Get slow-path complete queue information */
2043 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2053 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2044 "Slow-path ELS CQ information:\n"); 2054 "Slow-path ELS CQ information:\n");
2045 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2055 if (phba->sli4_hba.els_cq) {
2056 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2046 "Associated EQID[%02d]:\n", 2057 "Associated EQID[%02d]:\n",
2047 phba->sli4_hba.els_cq->assoc_qid); 2058 phba->sli4_hba.els_cq->assoc_qid);
2048 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2059 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2049 "\tCQID [%02d], " 2060 "\tCQID [%02d], "
2050 "QE-COUNT[%04d], QE-SIZE[%04d], " 2061 "QE-COUNT[%04d], QE-SIZE[%04d], "
2051 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2062 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2054 phba->sli4_hba.els_cq->entry_size, 2065 phba->sli4_hba.els_cq->entry_size,
2055 phba->sli4_hba.els_cq->host_index, 2066 phba->sli4_hba.els_cq->host_index,
2056 phba->sli4_hba.els_cq->hba_index); 2067 phba->sli4_hba.els_cq->hba_index);
2068 }
2057 2069
2058 /* Get fast-path complete queue information */ 2070 /* Get fast-path complete queue information */
2059 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2071 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2060 "Fast-path FCP CQ information:\n"); 2072 "Fast-path FCP CQ information:\n");
2061 fcp_qidx = 0; 2073 fcp_qidx = 0;
2062 do { 2074 if (phba->sli4_hba.fcp_cq) {
2063 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2075 do {
2076 if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
2077 len += snprintf(pbuffer+len,
2078 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2064 "Associated EQID[%02d]:\n", 2079 "Associated EQID[%02d]:\n",
2065 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); 2080 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
2066 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2081 len += snprintf(pbuffer+len,
2082 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2067 "\tCQID[%02d], " 2083 "\tCQID[%02d], "
2068 "QE-COUNT[%04d], QE-SIZE[%04d], " 2084 "QE-COUNT[%04d], QE-SIZE[%04d], "
2069 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2085 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2072 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, 2088 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
2073 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 2089 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
2074 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 2090 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
2075 } while (++fcp_qidx < phba->cfg_fcp_eq_count); 2091 }
2076 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2092 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
2093 len += snprintf(pbuffer+len,
2094 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2095 }
2077 2096
2078 /* Get mailbox queue information */ 2097 /* Get mailbox queue information */
2079 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2098 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2080 "Slow-path MBX MQ information:\n"); 2099 "Slow-path MBX MQ information:\n");
2081 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2100 if (phba->sli4_hba.mbx_wq) {
2101 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2082 "Associated CQID[%02d]:\n", 2102 "Associated CQID[%02d]:\n",
2083 phba->sli4_hba.mbx_wq->assoc_qid); 2103 phba->sli4_hba.mbx_wq->assoc_qid);
2084 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2104 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2085 "\tWQID[%02d], " 2105 "\tWQID[%02d], "
2086 "QE-COUNT[%04d], QE-SIZE[%04d], " 2106 "QE-COUNT[%04d], QE-SIZE[%04d], "
2087 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2107 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2090 phba->sli4_hba.mbx_wq->entry_size, 2110 phba->sli4_hba.mbx_wq->entry_size,
2091 phba->sli4_hba.mbx_wq->host_index, 2111 phba->sli4_hba.mbx_wq->host_index,
2092 phba->sli4_hba.mbx_wq->hba_index); 2112 phba->sli4_hba.mbx_wq->hba_index);
2113 }
2093 2114
2094 /* Get slow-path work queue information */ 2115 /* Get slow-path work queue information */
2095 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2116 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2096 "Slow-path ELS WQ information:\n"); 2117 "Slow-path ELS WQ information:\n");
2097 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2118 if (phba->sli4_hba.els_wq) {
2119 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2098 "Associated CQID[%02d]:\n", 2120 "Associated CQID[%02d]:\n",
2099 phba->sli4_hba.els_wq->assoc_qid); 2121 phba->sli4_hba.els_wq->assoc_qid);
2100 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2122 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2101 "\tWQID[%02d], " 2123 "\tWQID[%02d], "
2102 "QE-COUNT[%04d], QE-SIZE[%04d], " 2124 "QE-COUNT[%04d], QE-SIZE[%04d], "
2103 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2125 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2106 phba->sli4_hba.els_wq->entry_size, 2128 phba->sli4_hba.els_wq->entry_size,
2107 phba->sli4_hba.els_wq->host_index, 2129 phba->sli4_hba.els_wq->host_index,
2108 phba->sli4_hba.els_wq->hba_index); 2130 phba->sli4_hba.els_wq->hba_index);
2131 }
2109 2132
2110 /* Get fast-path work queue information */ 2133 /* Get fast-path work queue information */
2111 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2134 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2112 "Fast-path FCP WQ information:\n"); 2135 "Fast-path FCP WQ information:\n");
2113 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { 2136 if (phba->sli4_hba.fcp_wq) {
2114 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2137 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
2138 fcp_qidx++) {
2139 if (!phba->sli4_hba.fcp_wq[fcp_qidx])
2140 continue;
2141 len += snprintf(pbuffer+len,
2142 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2115 "Associated CQID[%02d]:\n", 2143 "Associated CQID[%02d]:\n",
2116 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); 2144 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
2117 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2145 len += snprintf(pbuffer+len,
2146 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2118 "\tWQID[%02d], " 2147 "\tWQID[%02d], "
2119 "QE-COUNT[%04d], WQE-SIZE[%04d], " 2148 "QE-COUNT[%04d], WQE-SIZE[%04d], "
2120 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2149 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2123 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size, 2152 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
2124 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, 2153 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
2125 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); 2154 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
2155 }
2156 len += snprintf(pbuffer+len,
2157 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2126 } 2158 }
2127 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2128 2159
2129 /* Get receive queue information */ 2160 /* Get receive queue information */
2130 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2161 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2131 "Slow-path RQ information:\n"); 2162 "Slow-path RQ information:\n");
2132 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2163 if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
2164 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2133 "Associated CQID[%02d]:\n", 2165 "Associated CQID[%02d]:\n",
2134 phba->sli4_hba.hdr_rq->assoc_qid); 2166 phba->sli4_hba.hdr_rq->assoc_qid);
2135 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2167 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2136 "\tHQID[%02d], " 2168 "\tHQID[%02d], "
2137 "QE-COUNT[%04d], QE-SIZE[%04d], " 2169 "QE-COUNT[%04d], QE-SIZE[%04d], "
2138 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2170 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2141 phba->sli4_hba.hdr_rq->entry_size, 2173 phba->sli4_hba.hdr_rq->entry_size,
2142 phba->sli4_hba.hdr_rq->host_index, 2174 phba->sli4_hba.hdr_rq->host_index,
2143 phba->sli4_hba.hdr_rq->hba_index); 2175 phba->sli4_hba.hdr_rq->hba_index);
2144 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2176 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
2145 "\tDQID[%02d], " 2177 "\tDQID[%02d], "
2146 "QE-COUNT[%04d], QE-SIZE[%04d], " 2178 "QE-COUNT[%04d], QE-SIZE[%04d], "
2147 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2179 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
2150 phba->sli4_hba.dat_rq->entry_size, 2182 phba->sli4_hba.dat_rq->entry_size,
2151 phba->sli4_hba.dat_rq->host_index, 2183 phba->sli4_hba.dat_rq->host_index,
2152 phba->sli4_hba.dat_rq->hba_index); 2184 phba->sli4_hba.dat_rq->hba_index);
2153 2185 }
2154 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 2186 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2155} 2187}
2156 2188
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2360 switch (quetp) { 2392 switch (quetp) {
2361 case LPFC_IDIAG_EQ: 2393 case LPFC_IDIAG_EQ:
2362 /* Slow-path event queue */ 2394 /* Slow-path event queue */
2363 if (phba->sli4_hba.sp_eq->queue_id == queid) { 2395 if (phba->sli4_hba.sp_eq &&
2396 phba->sli4_hba.sp_eq->queue_id == queid) {
2364 /* Sanity check */ 2397 /* Sanity check */
2365 rc = lpfc_idiag_que_param_check( 2398 rc = lpfc_idiag_que_param_check(
2366 phba->sli4_hba.sp_eq, index, count); 2399 phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2370 goto pass_check; 2403 goto pass_check;
2371 } 2404 }
2372 /* Fast-path event queue */ 2405 /* Fast-path event queue */
2373 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { 2406 if (phba->sli4_hba.fp_eq) {
2374 if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) { 2407 for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
2375 /* Sanity check */ 2408 if (phba->sli4_hba.fp_eq[qidx] &&
2376 rc = lpfc_idiag_que_param_check( 2409 phba->sli4_hba.fp_eq[qidx]->queue_id ==
2410 queid) {
2411 /* Sanity check */
2412 rc = lpfc_idiag_que_param_check(
2377 phba->sli4_hba.fp_eq[qidx], 2413 phba->sli4_hba.fp_eq[qidx],
2378 index, count); 2414 index, count);
2379 if (rc) 2415 if (rc)
2380 goto error_out; 2416 goto error_out;
2381 idiag.ptr_private = phba->sli4_hba.fp_eq[qidx]; 2417 idiag.ptr_private =
2382 goto pass_check; 2418 phba->sli4_hba.fp_eq[qidx];
2419 goto pass_check;
2420 }
2383 } 2421 }
2384 } 2422 }
2385 goto error_out; 2423 goto error_out;
2386 break; 2424 break;
2387 case LPFC_IDIAG_CQ: 2425 case LPFC_IDIAG_CQ:
2388 /* MBX complete queue */ 2426 /* MBX complete queue */
2389 if (phba->sli4_hba.mbx_cq->queue_id == queid) { 2427 if (phba->sli4_hba.mbx_cq &&
2428 phba->sli4_hba.mbx_cq->queue_id == queid) {
2390 /* Sanity check */ 2429 /* Sanity check */
2391 rc = lpfc_idiag_que_param_check( 2430 rc = lpfc_idiag_que_param_check(
2392 phba->sli4_hba.mbx_cq, index, count); 2431 phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2396 goto pass_check; 2435 goto pass_check;
2397 } 2436 }
2398 /* ELS complete queue */ 2437 /* ELS complete queue */
2399 if (phba->sli4_hba.els_cq->queue_id == queid) { 2438 if (phba->sli4_hba.els_cq &&
2439 phba->sli4_hba.els_cq->queue_id == queid) {
2400 /* Sanity check */ 2440 /* Sanity check */
2401 rc = lpfc_idiag_que_param_check( 2441 rc = lpfc_idiag_que_param_check(
2402 phba->sli4_hba.els_cq, index, count); 2442 phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2406 goto pass_check; 2446 goto pass_check;
2407 } 2447 }
2408 /* FCP complete queue */ 2448 /* FCP complete queue */
2409 qidx = 0; 2449 if (phba->sli4_hba.fcp_cq) {
2410 do { 2450 qidx = 0;
2411 if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) { 2451 do {
2412 /* Sanity check */ 2452 if (phba->sli4_hba.fcp_cq[qidx] &&
2413 rc = lpfc_idiag_que_param_check( 2453 phba->sli4_hba.fcp_cq[qidx]->queue_id ==
2454 queid) {
2455 /* Sanity check */
2456 rc = lpfc_idiag_que_param_check(
2414 phba->sli4_hba.fcp_cq[qidx], 2457 phba->sli4_hba.fcp_cq[qidx],
2415 index, count); 2458 index, count);
2416 if (rc) 2459 if (rc)
2417 goto error_out; 2460 goto error_out;
2418 idiag.ptr_private = 2461 idiag.ptr_private =
2419 phba->sli4_hba.fcp_cq[qidx]; 2462 phba->sli4_hba.fcp_cq[qidx];
2420 goto pass_check; 2463 goto pass_check;
2421 } 2464 }
2422 } while (++qidx < phba->cfg_fcp_eq_count); 2465 } while (++qidx < phba->cfg_fcp_eq_count);
2466 }
2423 goto error_out; 2467 goto error_out;
2424 break; 2468 break;
2425 case LPFC_IDIAG_MQ: 2469 case LPFC_IDIAG_MQ:
2426 /* MBX work queue */ 2470 /* MBX work queue */
2427 if (phba->sli4_hba.mbx_wq->queue_id == queid) { 2471 if (phba->sli4_hba.mbx_wq &&
2472 phba->sli4_hba.mbx_wq->queue_id == queid) {
2428 /* Sanity check */ 2473 /* Sanity check */
2429 rc = lpfc_idiag_que_param_check( 2474 rc = lpfc_idiag_que_param_check(
2430 phba->sli4_hba.mbx_wq, index, count); 2475 phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2433 idiag.ptr_private = phba->sli4_hba.mbx_wq; 2478 idiag.ptr_private = phba->sli4_hba.mbx_wq;
2434 goto pass_check; 2479 goto pass_check;
2435 } 2480 }
2481 goto error_out;
2436 break; 2482 break;
2437 case LPFC_IDIAG_WQ: 2483 case LPFC_IDIAG_WQ:
2438 /* ELS work queue */ 2484 /* ELS work queue */
2439 if (phba->sli4_hba.els_wq->queue_id == queid) { 2485 if (phba->sli4_hba.els_wq &&
2486 phba->sli4_hba.els_wq->queue_id == queid) {
2440 /* Sanity check */ 2487 /* Sanity check */
2441 rc = lpfc_idiag_que_param_check( 2488 rc = lpfc_idiag_que_param_check(
2442 phba->sli4_hba.els_wq, index, count); 2489 phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2446 goto pass_check; 2493 goto pass_check;
2447 } 2494 }
2448 /* FCP work queue */ 2495 /* FCP work queue */
2449 for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { 2496 if (phba->sli4_hba.fcp_wq) {
2450 if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) { 2497 for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
2451 /* Sanity check */ 2498 if (!phba->sli4_hba.fcp_wq[qidx])
2452 rc = lpfc_idiag_que_param_check( 2499 continue;
2500 if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
2501 queid) {
2502 /* Sanity check */
2503 rc = lpfc_idiag_que_param_check(
2453 phba->sli4_hba.fcp_wq[qidx], 2504 phba->sli4_hba.fcp_wq[qidx],
2454 index, count); 2505 index, count);
2455 if (rc) 2506 if (rc)
2456 goto error_out; 2507 goto error_out;
2457 idiag.ptr_private = 2508 idiag.ptr_private =
2458 phba->sli4_hba.fcp_wq[qidx]; 2509 phba->sli4_hba.fcp_wq[qidx];
2459 goto pass_check; 2510 goto pass_check;
2511 }
2460 } 2512 }
2461 } 2513 }
2462 goto error_out; 2514 goto error_out;
2463 break; 2515 break;
2464 case LPFC_IDIAG_RQ: 2516 case LPFC_IDIAG_RQ:
2465 /* HDR queue */ 2517 /* HDR queue */
2466 if (phba->sli4_hba.hdr_rq->queue_id == queid) { 2518 if (phba->sli4_hba.hdr_rq &&
2519 phba->sli4_hba.hdr_rq->queue_id == queid) {
2467 /* Sanity check */ 2520 /* Sanity check */
2468 rc = lpfc_idiag_que_param_check( 2521 rc = lpfc_idiag_que_param_check(
2469 phba->sli4_hba.hdr_rq, index, count); 2522 phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
2473 goto pass_check; 2526 goto pass_check;
2474 } 2527 }
2475 /* DAT queue */ 2528 /* DAT queue */
2476 if (phba->sli4_hba.dat_rq->queue_id == queid) { 2529 if (phba->sli4_hba.dat_rq &&
2530 phba->sli4_hba.dat_rq->queue_id == queid) {
2477 /* Sanity check */ 2531 /* Sanity check */
2478 rc = lpfc_idiag_que_param_check( 2532 rc = lpfc_idiag_que_param_check(
2479 phba->sli4_hba.dat_rq, index, count); 2533 phba->sli4_hba.dat_rq, index, count);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6096c9a091d1..cb714d2342d4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1417,7 +1417,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1417 uint32_t event_data; 1417 uint32_t event_data;
1418 struct Scsi_Host *shost; 1418 struct Scsi_Host *shost;
1419 uint32_t if_type; 1419 uint32_t if_type;
1420 struct lpfc_register portstat_reg; 1420 struct lpfc_register portstat_reg = {0};
1421 uint32_t reg_err1, reg_err2;
1422 uint32_t uerrlo_reg, uemasklo_reg;
1423 uint32_t pci_rd_rc1, pci_rd_rc2;
1421 int rc; 1424 int rc;
1422 1425
1423 /* If the pci channel is offline, ignore possible errors, since 1426 /* If the pci channel is offline, ignore possible errors, since
@@ -1429,27 +1432,29 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1429 if (!phba->cfg_enable_hba_reset) 1432 if (!phba->cfg_enable_hba_reset)
1430 return; 1433 return;
1431 1434
1432 /* Send an internal error event to mgmt application */
1433 lpfc_board_errevt_to_mgmt(phba);
1434
1435 /* For now, the actual action for SLI4 device handling is not
1436 * specified yet, just treated it as adaptor hardware failure
1437 */
1438 event_data = FC_REG_DUMP_EVENT;
1439 shost = lpfc_shost_from_vport(vport);
1440 fc_host_post_vendor_event(shost, fc_get_event_number(),
1441 sizeof(event_data), (char *) &event_data,
1442 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1443
1444 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1435 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1445 switch (if_type) { 1436 switch (if_type) {
1446 case LPFC_SLI_INTF_IF_TYPE_0: 1437 case LPFC_SLI_INTF_IF_TYPE_0:
1438 pci_rd_rc1 = lpfc_readl(
1439 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1440 &uerrlo_reg);
1441 pci_rd_rc2 = lpfc_readl(
1442 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1443 &uemasklo_reg);
1444 /* consider PCI bus read error as pci_channel_offline */
1445 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1446 return;
1447 lpfc_sli4_offline_eratt(phba); 1447 lpfc_sli4_offline_eratt(phba);
1448 break; 1448 break;
1449 case LPFC_SLI_INTF_IF_TYPE_2: 1449 case LPFC_SLI_INTF_IF_TYPE_2:
1450 portstat_reg.word0 = 1450 pci_rd_rc1 = lpfc_readl(
1451 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 1451 phba->sli4_hba.u.if_type2.STATUSregaddr,
1452 1452 &portstat_reg.word0);
1453 /* consider PCI bus read error as pci_channel_offline */
1454 if (pci_rd_rc1 == -EIO)
1455 return;
1456 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1457 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1453 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1458 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1454 /* TODO: Register for Overtemp async events. */ 1459 /* TODO: Register for Overtemp async events. */
1455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1459,8 +1464,20 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1459 phba->over_temp_state = HBA_OVER_TEMP; 1464 phba->over_temp_state = HBA_OVER_TEMP;
1460 spin_unlock_irq(&phba->hbalock); 1465 spin_unlock_irq(&phba->hbalock);
1461 lpfc_sli4_offline_eratt(phba); 1466 lpfc_sli4_offline_eratt(phba);
1462 return; 1467 break;
1463 } 1468 }
1469 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1470 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
1471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1472 "3143 Port Down: Firmware Restarted\n");
1473 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1474 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1476 "3144 Port Down: Debug Dump\n");
1477 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1478 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1480 "3145 Port Down: Provisioning\n");
1464 /* 1481 /*
1465 * On error status condition, driver need to wait for port 1482 * On error status condition, driver need to wait for port
1466 * ready before performing reset. 1483 * ready before performing reset.
@@ -1469,14 +1486,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1469 if (!rc) { 1486 if (!rc) {
1470 /* need reset: attempt for port recovery */ 1487 /* need reset: attempt for port recovery */
1471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1472 "2887 Port Error: Attempting " 1489 "2887 Reset Needed: Attempting Port "
1473 "Port Recovery\n"); 1490 "Recovery...\n");
1474 lpfc_offline_prep(phba); 1491 lpfc_offline_prep(phba);
1475 lpfc_offline(phba); 1492 lpfc_offline(phba);
1476 lpfc_sli_brdrestart(phba); 1493 lpfc_sli_brdrestart(phba);
1477 if (lpfc_online(phba) == 0) { 1494 if (lpfc_online(phba) == 0) {
1478 lpfc_unblock_mgmt_io(phba); 1495 lpfc_unblock_mgmt_io(phba);
1479 return; 1496 /* don't report event on forced debug dump */
1497 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1498 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1499 return;
1500 else
1501 break;
1480 } 1502 }
1481 /* fall through for not able to recover */ 1503 /* fall through for not able to recover */
1482 } 1504 }
@@ -1486,6 +1508,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1486 default: 1508 default:
1487 break; 1509 break;
1488 } 1510 }
1511 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1512 "3123 Report dump event to upper layer\n");
1513 /* Send an internal error event to mgmt application */
1514 lpfc_board_errevt_to_mgmt(phba);
1515
1516 event_data = FC_REG_DUMP_EVENT;
1517 shost = lpfc_shost_from_vport(vport);
1518 fc_host_post_vendor_event(shost, fc_get_event_number(),
1519 sizeof(event_data), (char *) &event_data,
1520 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1489} 1521}
1490 1522
1491/** 1523/**
@@ -6475,6 +6507,7 @@ out_free_fcp_wq:
6475 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6507 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6476 } 6508 }
6477 kfree(phba->sli4_hba.fcp_wq); 6509 kfree(phba->sli4_hba.fcp_wq);
6510 phba->sli4_hba.fcp_wq = NULL;
6478out_free_els_wq: 6511out_free_els_wq:
6479 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6512 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6480 phba->sli4_hba.els_wq = NULL; 6513 phba->sli4_hba.els_wq = NULL;
@@ -6487,6 +6520,7 @@ out_free_fcp_cq:
6487 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6520 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6488 } 6521 }
6489 kfree(phba->sli4_hba.fcp_cq); 6522 kfree(phba->sli4_hba.fcp_cq);
6523 phba->sli4_hba.fcp_cq = NULL;
6490out_free_els_cq: 6524out_free_els_cq:
6491 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6525 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6492 phba->sli4_hba.els_cq = NULL; 6526 phba->sli4_hba.els_cq = NULL;
@@ -6499,6 +6533,7 @@ out_free_fp_eq:
6499 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6533 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6500 } 6534 }
6501 kfree(phba->sli4_hba.fp_eq); 6535 kfree(phba->sli4_hba.fp_eq);
6536 phba->sli4_hba.fp_eq = NULL;
6502out_free_sp_eq: 6537out_free_sp_eq:
6503 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6538 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6504 phba->sli4_hba.sp_eq = NULL; 6539 phba->sli4_hba.sp_eq = NULL;
@@ -6532,8 +6567,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6532 phba->sli4_hba.els_wq = NULL; 6567 phba->sli4_hba.els_wq = NULL;
6533 6568
6534 /* Release FCP work queue */ 6569 /* Release FCP work queue */
6535 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6570 if (phba->sli4_hba.fcp_wq != NULL)
6536 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6571 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
6572 fcp_qidx++)
6573 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6537 kfree(phba->sli4_hba.fcp_wq); 6574 kfree(phba->sli4_hba.fcp_wq);
6538 phba->sli4_hba.fcp_wq = NULL; 6575 phba->sli4_hba.fcp_wq = NULL;
6539 6576
@@ -6553,15 +6590,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6553 6590
6554 /* Release FCP response complete queue */ 6591 /* Release FCP response complete queue */
6555 fcp_qidx = 0; 6592 fcp_qidx = 0;
6556 do 6593 if (phba->sli4_hba.fcp_cq != NULL)
6557 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6594 do
6558 while (++fcp_qidx < phba->cfg_fcp_eq_count); 6595 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6596 while (++fcp_qidx < phba->cfg_fcp_eq_count);
6559 kfree(phba->sli4_hba.fcp_cq); 6597 kfree(phba->sli4_hba.fcp_cq);
6560 phba->sli4_hba.fcp_cq = NULL; 6598 phba->sli4_hba.fcp_cq = NULL;
6561 6599
6562 /* Release fast-path event queue */ 6600 /* Release fast-path event queue */
6563 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6601 if (phba->sli4_hba.fp_eq != NULL)
6564 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6602 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6603 fcp_qidx++)
6604 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6565 kfree(phba->sli4_hba.fp_eq); 6605 kfree(phba->sli4_hba.fp_eq);
6566 phba->sli4_hba.fp_eq = NULL; 6606 phba->sli4_hba.fp_eq = NULL;
6567 6607
@@ -6614,6 +6654,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6614 phba->sli4_hba.sp_eq->queue_id); 6654 phba->sli4_hba.sp_eq->queue_id);
6615 6655
6616 /* Set up fast-path event queue */ 6656 /* Set up fast-path event queue */
6657 if (!phba->sli4_hba.fp_eq) {
6658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6659 "3147 Fast-path EQs not allocated\n");
6660 goto out_destroy_sp_eq;
6661 }
6617 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6662 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6618 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6663 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6678,6 +6723,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6678 phba->sli4_hba.sp_eq->queue_id); 6723 phba->sli4_hba.sp_eq->queue_id);
6679 6724
6680 /* Set up fast-path FCP Response Complete Queue */ 6725 /* Set up fast-path FCP Response Complete Queue */
6726 if (!phba->sli4_hba.fcp_cq) {
6727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6728 "3148 Fast-path FCP CQ array not "
6729 "allocated\n");
6730 goto out_destroy_els_cq;
6731 }
6681 fcp_cqidx = 0; 6732 fcp_cqidx = 0;
6682 do { 6733 do {
6683 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6734 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -6757,6 +6808,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6757 phba->sli4_hba.els_cq->queue_id); 6808 phba->sli4_hba.els_cq->queue_id);
6758 6809
6759 /* Set up fast-path FCP Work Queue */ 6810 /* Set up fast-path FCP Work Queue */
6811 if (!phba->sli4_hba.fcp_wq) {
6812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6813 "3149 Fast-path FCP WQ array not "
6814 "allocated\n");
6815 goto out_destroy_els_wq;
6816 }
6760 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6817 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6761 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6818 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6818,18 +6875,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6818out_destroy_fcp_wq: 6875out_destroy_fcp_wq:
6819 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 6876 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6820 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6877 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6878out_destroy_els_wq:
6821 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6879 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6822out_destroy_mbx_wq: 6880out_destroy_mbx_wq:
6823 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6881 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6824out_destroy_fcp_cq: 6882out_destroy_fcp_cq:
6825 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6883 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6826 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6884 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6885out_destroy_els_cq:
6827 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6886 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6828out_destroy_mbx_cq: 6887out_destroy_mbx_cq:
6829 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6888 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6830out_destroy_fp_eq: 6889out_destroy_fp_eq:
6831 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6890 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6832 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6891 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6892out_destroy_sp_eq:
6833 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6893 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6834out_error: 6894out_error:
6835 return rc; 6895 return rc;
@@ -6866,13 +6926,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6866 /* Unset ELS complete queue */ 6926 /* Unset ELS complete queue */
6867 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6927 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6868 /* Unset FCP response complete queue */ 6928 /* Unset FCP response complete queue */
6869 fcp_qidx = 0; 6929 if (phba->sli4_hba.fcp_cq) {
6870 do { 6930 fcp_qidx = 0;
6871 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6931 do {
6872 } while (++fcp_qidx < phba->cfg_fcp_eq_count); 6932 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6933 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
6934 }
6873 /* Unset fast-path event queue */ 6935 /* Unset fast-path event queue */
6874 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6936 if (phba->sli4_hba.fp_eq) {
6875 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6937 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6938 fcp_qidx++)
6939 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6940 }
6876 /* Unset slow-path event queue */ 6941 /* Unset slow-path event queue */
6877 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6942 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6878} 6943}
@@ -7411,22 +7476,25 @@ out:
7411static void 7476static void
7412lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 7477lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7413{ 7478{
7414 struct pci_dev *pdev; 7479 uint32_t if_type;
7415 7480 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7416 /* Obtain PCI device reference */
7417 if (!phba->pcidev)
7418 return;
7419 else
7420 pdev = phba->pcidev;
7421
7422 /* Free coherent DMA memory allocated */
7423
7424 /* Unmap I/O memory space */
7425 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7426 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7427 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7428 7481
7429 return; 7482 switch (if_type) {
7483 case LPFC_SLI_INTF_IF_TYPE_0:
7484 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7485 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7486 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7487 break;
7488 case LPFC_SLI_INTF_IF_TYPE_2:
7489 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7490 break;
7491 case LPFC_SLI_INTF_IF_TYPE_1:
7492 default:
7493 dev_printk(KERN_ERR, &phba->pcidev->dev,
7494 "FATAL - unsupported SLI4 interface type - %d\n",
7495 if_type);
7496 break;
7497 }
7430} 7498}
7431 7499
7432/** 7500/**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 10d5b5e41499..ade763d3930a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
389{ 389{
390 struct hbq_dmabuf *hbqbp; 390 struct hbq_dmabuf *hbqbp;
391 391
392 hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); 392 hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
393 if (!hbqbp) 393 if (!hbqbp)
394 return NULL; 394 return NULL;
395 395
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
441{ 441{
442 struct hbq_dmabuf *dma_buf; 442 struct hbq_dmabuf *dma_buf;
443 443
444 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); 444 dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
445 if (!dma_buf) 445 if (!dma_buf)
446 return NULL; 446 return NULL;
447 447
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 97bbafb21bc2..e27cb44b3ec2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -89,10 +89,15 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
89static uint32_t 89static uint32_t
90lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) 90lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
91{ 91{
92 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe; 92 union lpfc_wqe *temp_wqe;
93 struct lpfc_register doorbell; 93 struct lpfc_register doorbell;
94 uint32_t host_index; 94 uint32_t host_index;
95 95
96 /* sanity check on queue memory */
97 if (unlikely(!q))
98 return -ENOMEM;
99 temp_wqe = q->qe[q->host_index].wqe;
100
96 /* If the host has not yet processed the next entry then we are done */ 101 /* If the host has not yet processed the next entry then we are done */
97 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 102 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
98 return -ENOMEM; 103 return -ENOMEM;
@@ -134,6 +139,10 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
134{ 139{
135 uint32_t released = 0; 140 uint32_t released = 0;
136 141
142 /* sanity check on queue memory */
143 if (unlikely(!q))
144 return 0;
145
137 if (q->hba_index == index) 146 if (q->hba_index == index)
138 return 0; 147 return 0;
139 do { 148 do {
@@ -158,10 +167,15 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
158static uint32_t 167static uint32_t
159lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 168lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
160{ 169{
161 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe; 170 struct lpfc_mqe *temp_mqe;
162 struct lpfc_register doorbell; 171 struct lpfc_register doorbell;
163 uint32_t host_index; 172 uint32_t host_index;
164 173
174 /* sanity check on queue memory */
175 if (unlikely(!q))
176 return -ENOMEM;
177 temp_mqe = q->qe[q->host_index].mqe;
178
165 /* If the host has not yet processed the next entry then we are done */ 179 /* If the host has not yet processed the next entry then we are done */
166 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 180 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
167 return -ENOMEM; 181 return -ENOMEM;
@@ -195,6 +209,10 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
195static uint32_t 209static uint32_t
196lpfc_sli4_mq_release(struct lpfc_queue *q) 210lpfc_sli4_mq_release(struct lpfc_queue *q)
197{ 211{
212 /* sanity check on queue memory */
213 if (unlikely(!q))
214 return 0;
215
198 /* Clear the mailbox pointer for completion */ 216 /* Clear the mailbox pointer for completion */
199 q->phba->mbox = NULL; 217 q->phba->mbox = NULL;
200 q->hba_index = ((q->hba_index + 1) % q->entry_count); 218 q->hba_index = ((q->hba_index + 1) % q->entry_count);
@@ -213,7 +231,12 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
213static struct lpfc_eqe * 231static struct lpfc_eqe *
214lpfc_sli4_eq_get(struct lpfc_queue *q) 232lpfc_sli4_eq_get(struct lpfc_queue *q)
215{ 233{
216 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; 234 struct lpfc_eqe *eqe;
235
236 /* sanity check on queue memory */
237 if (unlikely(!q))
238 return NULL;
239 eqe = q->qe[q->hba_index].eqe;
217 240
218 /* If the next EQE is not valid then we are done */ 241 /* If the next EQE is not valid then we are done */
219 if (!bf_get_le32(lpfc_eqe_valid, eqe)) 242 if (!bf_get_le32(lpfc_eqe_valid, eqe))
@@ -248,6 +271,10 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
248 struct lpfc_eqe *temp_eqe; 271 struct lpfc_eqe *temp_eqe;
249 struct lpfc_register doorbell; 272 struct lpfc_register doorbell;
250 273
274 /* sanity check on queue memory */
275 if (unlikely(!q))
276 return 0;
277
251 /* while there are valid entries */ 278 /* while there are valid entries */
252 while (q->hba_index != q->host_index) { 279 while (q->hba_index != q->host_index) {
253 temp_eqe = q->qe[q->host_index].eqe; 280 temp_eqe = q->qe[q->host_index].eqe;
@@ -288,6 +315,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
288{ 315{
289 struct lpfc_cqe *cqe; 316 struct lpfc_cqe *cqe;
290 317
318 /* sanity check on queue memory */
319 if (unlikely(!q))
320 return NULL;
321
291 /* If the next CQE is not valid then we are done */ 322 /* If the next CQE is not valid then we are done */
292 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 323 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
293 return NULL; 324 return NULL;
@@ -322,6 +353,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
322 struct lpfc_cqe *temp_qe; 353 struct lpfc_cqe *temp_qe;
323 struct lpfc_register doorbell; 354 struct lpfc_register doorbell;
324 355
356 /* sanity check on queue memory */
357 if (unlikely(!q))
358 return 0;
325 /* while there are valid entries */ 359 /* while there are valid entries */
326 while (q->hba_index != q->host_index) { 360 while (q->hba_index != q->host_index) {
327 temp_qe = q->qe[q->host_index].cqe; 361 temp_qe = q->qe[q->host_index].cqe;
@@ -359,11 +393,17 @@ static int
359lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 393lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
360 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 394 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
361{ 395{
362 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe; 396 struct lpfc_rqe *temp_hrqe;
363 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe; 397 struct lpfc_rqe *temp_drqe;
364 struct lpfc_register doorbell; 398 struct lpfc_register doorbell;
365 int put_index = hq->host_index; 399 int put_index = hq->host_index;
366 400
401 /* sanity check on queue memory */
402 if (unlikely(!hq) || unlikely(!dq))
403 return -ENOMEM;
404 temp_hrqe = hq->qe[hq->host_index].rqe;
405 temp_drqe = dq->qe[dq->host_index].rqe;
406
367 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 407 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
368 return -EINVAL; 408 return -EINVAL;
369 if (hq->host_index != dq->host_index) 409 if (hq->host_index != dq->host_index)
@@ -402,6 +442,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
402static uint32_t 442static uint32_t
403lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 443lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
404{ 444{
445 /* sanity check on queue memory */
446 if (unlikely(!hq) || unlikely(!dq))
447 return 0;
448
405 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 449 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
406 return 0; 450 return 0;
407 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 451 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
@@ -3851,7 +3895,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3851{ 3895{
3852 struct lpfc_sli *psli = &phba->sli; 3896 struct lpfc_sli *psli = &phba->sli;
3853 uint16_t cfg_value; 3897 uint16_t cfg_value;
3854 uint8_t qindx;
3855 3898
3856 /* Reset HBA */ 3899 /* Reset HBA */
3857 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3900 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3867,19 +3910,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3867 spin_lock_irq(&phba->hbalock); 3910 spin_lock_irq(&phba->hbalock);
3868 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3911 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3869 phba->fcf.fcf_flag = 0; 3912 phba->fcf.fcf_flag = 0;
3870 /* Clean up the child queue list for the CQs */
3871 list_del_init(&phba->sli4_hba.mbx_wq->list);
3872 list_del_init(&phba->sli4_hba.els_wq->list);
3873 list_del_init(&phba->sli4_hba.hdr_rq->list);
3874 list_del_init(&phba->sli4_hba.dat_rq->list);
3875 list_del_init(&phba->sli4_hba.mbx_cq->list);
3876 list_del_init(&phba->sli4_hba.els_cq->list);
3877 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3878 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3879 qindx = 0;
3880 do
3881 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3882 while (++qindx < phba->cfg_fcp_eq_count);
3883 spin_unlock_irq(&phba->hbalock); 3913 spin_unlock_irq(&phba->hbalock);
3884 3914
3885 /* Now physically reset the device */ 3915 /* Now physically reset the device */
@@ -3892,6 +3922,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3892 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3922 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3893 3923
3894 /* Perform FCoE PCI function reset */ 3924 /* Perform FCoE PCI function reset */
3925 lpfc_sli4_queue_destroy(phba);
3895 lpfc_pci_function_reset(phba); 3926 lpfc_pci_function_reset(phba);
3896 3927
3897 /* Restore PCI cmd register */ 3928 /* Restore PCI cmd register */
@@ -4869,14 +4900,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4869 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4900 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4870 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4901 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4871 fcp_eqidx = 0; 4902 fcp_eqidx = 0;
4872 do 4903 if (phba->sli4_hba.fcp_cq) {
4873 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4904 do
4874 LPFC_QUEUE_REARM); 4905 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4875 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4906 LPFC_QUEUE_REARM);
4907 while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4908 }
4876 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4909 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4877 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4910 if (phba->sli4_hba.fp_eq) {
4878 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4911 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
4879 LPFC_QUEUE_REARM); 4912 fcp_eqidx++)
4913 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4914 LPFC_QUEUE_REARM);
4915 }
4880} 4916}
4881 4917
4882/** 4918/**
@@ -8083,6 +8119,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8083 */ 8119 */
8084 if (piocb->iocb_flag & LPFC_IO_FCP) 8120 if (piocb->iocb_flag & LPFC_IO_FCP)
8085 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8121 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8122 if (unlikely(!phba->sli4_hba.fcp_wq))
8123 return IOCB_ERROR;
8086 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8124 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8087 &wqe)) 8125 &wqe))
8088 return IOCB_ERROR; 8126 return IOCB_ERROR;
@@ -9900,7 +9938,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
9900 phba->work_status[1] = 9938 phba->work_status[1] =
9901 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 9939 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
9902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9903 "2885 Port Error Detected: " 9941 "2885 Port Status Event: "
9904 "port status reg 0x%x, " 9942 "port status reg 0x%x, "
9905 "port smphr reg 0x%x, " 9943 "port smphr reg 0x%x, "
9906 "error 1=0x%x, error 2=0x%x\n", 9944 "error 1=0x%x, error 2=0x%x\n",
@@ -10906,6 +10944,9 @@ static void
10906lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 10944lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
10907 struct lpfc_wcqe_release *wcqe) 10945 struct lpfc_wcqe_release *wcqe)
10908{ 10946{
10947 /* sanity check on queue memory */
10948 if (unlikely(!phba->sli4_hba.els_wq))
10949 return;
10909 /* Check for the slow-path ELS work queue */ 10950 /* Check for the slow-path ELS work queue */
10910 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 10951 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
10911 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 10952 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
@@ -10995,6 +11036,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
10995 uint32_t status, rq_id; 11036 uint32_t status, rq_id;
10996 unsigned long iflags; 11037 unsigned long iflags;
10997 11038
11039 /* sanity check on queue memory */
11040 if (unlikely(!hrq) || unlikely(!drq))
11041 return workposted;
11042
10998 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 11043 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
10999 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 11044 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11000 else 11045 else
@@ -11129,6 +11174,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11129 11174
11130 /* Search for completion queue pointer matching this cqid */ 11175 /* Search for completion queue pointer matching this cqid */
11131 speq = phba->sli4_hba.sp_eq; 11176 speq = phba->sli4_hba.sp_eq;
11177 /* sanity check on queue memory */
11178 if (unlikely(!speq))
11179 return;
11132 list_for_each_entry(childq, &speq->child_list, list) { 11180 list_for_each_entry(childq, &speq->child_list, list) {
11133 if (childq->queue_id == cqid) { 11181 if (childq->queue_id == cqid) {
11134 cq = childq; 11182 cq = childq;
@@ -11370,12 +11418,18 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11370 return; 11418 return;
11371 } 11419 }
11372 11420
11421 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11422 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11423 "3146 Fast-path completion queues "
11424 "does not exist\n");
11425 return;
11426 }
11373 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11427 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
11374 if (unlikely(!cq)) { 11428 if (unlikely(!cq)) {
11375 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11429 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11376 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11377 "0367 Fast-path completion queue " 11431 "0367 Fast-path completion queue "
11378 "does not exist\n"); 11432 "(%d) does not exist\n", fcp_cqidx);
11379 return; 11433 return;
11380 } 11434 }
11381 11435
@@ -11546,6 +11600,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11546 11600
11547 /* Get to the EQ struct associated with this vector */ 11601 /* Get to the EQ struct associated with this vector */
11548 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11602 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
11603 if (unlikely(!fpeq))
11604 return IRQ_NONE;
11549 11605
11550 /* Check device state for handling interrupt */ 11606 /* Check device state for handling interrupt */
11551 if (unlikely(lpfc_intr_state_check(phba))) { 11607 if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11764,6 +11820,9 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
11764 uint16_t dmult; 11820 uint16_t dmult;
11765 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11821 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11766 11822
11823 /* sanity check on queue memory */
11824 if (!eq)
11825 return -ENODEV;
11767 if (!phba->sli4_hba.pc_sli4_params.supported) 11826 if (!phba->sli4_hba.pc_sli4_params.supported)
11768 hw_page_size = SLI4_PAGE_SIZE; 11827 hw_page_size = SLI4_PAGE_SIZE;
11769 11828
@@ -11880,6 +11939,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
11880 union lpfc_sli4_cfg_shdr *shdr; 11939 union lpfc_sli4_cfg_shdr *shdr;
11881 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11940 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11882 11941
11942 /* sanity check on queue memory */
11943 if (!cq || !eq)
11944 return -ENODEV;
11883 if (!phba->sli4_hba.pc_sli4_params.supported) 11945 if (!phba->sli4_hba.pc_sli4_params.supported)
11884 hw_page_size = SLI4_PAGE_SIZE; 11946 hw_page_size = SLI4_PAGE_SIZE;
11885 11947
@@ -12062,6 +12124,9 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12062 union lpfc_sli4_cfg_shdr *shdr; 12124 union lpfc_sli4_cfg_shdr *shdr;
12063 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12125 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12064 12126
12127 /* sanity check on queue memory */
12128 if (!mq || !cq)
12129 return -ENODEV;
12065 if (!phba->sli4_hba.pc_sli4_params.supported) 12130 if (!phba->sli4_hba.pc_sli4_params.supported)
12066 hw_page_size = SLI4_PAGE_SIZE; 12131 hw_page_size = SLI4_PAGE_SIZE;
12067 12132
@@ -12212,6 +12277,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12212 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12277 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12213 struct dma_address *page; 12278 struct dma_address *page;
12214 12279
12280 /* sanity check on queue memory */
12281 if (!wq || !cq)
12282 return -ENODEV;
12215 if (!phba->sli4_hba.pc_sli4_params.supported) 12283 if (!phba->sli4_hba.pc_sli4_params.supported)
12216 hw_page_size = SLI4_PAGE_SIZE; 12284 hw_page_size = SLI4_PAGE_SIZE;
12217 12285
@@ -12304,6 +12372,9 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12304{ 12372{
12305 uint32_t cnt; 12373 uint32_t cnt;
12306 12374
12375 /* sanity check on queue memory */
12376 if (!rq)
12377 return;
12307 cnt = lpfc_hbq_defs[qno]->entry_count; 12378 cnt = lpfc_hbq_defs[qno]->entry_count;
12308 12379
12309 /* Recalc repost for RQs based on buffers initially posted */ 12380 /* Recalc repost for RQs based on buffers initially posted */
@@ -12349,6 +12420,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12349 union lpfc_sli4_cfg_shdr *shdr; 12420 union lpfc_sli4_cfg_shdr *shdr;
12350 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12421 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12351 12422
12423 /* sanity check on queue memory */
12424 if (!hrq || !drq || !cq)
12425 return -ENODEV;
12352 if (!phba->sli4_hba.pc_sli4_params.supported) 12426 if (!phba->sli4_hba.pc_sli4_params.supported)
12353 hw_page_size = SLI4_PAGE_SIZE; 12427 hw_page_size = SLI4_PAGE_SIZE;
12354 12428
@@ -12550,6 +12624,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
12550 uint32_t shdr_status, shdr_add_status; 12624 uint32_t shdr_status, shdr_add_status;
12551 union lpfc_sli4_cfg_shdr *shdr; 12625 union lpfc_sli4_cfg_shdr *shdr;
12552 12626
12627 /* sanity check on queue memory */
12553 if (!eq) 12628 if (!eq)
12554 return -ENODEV; 12629 return -ENODEV;
12555 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 12630 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12605,6 +12680,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
12605 uint32_t shdr_status, shdr_add_status; 12680 uint32_t shdr_status, shdr_add_status;
12606 union lpfc_sli4_cfg_shdr *shdr; 12681 union lpfc_sli4_cfg_shdr *shdr;
12607 12682
12683 /* sanity check on queue memory */
12608 if (!cq) 12684 if (!cq)
12609 return -ENODEV; 12685 return -ENODEV;
12610 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 12686 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12658,6 +12734,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
12658 uint32_t shdr_status, shdr_add_status; 12734 uint32_t shdr_status, shdr_add_status;
12659 union lpfc_sli4_cfg_shdr *shdr; 12735 union lpfc_sli4_cfg_shdr *shdr;
12660 12736
12737 /* sanity check on queue memory */
12661 if (!mq) 12738 if (!mq)
12662 return -ENODEV; 12739 return -ENODEV;
12663 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 12740 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12711,6 +12788,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
12711 uint32_t shdr_status, shdr_add_status; 12788 uint32_t shdr_status, shdr_add_status;
12712 union lpfc_sli4_cfg_shdr *shdr; 12789 union lpfc_sli4_cfg_shdr *shdr;
12713 12790
12791 /* sanity check on queue memory */
12714 if (!wq) 12792 if (!wq)
12715 return -ENODEV; 12793 return -ENODEV;
12716 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 12794 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12764,6 +12842,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12764 uint32_t shdr_status, shdr_add_status; 12842 uint32_t shdr_status, shdr_add_status;
12765 union lpfc_sli4_cfg_shdr *shdr; 12843 union lpfc_sli4_cfg_shdr *shdr;
12766 12844
12845 /* sanity check on queue memory */
12767 if (!hrq || !drq) 12846 if (!hrq || !drq)
12768 return -ENODEV; 12847 return -ENODEV;
12769 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 12848 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 7d444c444db4..3f266e2c54e0 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
420 void __iomem *STATUSregaddr; 420 void __iomem *STATUSregaddr;
421 void __iomem *CTRLregaddr; 421 void __iomem *CTRLregaddr;
422 void __iomem *ERR1regaddr; 422 void __iomem *ERR1regaddr;
423#define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
424#define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
423 void __iomem *ERR2regaddr; 425 void __iomem *ERR2regaddr;
426#define SLIPORT_ERR2_REG_FW_RESTART 0x0
427#define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
428#define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
429#define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
430#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
431#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
432#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
424 } if_type2; 433 } if_type2;
425 } u; 434 } u;
426 435