Diffstat (limited to 'drivers/scsi/qla2xxx')

 drivers/scsi/qla2xxx/qla_attr.c    | 732
 drivers/scsi/qla2xxx/qla_def.h     | 155
 drivers/scsi/qla2xxx/qla_fw.h      |  33
 drivers/scsi/qla2xxx/qla_gbl.h     |   7
 drivers/scsi/qla2xxx/qla_init.c    |  32
 drivers/scsi/qla2xxx/qla_iocb.c    | 120
 drivers/scsi/qla2xxx/qla_isr.c     | 110
 drivers/scsi/qla2xxx/qla_mbx.c     | 151
 drivers/scsi/qla2xxx/qla_os.c      | 135
 drivers/scsi/qla2xxx/qla_version.h |   6
 10 files changed, 1396 insertions(+), 85 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3a9f5b288aee..90d1e062ec4f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -11,7 +11,9 @@
 #include <linux/delay.h>
 
 static int qla24xx_vport_disable(struct fc_vport *, bool);
-
+static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
+static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
 /* SYSFS attributes --------------------------------------------------------- */
 
 static ssize_t
@@ -1168,6 +1170,28 @@ qla2x00_total_isp_aborts_show(struct device *dev,
 }
 
 static ssize_t
+qla24xx_84xx_fw_version_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+	int rval = QLA_SUCCESS;
+	uint16_t status[2] = {0, 0};
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLA84XX(ha) && ha->cs84xx) {
+		if (ha->cs84xx->op_fw_version == 0) {
+			rval = qla84xx_verify_chip(vha, status);
+		}
+
+		if ((rval == QLA_SUCCESS) && (status[0] == 0))
+			return snprintf(buf, PAGE_SIZE, "%u\n",
+			    (uint32_t)ha->cs84xx->op_fw_version);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
     char *buf)
 {
@@ -1281,6 +1305,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
 		   qla2x00_optrom_fcode_version_show, NULL);
 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
 		   NULL);
+static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
+		   NULL);
 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
 		   NULL);
 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@@ -1310,6 +1336,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_optrom_efi_version,
 	&dev_attr_optrom_fcode_version,
 	&dev_attr_optrom_fw_version,
+	&dev_attr_84xx_fw_version,
 	&dev_attr_total_isp_aborts,
 	&dev_attr_mpi_version,
 	&dev_attr_phy_version,
@@ -1504,8 +1531,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
 		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
 			fcport->loop_id, fcport->d_id.b.domain,
 			fcport->d_id.b.area, fcport->d_id.b.al_pa);
-
-	qla2x00_abort_fcport_cmds(fcport);
 }
 
 static int
@@ -1795,6 +1820,581 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
 	return 0;
 }
 
1823/* BSG support for ELS/CT pass through */
1824inline srb_t *
1825qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1826{
1827 srb_t *sp;
1828 struct qla_hw_data *ha = vha->hw;
1829 struct srb_bsg_ctx *ctx;
1830
1831 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1832 if (!sp)
1833 goto done;
1834 ctx = kzalloc(size, GFP_KERNEL);
1835 if (!ctx) {
1836 mempool_free(sp, ha->srb_mempool);
1837 goto done;
1838 }
1839
1840 memset(sp, 0, sizeof(*sp));
1841 sp->fcport = fcport;
1842 sp->ctx = ctx;
1843done:
1844 return sp;
1845}
1846
1847static int
1848qla2x00_process_els(struct fc_bsg_job *bsg_job)
1849{
1850 struct fc_rport *rport;
1851 fc_port_t *fcport;
1852 struct Scsi_Host *host;
1853 scsi_qla_host_t *vha;
1854 struct qla_hw_data *ha;
1855 srb_t *sp;
1856 const char *type;
1857 int req_sg_cnt, rsp_sg_cnt;
1858 int rval = (DRIVER_ERROR << 16);
1859 uint16_t nextlid = 0;
1860 struct srb_bsg *els;
1861
1862 /* Multiple SG's are not supported for ELS requests */
1863 if (bsg_job->request_payload.sg_cnt > 1 ||
1864 bsg_job->reply_payload.sg_cnt > 1) {
1865 DEBUG2(printk(KERN_INFO
1866 "multiple SG's are not supported for ELS requests"
1867 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1868 bsg_job->request_payload.sg_cnt,
1869 bsg_job->reply_payload.sg_cnt));
1870 rval = -EPERM;
1871 goto done;
1872 }
1873
1874 /* ELS request for rport */
1875 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1876 rport = bsg_job->rport;
1877 fcport = *(fc_port_t **) rport->dd_data;
1878 host = rport_to_shost(rport);
1879 vha = shost_priv(host);
1880 ha = vha->hw;
1881 type = "FC_BSG_RPT_ELS";
1882
1883 /* make sure the rport is logged in,
1884 * if not perform fabric login
1885 */
1886 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1887 DEBUG2(qla_printk(KERN_WARNING, ha,
1888 "failed to login port %06X for ELS passthru\n",
1889 fcport->d_id.b24));
1890 rval = -EIO;
1891 goto done;
1892 }
1893 } else {
1894 host = bsg_job->shost;
1895 vha = shost_priv(host);
1896 ha = vha->hw;
1897 type = "FC_BSG_HST_ELS_NOLOGIN";
1898
1899 /* Allocate a dummy fcport structure, since functions
1900 * preparing the IOCB and mailbox command retrieves port
1901 * specific information from fcport structure. For Host based
1902 * ELS commands there will be no fcport structure allocated
1903 */
1904 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1905 if (!fcport) {
1906 rval = -ENOMEM;
1907 goto done;
1908 }
1909
1910 /* Initialize all required fields of fcport */
1911 fcport->vha = vha;
1912 fcport->vp_idx = vha->vp_idx;
1913 fcport->d_id.b.al_pa =
1914 bsg_job->request->rqst_data.h_els.port_id[0];
1915 fcport->d_id.b.area =
1916 bsg_job->request->rqst_data.h_els.port_id[1];
1917 fcport->d_id.b.domain =
1918 bsg_job->request->rqst_data.h_els.port_id[2];
1919 fcport->loop_id =
1920 (fcport->d_id.b.al_pa == 0xFD) ?
1921 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1922 }
1923
1924 if (!vha->flags.online) {
1925 DEBUG2(qla_printk(KERN_WARNING, ha,
1926 "host not online\n"));
1927 rval = -EIO;
1928 goto done;
1929 }
1930
1931 req_sg_cnt =
1932 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1933 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1934 if (!req_sg_cnt) {
1935 rval = -ENOMEM;
1936 goto done_free_fcport;
1937 }
1938 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1939 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1940 if (!rsp_sg_cnt) {
1941 rval = -ENOMEM;
1942 goto done_free_fcport;
1943 }
1944
1945 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1946 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1947 {
1948 DEBUG2(printk(KERN_INFO
1949 "dma mapping resulted in different sg counts \
1950 [request_sg_cnt: %x dma_request_sg_cnt: %x\
1951 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1952 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1953 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1954 rval = -EAGAIN;
1955 goto done_unmap_sg;
1956 }
1957
1958 /* Alloc SRB structure */
1959 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1960 if (!sp) {
1961 rval = -ENOMEM;
1962 goto done_unmap_sg;
1963 }
1964
1965 els = sp->ctx;
1966 els->ctx.type =
1967 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1968 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1969 els->bsg_job = bsg_job;
1970
1971 DEBUG2(qla_printk(KERN_INFO, ha,
1972 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1973 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1974 bsg_job->request->rqst_data.h_els.command_code,
1975 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1976 fcport->d_id.b.al_pa));
1977
1978 rval = qla2x00_start_sp(sp);
1979 if (rval != QLA_SUCCESS) {
1980 kfree(sp->ctx);
1981 mempool_free(sp, ha->srb_mempool);
1982 rval = -EIO;
1983 goto done_unmap_sg;
1984 }
1985 return rval;
1986
1987done_unmap_sg:
1988 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1989 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1990 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1991 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1992 goto done_free_fcport;
1993
1994done_free_fcport:
1995 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
1996 kfree(fcport);
1997done:
1998 return rval;
1999}
2000
2001static int
2002qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2003{
2004 srb_t *sp;
2005 struct Scsi_Host *host = bsg_job->shost;
2006 scsi_qla_host_t *vha = shost_priv(host);
2007 struct qla_hw_data *ha = vha->hw;
2008 int rval = (DRIVER_ERROR << 16);
2009 int req_sg_cnt, rsp_sg_cnt;
2010 uint16_t loop_id;
2011 struct fc_port *fcport;
2012 char *type = "FC_BSG_HST_CT";
2013 struct srb_bsg *ct;
2014
2015 /* pass through is supported only for ISP 4Gb or higher */
2016 if (!IS_FWI2_CAPABLE(ha)) {
2017 DEBUG2(qla_printk(KERN_INFO, ha,
2018 "scsi(%ld):Firmware is not capable to support FC "
2019 "CT pass thru\n", vha->host_no));
2020 rval = -EPERM;
2021 goto done;
2022 }
2023
2024 req_sg_cnt =
2025 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2026 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2027 if (!req_sg_cnt) {
2028 rval = -ENOMEM;
2029 goto done;
2030 }
2031
2032 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2033 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2034 if (!rsp_sg_cnt) {
2035 rval = -ENOMEM;
2036 goto done;
2037 }
2038
2039 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2040 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2041 {
2042 DEBUG2(qla_printk(KERN_WARNING, ha,
2043 "dma mapping resulted in different sg counts \
2044 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2045 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2046 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2047 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2048 rval = -EAGAIN;
2049 goto done_unmap_sg;
2050 }
2051
2052 if (!vha->flags.online) {
2053 DEBUG2(qla_printk(KERN_WARNING, ha,
2054 "host not online\n"));
2055 rval = -EIO;
2056 goto done_unmap_sg;
2057 }
2058
2059 loop_id =
2060 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2061 >> 24;
2062 switch (loop_id) {
2063 case 0xFC:
2064 loop_id = cpu_to_le16(NPH_SNS);
2065 break;
2066 case 0xFA:
2067 loop_id = vha->mgmt_svr_loop_id;
2068 break;
2069 default:
2070 DEBUG2(qla_printk(KERN_INFO, ha,
2071 "Unknown loop id: %x\n", loop_id));
2072 rval = -EINVAL;
2073 goto done_unmap_sg;
2074 }
2075
2076 /* Allocate a dummy fcport structure, since functions preparing the
2077 * IOCB and mailbox command retrieves port specific information
2078 * from fcport structure. For Host based ELS commands there will be
2079 * no fcport structure allocated
2080 */
2081 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2082 if (!fcport)
2083 {
2084 rval = -ENOMEM;
2085 goto done_unmap_sg;
2086 }
2087
2088 /* Initialize all required fields of fcport */
2089 fcport->vha = vha;
2090 fcport->vp_idx = vha->vp_idx;
2091 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2092 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2093 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2094 fcport->loop_id = loop_id;
2095
2096 /* Alloc SRB structure */
2097 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2098 if (!sp) {
2099 rval = -ENOMEM;
2100 goto done_free_fcport;
2101 }
2102
2103 ct = sp->ctx;
2104 ct->ctx.type = SRB_CT_CMD;
2105 ct->bsg_job = bsg_job;
2106
2107 DEBUG2(qla_printk(KERN_INFO, ha,
2108 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2109 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2110 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2111 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2112 fcport->d_id.b.al_pa));
2113
2114 rval = qla2x00_start_sp(sp);
2115 if (rval != QLA_SUCCESS) {
2116 kfree(sp->ctx);
2117 mempool_free(sp, ha->srb_mempool);
2118 rval = -EIO;
2119 goto done_free_fcport;
2120 }
2121 return rval;
2122
2123done_free_fcport:
2124 kfree(fcport);
2125done_unmap_sg:
2126 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2127 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2128 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2129 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2130done:
2131 return rval;
2132}
2133
2134static int
2135qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2136{
2137 struct Scsi_Host *host = bsg_job->shost;
2138 scsi_qla_host_t *vha = shost_priv(host);
2139 struct qla_hw_data *ha = vha->hw;
2140 int rval;
2141 uint8_t command_sent;
2142 uint32_t vendor_cmd;
2143 char *type;
2144 struct msg_echo_lb elreq;
2145 uint16_t response[MAILBOX_REGISTER_COUNT];
2146 uint8_t* fw_sts_ptr;
2147 uint8_t *req_data;
2148 dma_addr_t req_data_dma;
2149 uint32_t req_data_len;
2150 uint8_t *rsp_data;
2151 dma_addr_t rsp_data_dma;
2152 uint32_t rsp_data_len;
2153
2154 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2155 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2156 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2157 rval = -EBUSY;
2158 goto done;
2159 }
2160
2161 if (!vha->flags.online) {
2162 DEBUG2(qla_printk(KERN_WARNING, ha,
2163 "host not online\n"));
2164 rval = -EIO;
2165 goto done;
2166 }
2167
2168 elreq.req_sg_cnt =
2169 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2170 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2171 if (!elreq.req_sg_cnt) {
2172 rval = -ENOMEM;
2173 goto done;
2174 }
2175 elreq.rsp_sg_cnt =
2176 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2177 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2178 if (!elreq.rsp_sg_cnt) {
2179 rval = -ENOMEM;
2180 goto done;
2181 }
2182
2183 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2184 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2185 {
2186 DEBUG2(printk(KERN_INFO
2187 "dma mapping resulted in different sg counts \
2188 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2189 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2190 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2191 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2192 rval = -EAGAIN;
2193 goto done_unmap_sg;
2194 }
2195 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2196 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2197 &req_data_dma, GFP_KERNEL);
2198
2199 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2200 &rsp_data_dma, GFP_KERNEL);
2201
2202 /* Copy the request buffer in req_data now */
2203 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2204 bsg_job->request_payload.sg_cnt, req_data,
2205 req_data_len);
2206
2207 elreq.send_dma = req_data_dma;
2208 elreq.rcv_dma = rsp_data_dma;
2209 elreq.transfer_size = req_data_len;
2210
2211 /* Vendor cmd : loopback or ECHO diagnostic
2212 * Options:
2213 * Loopback : Either internal or external loopback
2214 * ECHO: ECHO ELS or Vendor specific FC4 link data
2215 */
2216 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2217 elreq.options =
2218 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2219 + 1);
2220
2221 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2222 case QL_VND_LOOPBACK:
2223 if (ha->current_topology != ISP_CFG_F) {
2224 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2225
2226 DEBUG2(qla_printk(KERN_INFO, ha,
2227 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2228 vha->host_no, type, vendor_cmd, elreq.options));
2229
2230 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2231 rval = qla2x00_loopback_test(vha, &elreq, response);
2232 if (IS_QLA81XX(ha)) {
2233 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2234 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2235 "ISP\n", __func__, vha->host_no));
2236 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2237 qla2xxx_wake_dpc(vha);
2238 }
2239 }
2240 } else {
2241 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2242 DEBUG2(qla_printk(KERN_INFO, ha,
2243 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2244 vha->host_no, type, vendor_cmd, elreq.options));
2245
2246 command_sent = INT_DEF_LB_ECHO_CMD;
2247 rval = qla2x00_echo_test(vha, &elreq, response);
2248 }
2249 break;
2250 case QLA84_RESET:
2251 if (!IS_QLA84XX(vha->hw)) {
2252 rval = -EINVAL;
2253 DEBUG16(printk(
2254 "%s(%ld): 8xxx exiting.\n",
2255 __func__, vha->host_no));
2256 return rval;
2257 }
2258 rval = qla84xx_reset(vha, &elreq, bsg_job);
2259 break;
2260 case QLA84_MGMT_CMD:
2261 if (!IS_QLA84XX(vha->hw)) {
2262 rval = -EINVAL;
2263 DEBUG16(printk(
2264 "%s(%ld): 8xxx exiting.\n",
2265 __func__, vha->host_no));
2266 return rval;
2267 }
2268 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2269 break;
2270 default:
2271 rval = -ENOSYS;
2272 }
2273
2274 if (rval != QLA_SUCCESS) {
2275 DEBUG2(qla_printk(KERN_WARNING, ha,
2276 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2277 rval = 0;
2278 bsg_job->reply->result = (DID_ERROR << 16);
2279 bsg_job->reply->reply_payload_rcv_len = 0;
2280 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2281 memcpy( fw_sts_ptr, response, sizeof(response));
2282 fw_sts_ptr += sizeof(response);
2283 *fw_sts_ptr = command_sent;
2284 } else {
2285 DEBUG2(qla_printk(KERN_WARNING, ha,
2286 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2287 rval = bsg_job->reply->result = 0;
2288 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2289 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2290 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2291 memcpy(fw_sts_ptr, response, sizeof(response));
2292 fw_sts_ptr += sizeof(response);
2293 *fw_sts_ptr = command_sent;
2294 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2295 bsg_job->reply_payload.sg_cnt, rsp_data,
2296 rsp_data_len);
2297 }
2298 bsg_job->job_done(bsg_job);
2299
2300done_unmap_sg:
2301
2302 if(req_data)
2303 dma_free_coherent(&ha->pdev->dev, req_data_len,
2304 req_data, req_data_dma);
2305 dma_unmap_sg(&ha->pdev->dev,
2306 bsg_job->request_payload.sg_list,
2307 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2308 dma_unmap_sg(&ha->pdev->dev,
2309 bsg_job->reply_payload.sg_list,
2310 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2311
2312done:
2313 return rval;
2314}
2315
2316static int
2317qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2318{
2319 int ret = -EINVAL;
2320
2321 switch (bsg_job->request->msgcode) {
2322 case FC_BSG_RPT_ELS:
2323 case FC_BSG_HST_ELS_NOLOGIN:
2324 ret = qla2x00_process_els(bsg_job);
2325 break;
2326 case FC_BSG_HST_CT:
2327 ret = qla2x00_process_ct(bsg_job);
2328 break;
2329 case FC_BSG_HST_VENDOR:
2330 ret = qla2x00_process_vendor_specific(bsg_job);
2331 break;
2332 case FC_BSG_HST_ADD_RPORT:
2333 case FC_BSG_HST_DEL_RPORT:
2334 case FC_BSG_RPT_CT:
2335 default:
2336 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2337 break;
2338 }
2339 return ret;
2340}
2341
2342static int
2343qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2344{
2345 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2346 struct qla_hw_data *ha = vha->hw;
2347 srb_t *sp;
2348 int cnt, que;
2349 unsigned long flags;
2350 struct req_que *req;
2351 struct srb_bsg *sp_bsg;
2352
2353 /* find the bsg job from the active list of commands */
2354 spin_lock_irqsave(&ha->hardware_lock, flags);
2355 for (que = 0; que < ha->max_req_queues; que++) {
2356 req = ha->req_q_map[que];
2357 if (!req)
2358 continue;
2359
2360 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
2361 sp = req->outstanding_cmds[cnt];
2362
2363 if (sp) {
2364 sp_bsg = (struct srb_bsg*)sp->ctx;
2365
2366 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2367 (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
2368 || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2369 (sp_bsg->bsg_job == bsg_job)) {
2370 if (ha->isp_ops->abort_command(sp)) {
2371 DEBUG2(qla_printk(KERN_INFO, ha,
2372 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2373 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2374 } else {
2375 DEBUG2(qla_printk(KERN_INFO, ha,
2376 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2377 bsg_job->req->errors = bsg_job->reply->result = 0;
2378 }
2379 goto done;
2380 }
2381 }
2382 }
2383 }
2384 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2385 DEBUG2(qla_printk(KERN_INFO, ha,
2386 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2387 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2388 return 0;
2389
2390done:
2391 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2392 kfree(sp->fcport);
2393 kfree(sp->ctx);
2394 mempool_free(sp, ha->srb_mempool);
2395 return 0;
2396}
2397
 struct fc_function_template qla2xxx_transport_functions = {
 
 	.show_host_node_name = 1,
@@ -1838,6 +2438,8 @@ struct fc_function_template qla2xxx_transport_functions = {
 	.vport_create = qla24xx_vport_create,
 	.vport_disable = qla24xx_vport_disable,
 	.vport_delete = qla24xx_vport_delete,
+	.bsg_request = qla24xx_bsg_request,
+	.bsg_timeout = qla24xx_bsg_timeout,
 };
 
 struct fc_function_template qla2xxx_transport_vport_functions = {
@@ -1878,6 +2480,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
 	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
 	.terminate_rport_io = qla2x00_terminate_rport_io,
 	.get_fc_host_stats = qla2x00_get_fc_host_stats,
+	.bsg_request = qla24xx_bsg_request,
+	.bsg_timeout = qla24xx_bsg_timeout,
 };
 
 void
@@ -1906,3 +2510,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
 		speed = FC_PORTSPEED_1GBIT;
 	fc_host_supported_speeds(vha->host) = speed;
 }
2513static int
2514qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2515{
2516 int ret = 0;
2517 int cmd;
2518 uint16_t cmd_status;
2519
2520 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2521
2522 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2523 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2524 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2525 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2526 &cmd_status);
2527 return ret;
2528}
2529
2530static int
2531qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2532{
2533 struct access_chip_84xx *mn;
2534 dma_addr_t mn_dma, mgmt_dma;
2535 void *mgmt_b = NULL;
2536 int ret = 0;
2537 int rsp_hdr_len, len = 0;
2538 struct qla84_msg_mgmt *ql84_mgmt;
2539
2540 ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
2541 ql84_mgmt->cmd =
2542 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2543 ql84_mgmt->mgmtp.u.mem.start_addr =
2544 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2545 ql84_mgmt->len =
2546 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2547 ql84_mgmt->mgmtp.u.config.id =
2548 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2549 ql84_mgmt->mgmtp.u.config.param0 =
2550 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2551 ql84_mgmt->mgmtp.u.config.param1 =
2552 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2553 ql84_mgmt->mgmtp.u.info.type =
2554 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2555 ql84_mgmt->mgmtp.u.info.context =
2556 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2557
2558 rsp_hdr_len = bsg_job->request_payload.payload_len;
2559
2560 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2561 if (mn == NULL) {
2562 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2563 "failed%lu\n", __func__, ha->host_no));
2564 return -ENOMEM;
2565 }
2566
2567 memset(mn, 0, sizeof (struct access_chip_84xx));
2568
2569 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2570 mn->entry_count = 1;
2571
2572 switch (ql84_mgmt->cmd) {
2573 case QLA84_MGMT_READ_MEM:
2574 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2575 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2576 break;
2577 case QLA84_MGMT_WRITE_MEM:
2578 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2579 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2580 break;
2581 case QLA84_MGMT_CHNG_CONFIG:
2582 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2583 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2584 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2585 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2586 break;
2587 case QLA84_MGMT_GET_INFO:
2588 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2589 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2590 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2591 break;
2592 default:
2593 ret = -EIO;
2594 goto exit_mgmt0;
2595 }
2596
2597 if ((len == ql84_mgmt->len) &&
2598 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2599 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2600 &mgmt_dma, GFP_KERNEL);
2601 if (mgmt_b == NULL) {
2602 DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2603 "failed%lu\n", __func__, ha->host_no));
2604 ret = -ENOMEM;
2605 goto exit_mgmt0;
2606 }
2607 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2608 mn->dseg_count = cpu_to_le16(1);
2609 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2610 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2611 mn->dseg_length = cpu_to_le32(len);
2612
2613 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2614 memcpy(mgmt_b, ql84_mgmt->payload, len);
2615 }
2616 }
2617
2618 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2619 if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2620 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2621 if (ret != QLA_SUCCESS)
2622 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2623 __func__, ha->host_no));
2624 } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2625 (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2626 }
2627
2628 if (mgmt_b)
2629 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2630
2631exit_mgmt0:
2632 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
2633 return ret;
2634}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1263d9796e89..afa95614aaf8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -31,6 +31,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
 
 #define QLA2XXX_DRIVER_NAME	"qla2xxx"
 
@@ -228,6 +229,27 @@ struct srb_logio {
 	uint16_t flags;
 };
 
+struct srb_bsg_ctx {
+#define SRB_ELS_CMD_RPT 3
+#define SRB_ELS_CMD_HST 4
+#define SRB_CT_CMD 5
+	uint16_t type;
+};
+
+struct srb_bsg {
+	struct srb_bsg_ctx ctx;
+	struct fc_bsg_job *bsg_job;
+};
+
+struct msg_echo_lb {
+	dma_addr_t send_dma;
+	dma_addr_t rcv_dma;
+	uint16_t req_sg_cnt;
+	uint16_t rsp_sg_cnt;
+	uint16_t options;
+	uint32_t transfer_size;
+};
+
 /*
  * ISP I/O Register Set structure definitions.
  */
@@ -522,6 +544,8 @@ typedef struct {
 #define MBA_DISCARD_RND_FRAME	0x8048	/* discard RND frame due to error. */
 #define MBA_REJECTED_FCP_CMD	0x8049	/* rejected FCP_CMD. */
 
+/* ISP mailbox loopback echo diagnostic error code */
+#define MBS_LB_RESET	0x17
 /*
  * Firmware options 1, 2, 3.
  */
@@ -2230,6 +2254,13 @@ struct req_que {
 	int max_q_depth;
 };
 
+/* Place holder for FW buffer parameters */
+struct qlfc_fw {
+	void *fw_buf;
+	dma_addr_t fw_dma;
+	uint32_t len;
+};
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -2594,6 +2625,7 @@ struct qla_hw_data {
 	struct qla_statistics qla_stats;
 	struct isp_operations *isp_ops;
 	struct workqueue_struct *wq;
+	struct qlfc_fw fw_buf;
 };
 
 /*
@@ -2766,4 +2798,127 @@ typedef struct scsi_qla_host {
 
 #define CMD_SP(Cmnd)		((Cmnd)->SCp.ptr)
 
2801/*
2802 * BSG Vendor specific commands
2803 */
2804
2805#define QL_VND_LOOPBACK 0x01
2806#define QLA84_RESET 0x02
2807#define QLA84_UPDATE_FW 0x03
2808#define QLA84_MGMT_CMD 0x04
2809
2810/* BSG definations for interpreting CommandSent field */
2811#define INT_DEF_LB_LOOPBACK_CMD 0
2812#define INT_DEF_LB_ECHO_CMD 1
2813
2814/* BSG Vendor specific definations */
2815typedef struct _A84_RESET {
2816 uint16_t Flags;
2817 uint16_t Reserved;
2818#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
2819} __attribute__((packed)) A84_RESET, *PA84_RESET;
2820
2821#define A84_ISSUE_WRITE_TYPE_CMD 0
2822#define A84_ISSUE_READ_TYPE_CMD 1
2823#define A84_CLEANUP_CMD 2
2824#define A84_ISSUE_RESET_OP_FW 3
2825#define A84_ISSUE_RESET_DIAG_FW 4
2826#define A84_ISSUE_UPDATE_OPFW_CMD 5
2827#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
2828
2829struct qla84_mgmt_param {
2830 union {
2831 struct {
2832 uint32_t start_addr;
2833 } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
2834 struct {
2835 uint32_t id;
2836#define QLA84_MGMT_CONFIG_ID_UIF 1
2837#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
2838#define QLA84_MGMT_CONFIG_ID_PAUSE 3
2839#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
2840
2841 uint32_t param0;
2842 uint32_t param1;
2843 } config; /* for QLA84_MGMT_CHNG_CONFIG */
2844
2845 struct {
2846 uint32_t type;
2847#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
2848#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
2849#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
2850#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
2851#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
2852#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
2853#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
2854
2855 uint32_t context;
2856/*
2857* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
2858*/
2859#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
2860#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
2861#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
2862#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
2863#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
2864#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
2865#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
2866#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
2867#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
2868#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
2869
2870/*
2871* context definitions for QLA84_MGMT_INFO_PORT_STAT
2872*/
2873#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
2874#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
2875#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
2876#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
2877#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
2878#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
2879
2880
2881/*
2882* context definitions for QLA84_MGMT_INFO_LIF_STAT
2883*/
2884#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
2885#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
2886#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
2887#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
2888#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
2889
2890 } info; /* for QLA84_MGMT_GET_INFO */
2891 } u;
2892};
2893
2894struct qla84_msg_mgmt {
2895 uint16_t cmd;
2896#define QLA84_MGMT_READ_MEM 0x00
2897#define QLA84_MGMT_WRITE_MEM 0x01
2898#define QLA84_MGMT_CHNG_CONFIG 0x02
2899#define QLA84_MGMT_GET_INFO 0x03
2900 uint16_t rsrvd;
2901 struct qla84_mgmt_param mgmtp;/* parameters for cmd */
2902 uint32_t len; /* bytes in payload following this struct */
2903 uint8_t payload[0]; /* payload for cmd */
2904};
2905
2906struct msg_update_fw {
2907 /*
2908 * diag_fw = 0 operational fw
2909 * otherwise diagnostic fw
2910 * offset, len, fw_len are present to overcome the current limitation
2911 * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
2912 * specifies the byte "offset" where it fits in the fw buffer. The
2913 * number of bytes in each chunk is specified in "len". "fw_len"
2914 * is the total size of fw. The first chunk should start at offset = 0.
2915 * When offset+len == fw_len, the fw is written to the HBA.
2916 */
2917 uint32_t diag_fw;
2918 uint32_t offset;/* start offset */
2919 uint32_t len; /* num bytes in cur xfer */
2920 uint32_t fw_len; /* size of fw in bytes */
2921 uint8_t fw_bytes[0];
2922};
2923
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 66a8da5d7d08..cebf4f1bb7d9 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -627,6 +627,39 @@ struct els_entry_24xx {
 	uint32_t rx_len;		/* Data segment 1 length. */
 };
 
+struct els_sts_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t comp_status;
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t reserved_1;
+
+	uint8_t vp_index;
+	uint8_t sof_type;
+
+	uint32_t rx_xchg_address;	/* Receive exchange address. */
+	uint16_t reserved_2;
+
+	uint8_t opcode;
+	uint8_t reserved_3;
+
+	uint8_t port_id[3];
+	uint8_t reserved_4;
+
+	uint16_t reserved_5;
+
+	uint16_t control_flags;		/* Control flags. */
+	uint32_t total_byte_count;
+	uint32_t error_subcode_1;
+	uint32_t error_subcode_2;
+};
 /*
  * ISP queue - Mailbox Command entry structure definition.
  */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8bc6f53691e9..3a89bc514e2b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -60,6 +60,8 @@ extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
 extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 
+extern fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
 /*
  * Global Data in qla_os.c source file.
  */
@@ -76,6 +78,7 @@ extern int ql2xiidmaenable;
 extern int ql2xmaxqueues;
 extern int ql2xmultique_tag;
 extern int ql2xfwloadbin;
+extern int ql2xetsenable;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -94,7 +97,6 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
 
-extern void qla2x00_abort_fcport_cmds(fc_port_t *);
 extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
 	struct qla_hw_data *);
 extern void qla2x00_free_host(struct scsi_qla_host *);
@@ -154,6 +156,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
 int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
     uint16_t, uint16_t, uint8_t);
 extern int qla2x00_start_sp(srb_t *);
+extern void qla2x00_ctx_sp_free(srb_t *);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -426,6 +429,8 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
 
 /*
  * Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f8e8495b743..a67b2bafb882 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -62,7 +62,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
 	ctx->free(sp);
 }
 
-static void
+void
 qla2x00_ctx_sp_free(srb_t *sp)
 {
 	struct srb_ctx *ctx = sp->ctx;
@@ -338,6 +338,16 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	rval = qla2x00_init_rings(vha);
 	ha->flags.chip_reset_done = 1;
 
+	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
+		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
+		rval = qla84xx_init_chip(vha);
+		if (rval != QLA_SUCCESS) {
+			qla_printk(KERN_ERR, ha,
+			    "Unable to initialize ISP84XX.\n");
+			qla84xx_put_chip(vha);
+		}
+	}
+
 	return (rval);
 }
 
@@ -2216,7 +2226,7 @@ qla2x00_rport_del(void *data)
  *
  * Returns a pointer to the allocated fcport, or NULL, if none available.
  */
-static fc_port_t *
+fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 {
 	fc_port_t *fcport;
@@ -2900,8 +2910,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 		if (qla2x00_is_reserved_id(vha, loop_id))
 			continue;
 
-		if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
+		if (atomic_read(&vha->loop_down_timer) ||
+		    LOOP_TRANSITION(vha)) {
+			atomic_set(&vha->loop_down_timer, 0);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 			break;
+		}
 
 		if (swl != NULL) {
 			if (last_dev) {
@@ -4877,6 +4892,15 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 }
 
 void
-qla81xx_update_fw_options(scsi_qla_host_t *ha)
+qla81xx_update_fw_options(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ql2xetsenable)
+		return;
+
+	/* Enable ETS Burst. */
+	memset(ha->fw_options, 0, sizeof(ha->fw_options));
+	ha->fw_options[2] |= BIT_9;
+	qla2x00_set_fw_options(vha, ha->fw_options);
 }
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5ccac0bef76..8299a9891bfe 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1025,6 +1025,119 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
 	/* Implicit: mbx->mbx10 = 0. */
 }
 
1028static void
1029qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1030{
1031 struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
1032
1033 els_iocb->entry_type = ELS_IOCB_TYPE;
1034 els_iocb->entry_count = 1;
1035 els_iocb->sys_define = 0;
1036 els_iocb->entry_status = 0;
1037 els_iocb->handle = sp->handle;
1038 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1039 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1040 els_iocb->vp_index = sp->fcport->vp_idx;
1041 els_iocb->sof_type = EST_SOFI3;
1042 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1043
1044 els_iocb->opcode =(((struct srb_bsg*)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
1045 bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
1046 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1047 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1048 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1049 els_iocb->control_flags = 0;
1050 els_iocb->rx_byte_count =
1051 cpu_to_le32(bsg_job->reply_payload.payload_len);
1052 els_iocb->tx_byte_count =
1053 cpu_to_le32(bsg_job->request_payload.payload_len);
1054
1055 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1056 (bsg_job->request_payload.sg_list)));
1057 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1058 (bsg_job->request_payload.sg_list)));
1059 els_iocb->tx_len = cpu_to_le32(sg_dma_len
1060 (bsg_job->request_payload.sg_list));
1061
1062 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1063 (bsg_job->reply_payload.sg_list)));
1064 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1065 (bsg_job->reply_payload.sg_list)));
1066 els_iocb->rx_len = cpu_to_le32(sg_dma_len
1067 (bsg_job->reply_payload.sg_list));
1068}
1069
1070static void
1071qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1072{
1073 uint16_t avail_dsds;
1074 uint32_t *cur_dsd;
1075 struct scatterlist *sg;
1076 int index;
1077 uint16_t tot_dsds;
1078 scsi_qla_host_t *vha = sp->fcport->vha;
1079 struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
1080 int loop_iterartion = 0;
1081 int cont_iocb_prsnt = 0;
1082 int entry_count = 1;
1083
1084 ct_iocb->entry_type = CT_IOCB_TYPE;
1085 ct_iocb->entry_status = 0;
1086 ct_iocb->sys_define = 0;
1087 ct_iocb->handle = sp->handle;
1088
1089 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1090 ct_iocb->vp_index = sp->fcport->vp_idx;
1091 ct_iocb->comp_status = __constant_cpu_to_le16(0);
1092
1093 ct_iocb->cmd_dsd_count =
1094 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1095 ct_iocb->timeout = 0;
1096 ct_iocb->rsp_dsd_count =
1097 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1098 ct_iocb->rsp_byte_count =
1099 cpu_to_le32(bsg_job->reply_payload.payload_len);
1100 ct_iocb->cmd_byte_count =
1101 cpu_to_le32(bsg_job->request_payload.payload_len);
1102 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1103 (bsg_job->request_payload.sg_list)));
1104 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1105 (bsg_job->request_payload.sg_list)));
1106 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1107 (bsg_job->request_payload.sg_list));
1108
1109 avail_dsds = 1;
1110 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1111 index = 0;
1112 tot_dsds = bsg_job->reply_payload.sg_cnt;
1113
1114 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1115 dma_addr_t sle_dma;
1116 cont_a64_entry_t *cont_pkt;
1117
1118 /* Allocate additional continuation packets? */
1119 if (avail_dsds == 0) {
1120 /*
1121 * Five DSDs are available in the Cont.
1122 * Type 1 IOCB.
1123 */
1124 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1125 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1126 avail_dsds = 5;
1127 cont_iocb_prsnt = 1;
1128 entry_count++;
1129 }
1130
1131 sle_dma = sg_dma_address(sg);
1132 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1133 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1134 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1135 loop_iterartion++;
1136 avail_dsds--;
1137 }
1138 ct_iocb->entry_count = entry_count;
1139}
1140
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -1052,6 +1165,13 @@ qla2x00_start_sp(srb_t *sp)
 		    qla24xx_logout_iocb(sp, pkt):
 		    qla2x00_logout_iocb(sp, pkt);
 		break;
+	case SRB_ELS_CMD_RPT:
+	case SRB_ELS_CMD_HST:
+		qla24xx_els_iocb(sp, pkt);
+		break;
+	case SRB_CT_CMD:
+		qla24xx_ct_iocb(sp, pkt);
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6fc63b98818c..ab90329ff2e4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -8,6 +8,7 @@
 
 #include <linux/delay.h>
 #include <scsi/scsi_tcq.h>
+#include <scsi/scsi_bsg_fc.h>
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -881,7 +882,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
 		    index);
 		return NULL;
 	}
+
 	req->outstanding_cmds[index] = NULL;
+
 done:
 	return sp;
 }
@@ -982,6 +985,100 @@ done_post_logio_done_work:
 }
 
 static void
988qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
989 struct sts_entry_24xx *pkt, int iocb_type)
990{
991 const char func[] = "ELS_CT_IOCB";
992 const char *type;
993 struct qla_hw_data *ha = vha->hw;
994 srb_t *sp;
995 struct srb_bsg *sp_bsg;
996 struct fc_bsg_job *bsg_job;
997 uint16_t comp_status;
998 uint32_t fw_status[3];
999 uint8_t* fw_sts_ptr;
1000
1001 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1002 if (!sp)
1003 return;
1004 sp_bsg = (struct srb_bsg*)sp->ctx;
1005 bsg_job = sp_bsg->bsg_job;
1006
1007 type = NULL;
1008 switch (sp_bsg->ctx.type) {
1009 case SRB_ELS_CMD_RPT:
1010 case SRB_ELS_CMD_HST:
1011 type = "els";
1012 break;
1013 case SRB_CT_CMD:
1014 type = "ct pass-through";
1015 break;
1016 default:
1017 qla_printk(KERN_WARNING, ha,
1018 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1019 sp_bsg->ctx.type);
1020 return;
1021 }
1022
1023 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1024 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1025 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1026
1027 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1028 * fc payload to the caller
1029 */
1030 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1031 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1032
1033 if (comp_status != CS_COMPLETE) {
1034 if (comp_status == CS_DATA_UNDERRUN) {
1035 bsg_job->reply->result = DID_OK << 16;
1036 bsg_job->reply->reply_payload_rcv_len =
1037 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1038
1039 DEBUG2(qla_printk(KERN_WARNING, ha,
1040 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1041 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1042 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
1043 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
1044 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1045 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1046 }
1047 else {
1048 DEBUG2(qla_printk(KERN_WARNING, ha,
1049 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1050 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1051 vha->host_no, sp->handle, type, comp_status,
1052 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
1053 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
1054 bsg_job->reply->result = DID_ERROR << 16;
1055 bsg_job->reply->reply_payload_rcv_len = 0;
1056 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1057 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1058 }
1059 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1060 }
1061 else {
1062 bsg_job->reply->result = DID_OK << 16;;
1063 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1064 bsg_job->reply_len = 0;
1065 }
1066
1067 dma_unmap_sg(&ha->pdev->dev,
1068 bsg_job->request_payload.sg_list,
1069 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1070 dma_unmap_sg(&ha->pdev->dev,
1071 bsg_job->reply_payload.sg_list,
1072 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1073 if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
1074 (sp_bsg->ctx.type == SRB_CT_CMD))
1075 kfree(sp->fcport);
1076 kfree(sp->ctx);
1077 mempool_free(sp, ha->srb_mempool);
1078 bsg_job->job_done(bsg_job);
1079}
1080
1081static void
 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
     struct logio_entry_24xx *logio)
 {
@@ -1749,6 +1846,13 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			qla24xx_logio_entry(vha, rsp->req,
 			    (struct logio_entry_24xx *)pkt);
 			break;
+		case CT_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+			clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
+			break;
+		case ELS_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+			break;
 		default:
 			/* Type Not Supported. */
 			DEBUG4(printk(KERN_WARNING
@@ -2049,7 +2153,6 @@ qla24xx_msix_default(int irq, void *dev_id)
 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 		complete(&ha->mbx_intr_comp);
 	}
-
 	return IRQ_HANDLED;
 }
 
@@ -2255,10 +2358,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
 
 	if (ha->flags.msix_enabled)
 		qla24xx_disable_msix(ha);
-	else if (ha->flags.inta_enabled) {
+	else if (ha->flags.msi_enabled) {
 		free_irq(ha->pdev->irq, rsp);
 		pci_disable_msi(ha->pdev);
-	}
+	} else
+		free_irq(ha->pdev->irq, rsp);
 }
 
 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 056e4d4505f3..6e53bdbb1da8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3636,6 +3636,157 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
 }
 
 int
3639qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
3640{
3641 int rval;
3642 mbx_cmd_t mc;
3643 mbx_cmd_t *mcp = &mc;
3644 uint32_t iter_cnt = 0x1;
3645
3646 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
3647
3648 memset(mcp->mb, 0 , sizeof(mcp->mb));
3649 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
3650 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
3651
3652 /* transfer count */
3653 mcp->mb[10] = LSW(mreq->transfer_size);
3654 mcp->mb[11] = MSW(mreq->transfer_size);
3655
3656 /* send data address */
3657 mcp->mb[14] = LSW(mreq->send_dma);
3658 mcp->mb[15] = MSW(mreq->send_dma);
3659 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3660 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3661
3662 /* recieve data address */
3663 mcp->mb[16] = LSW(mreq->rcv_dma);
3664 mcp->mb[17] = MSW(mreq->rcv_dma);
3665 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3666 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3667
3668 /* Iteration count */
3669 mcp->mb[18] = LSW(iter_cnt);
3670 mcp->mb[19] = MSW(iter_cnt);
3671
3672 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3673 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3674 if (IS_QLA81XX(vha->hw))
3675 mcp->out_mb |= MBX_2;
3676 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3677
3678 mcp->buf_size = mreq->transfer_size;
3679 mcp->tov = MBX_TOV_SECONDS;
3680 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3681
3682 rval = qla2x00_mailbox_command(vha, mcp);
3683
3684 if (rval != QLA_SUCCESS) {
3685 DEBUG2(printk(KERN_WARNING
3686 "(%ld): failed=%x mb[0]=0x%x "
3687 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval,
3688 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3689 } else {
3690 DEBUG2(printk(KERN_WARNING
3691 "scsi(%ld): done.\n", vha->host_no));
3692 }
3693
3694 /* Copy mailbox information */
3695 memcpy( mresp, mcp->mb, 64);
3696 mresp[3] = mcp->mb[18];
3697 mresp[4] = mcp->mb[19];
3698 return rval;
3699}
3700
3701int
3702qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
3703{
3704 int rval;
3705 mbx_cmd_t mc;
3706 mbx_cmd_t *mcp = &mc;
3707 struct qla_hw_data *ha = vha->hw;
3708
3709 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
3710
3711 memset(mcp->mb, 0 , sizeof(mcp->mb));
3712 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
3713 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
3714 if (IS_QLA81XX(ha))
3715 mcp->mb[1] |= BIT_15;
3716 mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
3717 mcp->mb[16] = LSW(mreq->rcv_dma);
3718 mcp->mb[17] = MSW(mreq->rcv_dma);
3719 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3720 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3721
3722 mcp->mb[10] = LSW(mreq->transfer_size);
3723
3724 mcp->mb[14] = LSW(mreq->send_dma);
3725 mcp->mb[15] = MSW(mreq->send_dma);
3726 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3727 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3728
3729 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3730 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3731 if (IS_QLA81XX(ha))
3732 mcp->out_mb |= MBX_2;
3733
3734 mcp->in_mb = MBX_0;
3735 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
3736 mcp->in_mb |= MBX_1;
3737 if (IS_QLA81XX(ha))
3738 mcp->in_mb |= MBX_3;
3739
3740 mcp->tov = MBX_TOV_SECONDS;
3741 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3742 mcp->buf_size = mreq->transfer_size;
3743
3744 rval = qla2x00_mailbox_command(vha, mcp);
3745
3746 if (rval != QLA_SUCCESS) {
3747 DEBUG2(printk(KERN_WARNING
3748 "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
3749 vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
3750 } else {
3751 DEBUG2(printk(KERN_WARNING
3752 "scsi(%ld): done.\n", vha->host_no));
3753 }
3754
3755 /* Copy mailbox information */
3756 memcpy(mresp, mcp->mb, 32);
3757 return rval;
3758}
3759int
3760qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
3761 uint16_t *cmd_status)
3762{
3763 int rval;
3764 mbx_cmd_t mc;
3765 mbx_cmd_t *mcp = &mc;
3766
3767 DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
3768 ha->host_no, enable_diagnostic));
3769
3770 mcp->mb[0] = MBC_ISP84XX_RESET;
3771 mcp->mb[1] = enable_diagnostic;
3772 mcp->out_mb = MBX_1|MBX_0;
3773 mcp->in_mb = MBX_1|MBX_0;
3774 mcp->tov = MBX_TOV_SECONDS;
3775 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3776 rval = qla2x00_mailbox_command(ha, mcp);
3777
3778 /* Return mailbox statuses. */
3779 *cmd_status = mcp->mb[0];
3780 if (rval != QLA_SUCCESS)
3781 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
3782 rval));
3783 else
3784 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
3785
3786 return rval;
3787}
3788
3789int
3639qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 3790qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3640{ 3791{
3641 int rval; 3792 int rval;
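
With BIT_6 set in mb[1], the loopback and ECHO diagnostics above pass 64-bit DMA buffer addresses, each spread across four 16-bit mailbox registers. A minimal sketch of that split, assuming the driver's LSW/MSW/MSD helpers return the low 16 bits, the upper 16 bits of a 32-bit value, and the upper 32 bits of a 64-bit value (cf. qla_def.h):

	#include <stdint.h>

	/* Assumed helper semantics (cf. qla_def.h). */
	#define LSW(x)	((uint16_t)(x))				/* bits 15:0  */
	#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))	/* bits 31:16 */
	#define MSD(x)	((uint32_t)((uint64_t)(x) >> 32))	/* bits 63:32 */

	/* Mirrors the mb[14]/mb[15]/mb[20]/mb[21] writes for the send buffer. */
	static void load_send_dma(uint16_t *mb, uint64_t send_dma)
	{
		mb[14] = LSW(send_dma);		/* address bits 15:0  */
		mb[15] = MSW(send_dma);		/* address bits 31:16 */
		mb[20] = LSW(MSD(send_dma));	/* address bits 47:32 */
		mb[21] = MSW(MSD(send_dma));	/* address bits 63:48 */
	}

The receive buffer follows the same pattern: mb[16]/mb[17] carry the low 32 bits and mb[6]/mb[7] the high 32 bits.
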
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8529eb1f3cd4..46720b23028f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -107,6 +107,12 @@ MODULE_PARM_DESC(ql2xfwloadbin,
107 " 1 -- load firmware from flash.\n" 107 " 1 -- load firmware from flash.\n"
108 " 0 -- use default semantics.\n"); 108 " 0 -- use default semantics.\n");
109 109
110int ql2xetsenable;
111module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
112MODULE_PARM_DESC(ql2xetsenable,
113 "Enables firmware ETS burst."
114 "Default is 0 - skip ETS enablement.");
115
110/* 116/*
111 * SCSI host template entry points 117 * SCSI host template entry points
112 */ 118 */
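
The ql2xetsenable parameter added above is declared read-only (S_IRUGO), so it takes effect only at load time, e.g. "modprobe qla2xxx ql2xetsenable=1", or "qla2xxx.ql2xetsenable=1" on the kernel command line when the driver is built in; the value is then visible under /sys/module/qla2xxx/parameters/ql2xetsenable.
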
@@ -682,44 +688,6 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
682 return (return_status); 688 return (return_status);
683} 689}
684 690
685void
686qla2x00_abort_fcport_cmds(fc_port_t *fcport)
687{
688 int cnt;
689 unsigned long flags;
690 srb_t *sp;
691 scsi_qla_host_t *vha = fcport->vha;
692 struct qla_hw_data *ha = vha->hw;
693 struct req_que *req;
694
695 spin_lock_irqsave(&ha->hardware_lock, flags);
696 req = vha->req;
697 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
698 sp = req->outstanding_cmds[cnt];
699 if (!sp)
700 continue;
701 if (sp->fcport != fcport)
702 continue;
703 if (sp->ctx)
704 continue;
705
706 spin_unlock_irqrestore(&ha->hardware_lock, flags);
707 if (ha->isp_ops->abort_command(sp)) {
708 DEBUG2(qla_printk(KERN_WARNING, ha,
709 "Abort failed -- %lx\n",
710 sp->cmd->serial_number));
711 } else {
712 if (qla2x00_eh_wait_on_command(sp->cmd) !=
713 QLA_SUCCESS)
714 DEBUG2(qla_printk(KERN_WARNING, ha,
715 "Abort failed while waiting -- %lx\n",
716 sp->cmd->serial_number));
717 }
718 spin_lock_irqsave(&ha->hardware_lock, flags);
719 }
720 spin_unlock_irqrestore(&ha->hardware_lock, flags);
721}
722
723/************************************************************************** 691/**************************************************************************
724* qla2xxx_eh_abort 692* qla2xxx_eh_abort
725* 693*
@@ -1095,6 +1063,20 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1095 struct fc_port *fcport; 1063 struct fc_port *fcport;
1096 struct qla_hw_data *ha = vha->hw; 1064 struct qla_hw_data *ha = vha->hw;
1097 1065
1066 if (ha->flags.enable_target_reset) {
1067 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1068 if (fcport->port_type != FCT_TARGET)
1069 continue;
1070
1071 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1072 if (ret != QLA_SUCCESS) {
1073 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1074 "target_reset=%d d_id=%x.\n", __func__,
1075 vha->host_no, ret, fcport->d_id.b24));
1076 }
1077 }
1078 }
1079
1098 if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) { 1080 if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
1099 ret = qla2x00_full_login_lip(vha); 1081 ret = qla2x00_full_login_lip(vha);
1100 if (ret != QLA_SUCCESS) { 1082 if (ret != QLA_SUCCESS) {
@@ -1117,19 +1099,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1117 qla2x00_wait_for_loop_ready(vha); 1099 qla2x00_wait_for_loop_ready(vha);
1118 } 1100 }
1119 1101
1120 if (ha->flags.enable_target_reset) {
1121 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1122 if (fcport->port_type != FCT_TARGET)
1123 continue;
1124
1125 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1126 if (ret != QLA_SUCCESS) {
1127 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1128 "target_reset=%d d_id=%x.\n", __func__,
1129 vha->host_no, ret, fcport->d_id.b24));
1130 }
1131 }
1132 }
1133 /* Issue marker command only when we are going to start the I/O */ 1102 /* Issue marker command only when we are going to start the I/O */
1134 vha->marker_needed = 1; 1103 vha->marker_needed = 1;
1135 1104
@@ -1160,8 +1129,19 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1160 qla2x00_sp_compl(ha, sp); 1129 qla2x00_sp_compl(ha, sp);
1161 } else { 1130 } else {
1162 ctx = sp->ctx; 1131 ctx = sp->ctx;
1163 del_timer_sync(&ctx->timer); 1132 if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
1164 ctx->free(sp); 1133 del_timer_sync(&ctx->timer);
1134 ctx->free(sp);
1135 } else {
1136 struct srb_bsg *sp_bsg = (struct srb_bsg *)sp->ctx;
1137 if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
1138 kfree(sp->fcport);
1139 sp_bsg->bsg_job->req->errors = 0;
1140 sp_bsg->bsg_job->reply->result = res;
1141 sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
1142 kfree(sp->ctx);
1143 mempool_free(sp, ha->srb_mempool);
1144 }
1165 } 1145 }
1166 } 1146 }
1167 } 1147 }
@@ -1258,7 +1238,7 @@ qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1258 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth); 1238 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1259 break; 1239 break;
1260 default: 1240 default:
1261 return EOPNOTSUPP; 1241 return -EOPNOTSUPP;
1262 } 1242 }
1263 1243
1264 return sdev->queue_depth; 1244 return sdev->queue_depth;
@@ -1818,7 +1798,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1818 /* Set EEH reset type to fundamental if required by hba */ 1798 /* Set EEH reset type to fundamental if required by hba */
1819 if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { 1799 if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1820 pdev->needs_freset = 1; 1800 pdev->needs_freset = 1;
1821 pci_save_state(pdev);
1822 } 1801 }
1823 1802
1824 /* Configure PCI I/O space */ 1803 /* Configure PCI I/O space */
@@ -1970,11 +1949,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1970 host->max_channel = MAX_BUSES - 1; 1949 host->max_channel = MAX_BUSES - 1;
1971 host->max_lun = MAX_LUNS; 1950 host->max_lun = MAX_LUNS;
1972 host->transportt = qla2xxx_transport_template; 1951 host->transportt = qla2xxx_transport_template;
1952 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
1973 1953
1974 /* Set up the irqs */ 1954 /* Set up the irqs */
1975 ret = qla2x00_request_irqs(ha, rsp); 1955 ret = qla2x00_request_irqs(ha, rsp);
1976 if (ret) 1956 if (ret)
1977 goto probe_init_failed; 1957 goto probe_init_failed;
1958
1959 pci_save_state(pdev);
1960
1978 /* Alloc arrays of request and response ring ptrs */ 1961 /* Alloc arrays of request and response ring ptrs */
1979que_init: 1962que_init:
1980 if (!qla2x00_alloc_queues(ha)) { 1963 if (!qla2x00_alloc_queues(ha)) {
@@ -2176,6 +2159,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
2176 kfree(ha); 2159 kfree(ha);
2177 ha = NULL; 2160 ha = NULL;
2178 2161
2162 pci_disable_pcie_error_reporting(pdev);
2163
2179 pci_disable_device(pdev); 2164 pci_disable_device(pdev);
2180 pci_set_drvdata(pdev, NULL); 2165 pci_set_drvdata(pdev, NULL);
2181} 2166}
@@ -3310,6 +3295,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3310 return PCI_ERS_RESULT_CAN_RECOVER; 3295 return PCI_ERS_RESULT_CAN_RECOVER;
3311 case pci_channel_io_frozen: 3296 case pci_channel_io_frozen:
3312 ha->flags.eeh_busy = 1; 3297 ha->flags.eeh_busy = 1;
3298 qla2x00_free_irqs(vha);
3313 pci_disable_device(pdev); 3299 pci_disable_device(pdev);
3314 return PCI_ERS_RESULT_NEED_RESET; 3300 return PCI_ERS_RESULT_NEED_RESET;
3315 case pci_channel_io_perm_failure: 3301 case pci_channel_io_perm_failure:
@@ -3363,10 +3349,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3363 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 3349 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
3364 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 3350 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3365 struct qla_hw_data *ha = base_vha->hw; 3351 struct qla_hw_data *ha = base_vha->hw;
3366 int rc; 3352 struct rsp_que *rsp;
3353 int rc, retries = 10;
3367 3354
3368 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); 3355 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3369 3356
3357 /* Workaround: the qla2xxx driver, which accesses hardware early during
3358 * slot reset, needs the error state to be pci_channel_io_normal.
3359 * Otherwise mailbox commands time out.
3360 */
3361 pdev->error_state = pci_channel_io_normal;
3362
3363 pci_restore_state(pdev);
3364
3365 /* pci_restore_state() clears the device's saved_state flag, so
3366 * save the just-restored state again to re-set saved_state.
3367 */
3368 pci_save_state(pdev);
3369
3370 if (ha->mem_only) 3370 if (ha->mem_only)
3371 rc = pci_enable_device_mem(pdev); 3371 rc = pci_enable_device_mem(pdev);
3372 else 3372 else
@@ -3378,27 +3378,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3378 return ret; 3378 return ret;
3379 } 3379 }
3380 3380
3381 rsp = ha->rsp_q_map[0];
3382 if (qla2x00_request_irqs(ha, rsp))
3383 return ret;
3384
3381 if (ha->isp_ops->pci_config(base_vha)) 3385 if (ha->isp_ops->pci_config(base_vha))
3382 return ret; 3386 return ret;
3383 3387
3384#ifdef QL_DEBUG_LEVEL_17 3388 while (ha->flags.mbox_busy && retries--)
3385 { 3389 msleep(1000);
3386 uint8_t b;
3387 uint32_t i;
3388 3390
3389 printk("slot_reset_1: ");
3390 for (i = 0; i < 256; i++) {
3391 pci_read_config_byte(ha->pdev, i, &b);
3392 printk("%s%02x", (i%16) ? " " : "\n", b);
3393 }
3394 printk("\n");
3395 }
3396#endif
3397 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3391 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3398 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) 3392 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
3399 ret = PCI_ERS_RESULT_RECOVERED; 3393 ret = PCI_ERS_RESULT_RECOVERED;
3400 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3394 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3401 3395
3396 pci_cleanup_aer_uncorrect_error_status(pdev);
3397
3402 DEBUG17(qla_printk(KERN_WARNING, ha, 3398 DEBUG17(qla_printk(KERN_WARNING, ha,
3403 "slot_reset-return:ret=%x\n", ret)); 3399 "slot_reset-return:ret=%x\n", ret));
3404 3400
@@ -3422,8 +3418,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
3422 } 3418 }
3423 3419
3424 ha->flags.eeh_busy = 0; 3420 ha->flags.eeh_busy = 0;
3425
3426 pci_cleanup_aer_uncorrect_error_status(pdev);
3427} 3421}
3428 3422
3429static struct pci_error_handlers qla2xxx_err_handler = { 3423static struct pci_error_handlers qla2xxx_err_handler = {
@@ -3536,4 +3530,3 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
3536MODULE_FIRMWARE(FW_FILE_ISP2322); 3530MODULE_FIRMWARE(FW_FILE_ISP2322);
3537MODULE_FIRMWARE(FW_FILE_ISP24XX); 3531MODULE_FIRMWARE(FW_FILE_ISP24XX);
3538MODULE_FIRMWARE(FW_FILE_ISP25XX); 3532MODULE_FIRMWARE(FW_FILE_ISP25XX);
3539MODULE_FIRMWARE(FW_FILE_ISP81XX);
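
Taken together, the slot_reset hunks above produce the following recovery sequence; a condensed sketch assembled from the hunks (error paths abbreviated, not the verbatim resulting function):

	/* Condensed sketch of qla2xxx_pci_slot_reset() after this patch. */
	pdev->error_state = pci_channel_io_normal;	/* allow early mailbox access */
	pci_restore_state(pdev);
	pci_save_state(pdev);				/* pci_restore_state() cleared saved_state */
	rc = ha->mem_only ? pci_enable_device_mem(pdev) : pci_enable_device(pdev);
	if (rc)
		return PCI_ERS_RESULT_DISCONNECT;
	if (qla2x00_request_irqs(ha, ha->rsp_q_map[0]))	/* IRQs were freed in error_detected() */
		return PCI_ERS_RESULT_DISCONNECT;
	if (ha->isp_ops->pci_config(base_vha))
		return PCI_ERS_RESULT_DISCONNECT;
	while (ha->flags.mbox_busy && retries--)	/* let any in-flight mailbox drain */
		msleep(1000);
	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
		ret = PCI_ERS_RESULT_RECOVERED;
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return ret;
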
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ed36279a33c1..8d2fc2fa7a6b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k10" 10#define QLA2XXX_VERSION "8.03.02-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 1 14#define QLA_DRIVER_PATCH_VER 2
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 1