aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/qla2xxx
diff options
context:
space:
mode:
authorGiridhar Malavali <giridhar.malavali@qlogic.com>2010-03-19 20:03:58 -0400
committerJames Bottomley <James.Bottomley@suse.de>2010-04-11 10:44:10 -0400
commit6e98016ca077c5c751167bfdb1a3a2a3bee581cf (patch)
tree4704944f9e379e8ad2035d4257b96cb888f1b549 /drivers/scsi/qla2xxx
parent077424e2e2c97c830d903891dfcd1532068b85b7 (diff)
[SCSI] qla2xxx: Re-organized BSG interface specific code.
1. Segregate BSG interface specific code to new files. 2. Handle multiple vendor specific commands indepedently. 3. Reorganised support for reset, management and update FCoE firmware commands. 4. Fixed memory leak issue in Loopback. 5. Added new vendor command to support iiDMA using BSG interface. 6. Proper cleanup of dma mapped and dma allocated buffers for BSG request. [jejb: fix up conflict and merge in Jiri Slaby lock imbalance patch] Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au> Signed-off-by: Harish Zunjarrao <harish.zunjarrao@qlogic.com> Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com> Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c702
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c1040
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h135
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h125
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c49
7 files changed, 1230 insertions, 831 deletions
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c51fd1f8663..1014db6f992 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,4 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o
3 3
4obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 4obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c7ef55966f..90bf7ad42f6 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -12,9 +12,7 @@
12#include <linux/delay.h> 12#include <linux/delay.h>
13 13
14static int qla24xx_vport_disable(struct fc_vport *, bool); 14static int qla24xx_vport_disable(struct fc_vport *, bool);
15static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *); 15
16int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
17static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
18/* SYSFS attributes --------------------------------------------------------- */ 16/* SYSFS attributes --------------------------------------------------------- */
19 17
20static ssize_t 18static ssize_t
@@ -1825,582 +1823,6 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1825 return 0; 1823 return 0;
1826} 1824}
1827 1825
1828/* BSG support for ELS/CT pass through */
1829inline srb_t *
1830qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1831{
1832 srb_t *sp;
1833 struct qla_hw_data *ha = vha->hw;
1834 struct srb_bsg_ctx *ctx;
1835
1836 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1837 if (!sp)
1838 goto done;
1839 ctx = kzalloc(size, GFP_KERNEL);
1840 if (!ctx) {
1841 mempool_free(sp, ha->srb_mempool);
1842 goto done;
1843 }
1844
1845 memset(sp, 0, sizeof(*sp));
1846 sp->fcport = fcport;
1847 sp->ctx = ctx;
1848done:
1849 return sp;
1850}
1851
1852static int
1853qla2x00_process_els(struct fc_bsg_job *bsg_job)
1854{
1855 struct fc_rport *rport;
1856 fc_port_t *fcport;
1857 struct Scsi_Host *host;
1858 scsi_qla_host_t *vha;
1859 struct qla_hw_data *ha;
1860 srb_t *sp;
1861 const char *type;
1862 int req_sg_cnt, rsp_sg_cnt;
1863 int rval = (DRIVER_ERROR << 16);
1864 uint16_t nextlid = 0;
1865 struct srb_bsg *els;
1866
1867 /* Multiple SG's are not supported for ELS requests */
1868 if (bsg_job->request_payload.sg_cnt > 1 ||
1869 bsg_job->reply_payload.sg_cnt > 1) {
1870 DEBUG2(printk(KERN_INFO
1871 "multiple SG's are not supported for ELS requests"
1872 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1873 bsg_job->request_payload.sg_cnt,
1874 bsg_job->reply_payload.sg_cnt));
1875 rval = -EPERM;
1876 goto done;
1877 }
1878
1879 /* ELS request for rport */
1880 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1881 rport = bsg_job->rport;
1882 fcport = *(fc_port_t **) rport->dd_data;
1883 host = rport_to_shost(rport);
1884 vha = shost_priv(host);
1885 ha = vha->hw;
1886 type = "FC_BSG_RPT_ELS";
1887
1888 /* make sure the rport is logged in,
1889 * if not perform fabric login
1890 */
1891 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1892 DEBUG2(qla_printk(KERN_WARNING, ha,
1893 "failed to login port %06X for ELS passthru\n",
1894 fcport->d_id.b24));
1895 rval = -EIO;
1896 goto done;
1897 }
1898 } else {
1899 host = bsg_job->shost;
1900 vha = shost_priv(host);
1901 ha = vha->hw;
1902 type = "FC_BSG_HST_ELS_NOLOGIN";
1903
1904 /* Allocate a dummy fcport structure, since functions
1905 * preparing the IOCB and mailbox command retrieves port
1906 * specific information from fcport structure. For Host based
1907 * ELS commands there will be no fcport structure allocated
1908 */
1909 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1910 if (!fcport) {
1911 rval = -ENOMEM;
1912 goto done;
1913 }
1914
1915 /* Initialize all required fields of fcport */
1916 fcport->vha = vha;
1917 fcport->vp_idx = vha->vp_idx;
1918 fcport->d_id.b.al_pa =
1919 bsg_job->request->rqst_data.h_els.port_id[0];
1920 fcport->d_id.b.area =
1921 bsg_job->request->rqst_data.h_els.port_id[1];
1922 fcport->d_id.b.domain =
1923 bsg_job->request->rqst_data.h_els.port_id[2];
1924 fcport->loop_id =
1925 (fcport->d_id.b.al_pa == 0xFD) ?
1926 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1927 }
1928
1929 if (!vha->flags.online) {
1930 DEBUG2(qla_printk(KERN_WARNING, ha,
1931 "host not online\n"));
1932 rval = -EIO;
1933 goto done;
1934 }
1935
1936 req_sg_cnt =
1937 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1938 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1939 if (!req_sg_cnt) {
1940 rval = -ENOMEM;
1941 goto done_free_fcport;
1942 }
1943 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1944 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1945 if (!rsp_sg_cnt) {
1946 rval = -ENOMEM;
1947 goto done_free_fcport;
1948 }
1949
1950 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1951 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1952 {
1953 DEBUG2(printk(KERN_INFO
1954 "dma mapping resulted in different sg counts \
1955 [request_sg_cnt: %x dma_request_sg_cnt: %x\
1956 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1957 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1958 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1959 rval = -EAGAIN;
1960 goto done_unmap_sg;
1961 }
1962
1963 /* Alloc SRB structure */
1964 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1965 if (!sp) {
1966 rval = -ENOMEM;
1967 goto done_unmap_sg;
1968 }
1969
1970 els = sp->ctx;
1971 els->ctx.type =
1972 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1973 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1974 els->bsg_job = bsg_job;
1975
1976 DEBUG2(qla_printk(KERN_INFO, ha,
1977 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1978 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1979 bsg_job->request->rqst_data.h_els.command_code,
1980 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1981 fcport->d_id.b.al_pa));
1982
1983 rval = qla2x00_start_sp(sp);
1984 if (rval != QLA_SUCCESS) {
1985 kfree(sp->ctx);
1986 mempool_free(sp, ha->srb_mempool);
1987 rval = -EIO;
1988 goto done_unmap_sg;
1989 }
1990 return rval;
1991
1992done_unmap_sg:
1993 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1994 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1995 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1996 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1997 goto done_free_fcport;
1998
1999done_free_fcport:
2000 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
2001 kfree(fcport);
2002done:
2003 return rval;
2004}
2005
2006static int
2007qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2008{
2009 srb_t *sp;
2010 struct Scsi_Host *host = bsg_job->shost;
2011 scsi_qla_host_t *vha = shost_priv(host);
2012 struct qla_hw_data *ha = vha->hw;
2013 int rval = (DRIVER_ERROR << 16);
2014 int req_sg_cnt, rsp_sg_cnt;
2015 uint16_t loop_id;
2016 struct fc_port *fcport;
2017 char *type = "FC_BSG_HST_CT";
2018 struct srb_bsg *ct;
2019
2020 /* pass through is supported only for ISP 4Gb or higher */
2021 if (!IS_FWI2_CAPABLE(ha)) {
2022 DEBUG2(qla_printk(KERN_INFO, ha,
2023 "scsi(%ld):Firmware is not capable to support FC "
2024 "CT pass thru\n", vha->host_no));
2025 rval = -EPERM;
2026 goto done;
2027 }
2028
2029 req_sg_cnt =
2030 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2031 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2032 if (!req_sg_cnt) {
2033 rval = -ENOMEM;
2034 goto done;
2035 }
2036
2037 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2038 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2039 if (!rsp_sg_cnt) {
2040 rval = -ENOMEM;
2041 goto done;
2042 }
2043
2044 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2045 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2046 {
2047 DEBUG2(qla_printk(KERN_WARNING, ha,
2048 "dma mapping resulted in different sg counts \
2049 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2050 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2051 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2052 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2053 rval = -EAGAIN;
2054 goto done_unmap_sg;
2055 }
2056
2057 if (!vha->flags.online) {
2058 DEBUG2(qla_printk(KERN_WARNING, ha,
2059 "host not online\n"));
2060 rval = -EIO;
2061 goto done_unmap_sg;
2062 }
2063
2064 loop_id =
2065 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2066 >> 24;
2067 switch (loop_id) {
2068 case 0xFC:
2069 loop_id = cpu_to_le16(NPH_SNS);
2070 break;
2071 case 0xFA:
2072 loop_id = vha->mgmt_svr_loop_id;
2073 break;
2074 default:
2075 DEBUG2(qla_printk(KERN_INFO, ha,
2076 "Unknown loop id: %x\n", loop_id));
2077 rval = -EINVAL;
2078 goto done_unmap_sg;
2079 }
2080
2081 /* Allocate a dummy fcport structure, since functions preparing the
2082 * IOCB and mailbox command retrieves port specific information
2083 * from fcport structure. For Host based ELS commands there will be
2084 * no fcport structure allocated
2085 */
2086 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2087 if (!fcport)
2088 {
2089 rval = -ENOMEM;
2090 goto done_unmap_sg;
2091 }
2092
2093 /* Initialize all required fields of fcport */
2094 fcport->vha = vha;
2095 fcport->vp_idx = vha->vp_idx;
2096 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2097 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2098 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2099 fcport->loop_id = loop_id;
2100
2101 /* Alloc SRB structure */
2102 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2103 if (!sp) {
2104 rval = -ENOMEM;
2105 goto done_free_fcport;
2106 }
2107
2108 ct = sp->ctx;
2109 ct->ctx.type = SRB_CT_CMD;
2110 ct->bsg_job = bsg_job;
2111
2112 DEBUG2(qla_printk(KERN_INFO, ha,
2113 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2114 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2115 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2116 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2117 fcport->d_id.b.al_pa));
2118
2119 rval = qla2x00_start_sp(sp);
2120 if (rval != QLA_SUCCESS) {
2121 kfree(sp->ctx);
2122 mempool_free(sp, ha->srb_mempool);
2123 rval = -EIO;
2124 goto done_free_fcport;
2125 }
2126 return rval;
2127
2128done_free_fcport:
2129 kfree(fcport);
2130done_unmap_sg:
2131 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2132 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2133 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2134 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2135done:
2136 return rval;
2137}
2138
2139static int
2140qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2141{
2142 struct Scsi_Host *host = bsg_job->shost;
2143 scsi_qla_host_t *vha = shost_priv(host);
2144 struct qla_hw_data *ha = vha->hw;
2145 int rval;
2146 uint8_t command_sent;
2147 uint32_t vendor_cmd;
2148 char *type;
2149 struct msg_echo_lb elreq;
2150 uint16_t response[MAILBOX_REGISTER_COUNT];
2151 uint8_t* fw_sts_ptr;
2152 uint8_t *req_data;
2153 dma_addr_t req_data_dma;
2154 uint32_t req_data_len;
2155 uint8_t *rsp_data;
2156 dma_addr_t rsp_data_dma;
2157 uint32_t rsp_data_len;
2158
2159 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2160 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2161 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2162 rval = -EBUSY;
2163 goto done;
2164 }
2165
2166 if (!vha->flags.online) {
2167 DEBUG2(qla_printk(KERN_WARNING, ha,
2168 "host not online\n"));
2169 rval = -EIO;
2170 goto done;
2171 }
2172
2173 elreq.req_sg_cnt =
2174 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2175 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2176 if (!elreq.req_sg_cnt) {
2177 rval = -ENOMEM;
2178 goto done;
2179 }
2180 elreq.rsp_sg_cnt =
2181 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2182 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2183 if (!elreq.rsp_sg_cnt) {
2184 rval = -ENOMEM;
2185 goto done;
2186 }
2187
2188 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2189 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2190 {
2191 DEBUG2(printk(KERN_INFO
2192 "dma mapping resulted in different sg counts \
2193 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2194 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2195 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2196 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2197 rval = -EAGAIN;
2198 goto done_unmap_sg;
2199 }
2200 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2201 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2202 &req_data_dma, GFP_KERNEL);
2203
2204 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2205 &rsp_data_dma, GFP_KERNEL);
2206
2207 /* Copy the request buffer in req_data now */
2208 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2209 bsg_job->request_payload.sg_cnt, req_data,
2210 req_data_len);
2211
2212 elreq.send_dma = req_data_dma;
2213 elreq.rcv_dma = rsp_data_dma;
2214 elreq.transfer_size = req_data_len;
2215
2216 /* Vendor cmd : loopback or ECHO diagnostic
2217 * Options:
2218 * Loopback : Either internal or external loopback
2219 * ECHO: ECHO ELS or Vendor specific FC4 link data
2220 */
2221 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2222 elreq.options =
2223 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2224 + 1);
2225
2226 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2227 case QL_VND_LOOPBACK:
2228 if (ha->current_topology != ISP_CFG_F) {
2229 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2230
2231 DEBUG2(qla_printk(KERN_INFO, ha,
2232 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2233 vha->host_no, type, vendor_cmd, elreq.options));
2234
2235 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2236 rval = qla2x00_loopback_test(vha, &elreq, response);
2237 if (IS_QLA81XX(ha)) {
2238 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2239 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2240 "ISP\n", __func__, vha->host_no));
2241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2242 qla2xxx_wake_dpc(vha);
2243 }
2244 }
2245 } else {
2246 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2247 DEBUG2(qla_printk(KERN_INFO, ha,
2248 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2249 vha->host_no, type, vendor_cmd, elreq.options));
2250
2251 command_sent = INT_DEF_LB_ECHO_CMD;
2252 rval = qla2x00_echo_test(vha, &elreq, response);
2253 }
2254 break;
2255 case QLA84_RESET:
2256 if (!IS_QLA84XX(vha->hw)) {
2257 rval = -EINVAL;
2258 DEBUG16(printk(
2259 "%s(%ld): 8xxx exiting.\n",
2260 __func__, vha->host_no));
2261 return rval;
2262 }
2263 rval = qla84xx_reset(vha, &elreq, bsg_job);
2264 break;
2265 case QLA84_MGMT_CMD:
2266 if (!IS_QLA84XX(vha->hw)) {
2267 rval = -EINVAL;
2268 DEBUG16(printk(
2269 "%s(%ld): 8xxx exiting.\n",
2270 __func__, vha->host_no));
2271 return rval;
2272 }
2273 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2274 break;
2275 default:
2276 rval = -ENOSYS;
2277 }
2278
2279 if (rval != QLA_SUCCESS) {
2280 DEBUG2(qla_printk(KERN_WARNING, ha,
2281 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2282 rval = 0;
2283 bsg_job->reply->result = (DID_ERROR << 16);
2284 bsg_job->reply->reply_payload_rcv_len = 0;
2285 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2286 memcpy( fw_sts_ptr, response, sizeof(response));
2287 fw_sts_ptr += sizeof(response);
2288 *fw_sts_ptr = command_sent;
2289 } else {
2290 DEBUG2(qla_printk(KERN_WARNING, ha,
2291 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2292 rval = bsg_job->reply->result = 0;
2293 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2294 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2295 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2296 memcpy(fw_sts_ptr, response, sizeof(response));
2297 fw_sts_ptr += sizeof(response);
2298 *fw_sts_ptr = command_sent;
2299 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2300 bsg_job->reply_payload.sg_cnt, rsp_data,
2301 rsp_data_len);
2302 }
2303 bsg_job->job_done(bsg_job);
2304
2305done_unmap_sg:
2306
2307 if(req_data)
2308 dma_free_coherent(&ha->pdev->dev, req_data_len,
2309 req_data, req_data_dma);
2310 dma_unmap_sg(&ha->pdev->dev,
2311 bsg_job->request_payload.sg_list,
2312 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2313 dma_unmap_sg(&ha->pdev->dev,
2314 bsg_job->reply_payload.sg_list,
2315 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2316
2317done:
2318 return rval;
2319}
2320
2321static int
2322qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2323{
2324 int ret = -EINVAL;
2325
2326 switch (bsg_job->request->msgcode) {
2327 case FC_BSG_RPT_ELS:
2328 case FC_BSG_HST_ELS_NOLOGIN:
2329 ret = qla2x00_process_els(bsg_job);
2330 break;
2331 case FC_BSG_HST_CT:
2332 ret = qla2x00_process_ct(bsg_job);
2333 break;
2334 case FC_BSG_HST_VENDOR:
2335 ret = qla2x00_process_vendor_specific(bsg_job);
2336 break;
2337 case FC_BSG_HST_ADD_RPORT:
2338 case FC_BSG_HST_DEL_RPORT:
2339 case FC_BSG_RPT_CT:
2340 default:
2341 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2342 break;
2343 }
2344 return ret;
2345}
2346
2347static int
2348qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2349{
2350 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2351 struct qla_hw_data *ha = vha->hw;
2352 srb_t *sp;
2353 int cnt, que;
2354 unsigned long flags;
2355 struct req_que *req;
2356 struct srb_bsg *sp_bsg;
2357
2358 /* find the bsg job from the active list of commands */
2359 spin_lock_irqsave(&ha->hardware_lock, flags);
2360 for (que = 0; que < ha->max_req_queues; que++) {
2361 req = ha->req_q_map[que];
2362 if (!req)
2363 continue;
2364
2365 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
2366 sp = req->outstanding_cmds[cnt];
2367
2368 if (sp) {
2369 sp_bsg = (struct srb_bsg*)sp->ctx;
2370
2371 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2372 (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
2373 || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2374 (sp_bsg->bsg_job == bsg_job)) {
2375 if (ha->isp_ops->abort_command(sp)) {
2376 DEBUG2(qla_printk(KERN_INFO, ha,
2377 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2378 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2379 } else {
2380 DEBUG2(qla_printk(KERN_INFO, ha,
2381 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2382 bsg_job->req->errors = bsg_job->reply->result = 0;
2383 }
2384 goto done;
2385 }
2386 }
2387 }
2388 }
2389 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2390 DEBUG2(qla_printk(KERN_INFO, ha,
2391 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2392 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2393 return 0;
2394
2395done:
2396 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2397 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2398 kfree(sp->fcport);
2399 kfree(sp->ctx);
2400 mempool_free(sp, ha->srb_mempool);
2401 return 0;
2402}
2403
2404struct fc_function_template qla2xxx_transport_functions = { 1826struct fc_function_template qla2xxx_transport_functions = {
2405 1827
2406 .show_host_node_name = 1, 1828 .show_host_node_name = 1,
@@ -2516,125 +1938,3 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
2516 speed = FC_PORTSPEED_1GBIT; 1938 speed = FC_PORTSPEED_1GBIT;
2517 fc_host_supported_speeds(vha->host) = speed; 1939 fc_host_supported_speeds(vha->host) = speed;
2518} 1940}
2519static int
2520qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2521{
2522 int ret = 0;
2523 int cmd;
2524 uint16_t cmd_status;
2525
2526 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2527
2528 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2529 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2530 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2531 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2532 &cmd_status);
2533 return ret;
2534}
2535
2536static int
2537qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2538{
2539 struct access_chip_84xx *mn;
2540 dma_addr_t mn_dma, mgmt_dma;
2541 void *mgmt_b = NULL;
2542 int ret = 0;
2543 int rsp_hdr_len, len = 0;
2544 struct qla84_msg_mgmt *ql84_mgmt;
2545
2546 ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
2547 ql84_mgmt->cmd =
2548 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2549 ql84_mgmt->mgmtp.u.mem.start_addr =
2550 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2551 ql84_mgmt->len =
2552 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2553 ql84_mgmt->mgmtp.u.config.id =
2554 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2555 ql84_mgmt->mgmtp.u.config.param0 =
2556 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2557 ql84_mgmt->mgmtp.u.config.param1 =
2558 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2559 ql84_mgmt->mgmtp.u.info.type =
2560 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2561 ql84_mgmt->mgmtp.u.info.context =
2562 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2563
2564 rsp_hdr_len = bsg_job->request_payload.payload_len;
2565
2566 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2567 if (mn == NULL) {
2568 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2569 "failed%lu\n", __func__, ha->host_no));
2570 return -ENOMEM;
2571 }
2572
2573 memset(mn, 0, sizeof (struct access_chip_84xx));
2574
2575 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2576 mn->entry_count = 1;
2577
2578 switch (ql84_mgmt->cmd) {
2579 case QLA84_MGMT_READ_MEM:
2580 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2581 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2582 break;
2583 case QLA84_MGMT_WRITE_MEM:
2584 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2585 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2586 break;
2587 case QLA84_MGMT_CHNG_CONFIG:
2588 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2589 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2590 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2591 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2592 break;
2593 case QLA84_MGMT_GET_INFO:
2594 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2595 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2596 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2597 break;
2598 default:
2599 ret = -EIO;
2600 goto exit_mgmt0;
2601 }
2602
2603 if ((len == ql84_mgmt->len) &&
2604 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2605 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2606 &mgmt_dma, GFP_KERNEL);
2607 if (mgmt_b == NULL) {
2608 DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2609 "failed%lu\n", __func__, ha->host_no));
2610 ret = -ENOMEM;
2611 goto exit_mgmt0;
2612 }
2613 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2614 mn->dseg_count = cpu_to_le16(1);
2615 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2616 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2617 mn->dseg_length = cpu_to_le32(len);
2618
2619 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2620 memcpy(mgmt_b, ql84_mgmt->payload, len);
2621 }
2622 }
2623
2624 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2625 if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2626 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2627 if (ret != QLA_SUCCESS)
2628 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2629 __func__, ha->host_no));
2630 } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2631 (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2632 }
2633
2634 if (mgmt_b)
2635 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2636
2637exit_mgmt0:
2638 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
2639 return ret;
2640}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
new file mode 100644
index 00000000000..c20292fde72
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -0,0 +1,1040 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/delay.h>
12
13/* BSG support for ELS/CT pass through */
14inline srb_t *
15qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16{
17 srb_t *sp;
18 struct qla_hw_data *ha = vha->hw;
19 struct srb_bsg_ctx *ctx;
20
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 if (!sp)
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
29 }
30
31 memset(sp, 0, sizeof(*sp));
32 sp->fcport = fcport;
33 sp->ctx = ctx;
34done:
35 return sp;
36}
37
38static int
39qla2x00_process_els(struct fc_bsg_job *bsg_job)
40{
41 struct fc_rport *rport;
42 fc_port_t *fcport;
43 struct Scsi_Host *host;
44 scsi_qla_host_t *vha;
45 struct qla_hw_data *ha;
46 srb_t *sp;
47 const char *type;
48 int req_sg_cnt, rsp_sg_cnt;
49 int rval = (DRIVER_ERROR << 16);
50 uint16_t nextlid = 0;
51 struct srb_bsg *els;
52
53 /* Multiple SG's are not supported for ELS requests */
54 if (bsg_job->request_payload.sg_cnt > 1 ||
55 bsg_job->reply_payload.sg_cnt > 1) {
56 DEBUG2(printk(KERN_INFO
57 "multiple SG's are not supported for ELS requests"
58 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
59 bsg_job->request_payload.sg_cnt,
60 bsg_job->reply_payload.sg_cnt));
61 rval = -EPERM;
62 goto done;
63 }
64
65 /* ELS request for rport */
66 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
67 rport = bsg_job->rport;
68 fcport = *(fc_port_t **) rport->dd_data;
69 host = rport_to_shost(rport);
70 vha = shost_priv(host);
71 ha = vha->hw;
72 type = "FC_BSG_RPT_ELS";
73
74 /* make sure the rport is logged in,
75 * if not perform fabric login
76 */
77 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
78 DEBUG2(qla_printk(KERN_WARNING, ha,
79 "failed to login port %06X for ELS passthru\n",
80 fcport->d_id.b24));
81 rval = -EIO;
82 goto done;
83 }
84 } else {
85 host = bsg_job->shost;
86 vha = shost_priv(host);
87 ha = vha->hw;
88 type = "FC_BSG_HST_ELS_NOLOGIN";
89
90 /* Allocate a dummy fcport structure, since functions
91 * preparing the IOCB and mailbox command retrieves port
92 * specific information from fcport structure. For Host based
93 * ELS commands there will be no fcport structure allocated
94 */
95 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
96 if (!fcport) {
97 rval = -ENOMEM;
98 goto done;
99 }
100
101 /* Initialize all required fields of fcport */
102 fcport->vha = vha;
103 fcport->vp_idx = vha->vp_idx;
104 fcport->d_id.b.al_pa =
105 bsg_job->request->rqst_data.h_els.port_id[0];
106 fcport->d_id.b.area =
107 bsg_job->request->rqst_data.h_els.port_id[1];
108 fcport->d_id.b.domain =
109 bsg_job->request->rqst_data.h_els.port_id[2];
110 fcport->loop_id =
111 (fcport->d_id.b.al_pa == 0xFD) ?
112 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
113 }
114
115 if (!vha->flags.online) {
116 DEBUG2(qla_printk(KERN_WARNING, ha,
117 "host not online\n"));
118 rval = -EIO;
119 goto done;
120 }
121
122 req_sg_cnt =
123 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
124 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
125 if (!req_sg_cnt) {
126 rval = -ENOMEM;
127 goto done_free_fcport;
128 }
129 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
130 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
131 if (!rsp_sg_cnt) {
132 rval = -ENOMEM;
133 goto done_free_fcport;
134 }
135
136 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
137 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
138 {
139 DEBUG2(printk(KERN_INFO
140 "dma mapping resulted in different sg counts \
141 [request_sg_cnt: %x dma_request_sg_cnt: %x\
142 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
143 bsg_job->request_payload.sg_cnt, req_sg_cnt,
144 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
145 rval = -EAGAIN;
146 goto done_unmap_sg;
147 }
148
149 /* Alloc SRB structure */
150 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
151 if (!sp) {
152 rval = -ENOMEM;
153 goto done_unmap_sg;
154 }
155
156 els = sp->ctx;
157 els->ctx.type =
158 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
159 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
160 els->bsg_job = bsg_job;
161
162 DEBUG2(qla_printk(KERN_INFO, ha,
163 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
164 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
165 bsg_job->request->rqst_data.h_els.command_code,
166 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
167 fcport->d_id.b.al_pa));
168
169 rval = qla2x00_start_sp(sp);
170 if (rval != QLA_SUCCESS) {
171 kfree(sp->ctx);
172 mempool_free(sp, ha->srb_mempool);
173 rval = -EIO;
174 goto done_unmap_sg;
175 }
176 return rval;
177
178done_unmap_sg:
179 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
180 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
181 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
182 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
183 goto done_free_fcport;
184
185done_free_fcport:
186 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
187 kfree(fcport);
188done:
189 return rval;
190}
191
192static int
193qla2x00_process_ct(struct fc_bsg_job *bsg_job)
194{
195 srb_t *sp;
196 struct Scsi_Host *host = bsg_job->shost;
197 scsi_qla_host_t *vha = shost_priv(host);
198 struct qla_hw_data *ha = vha->hw;
199 int rval = (DRIVER_ERROR << 16);
200 int req_sg_cnt, rsp_sg_cnt;
201 uint16_t loop_id;
202 struct fc_port *fcport;
203 char *type = "FC_BSG_HST_CT";
204 struct srb_bsg *ct;
205
206 /* pass through is supported only for ISP 4Gb or higher */
207 if (!IS_FWI2_CAPABLE(ha)) {
208 DEBUG2(qla_printk(KERN_INFO, ha,
209 "scsi(%ld):Firmware is not capable to support FC "
210 "CT pass thru\n", vha->host_no));
211 rval = -EPERM;
212 goto done;
213 }
214
215 req_sg_cnt =
216 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
217 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
218 if (!req_sg_cnt) {
219 rval = -ENOMEM;
220 goto done;
221 }
222
223 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
224 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
225 if (!rsp_sg_cnt) {
226 rval = -ENOMEM;
227 goto done;
228 }
229
230 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
231 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
232 {
233 DEBUG2(qla_printk(KERN_WARNING, ha,
234 "[request_sg_cnt: %x dma_request_sg_cnt: %x\
235 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
236 bsg_job->request_payload.sg_cnt, req_sg_cnt,
237 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
238 rval = -EAGAIN;
239 goto done_unmap_sg;
240 }
241
242 if (!vha->flags.online) {
243 DEBUG2(qla_printk(KERN_WARNING, ha,
244 "host not online\n"));
245 rval = -EIO;
246 goto done_unmap_sg;
247 }
248
249 loop_id =
250 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
251 >> 24;
252 switch (loop_id) {
253 case 0xFC:
254 loop_id = cpu_to_le16(NPH_SNS);
255 break;
256 case 0xFA:
257 loop_id = vha->mgmt_svr_loop_id;
258 break;
259 default:
260 DEBUG2(qla_printk(KERN_INFO, ha,
261 "Unknown loop id: %x\n", loop_id));
262 rval = -EINVAL;
263 goto done_unmap_sg;
264 }
265
266 /* Allocate a dummy fcport structure, since functions preparing the
267 * IOCB and mailbox command retrieves port specific information
268 * from fcport structure. For Host based ELS commands there will be
269 * no fcport structure allocated
270 */
271 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
272 if (!fcport)
273 {
274 rval = -ENOMEM;
275 goto done_unmap_sg;
276 }
277
278 /* Initialize all required fields of fcport */
279 fcport->vha = vha;
280 fcport->vp_idx = vha->vp_idx;
281 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
282 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
283 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
284 fcport->loop_id = loop_id;
285
286 /* Alloc SRB structure */
287 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
288 if (!sp) {
289 rval = -ENOMEM;
290 goto done_free_fcport;
291 }
292
293 ct = sp->ctx;
294 ct->ctx.type = SRB_CT_CMD;
295 ct->bsg_job = bsg_job;
296
297 DEBUG2(qla_printk(KERN_INFO, ha,
298 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
299 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
300 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
301 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
302 fcport->d_id.b.al_pa));
303
304 rval = qla2x00_start_sp(sp);
305 if (rval != QLA_SUCCESS) {
306 kfree(sp->ctx);
307 mempool_free(sp, ha->srb_mempool);
308 rval = -EIO;
309 goto done_free_fcport;
310 }
311 return rval;
312
313done_free_fcport:
314 kfree(fcport);
315done_unmap_sg:
316 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
317 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
318 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
319 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
320done:
321 return rval;
322}
323
/*
 * qla2x00_process_loopback() - run a loopback (or, in F-port topology,
 * an ECHO) diagnostic on behalf of a BSG vendor request.
 *
 * The request payload is copied into a coherent DMA buffer, the test is
 * executed synchronously via the mailbox interface, and the firmware's
 * mailbox status plus the command type are appended after the fc_bsg_reply
 * in the job's sense area.  Completes the job itself via job_done();
 * returns 0 (job status is reported through bsg_job->reply->result) or a
 * negative errno when the job could not be started at all.
 */
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint8_t* fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	/* Refuse to run the diagnostic while ISP recovery is in progress. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt)
		return -ENOMEM;

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	/* NOTE(review): both buffers are sized from the *request* payload
	 * length — this assumes callers provide a reply payload at least
	 * as large as the request; confirm against the BSG user tools. */
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* In F-port (switched fabric) topology an external loopback is not
	 * possible, so an ECHO frame is used instead. */
	if (ha->current_topology != ISP_CFG_F) {
		type = "FC_BSG_HST_VENDOR_LOOPBACK";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n",
		    vha->host_no, type));

		command_sent = INT_DEF_LB_LOOPBACK_CMD;
		rval = qla2x00_loopback_test(vha, &elreq, response);
		if (IS_QLA81XX(ha)) {
			/* On 81xx a loopback-reset error from the firmware
			 * requires a full ISP abort to recover. */
			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
				    "ISP\n", __func__, vha->host_no));
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
	} else {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n" ,vha->host_no, type));
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	}

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s failed\n", vha->host_no, type));

		/* Stash the mailbox snapshot plus the command type byte
		 * after the fc_bsg_reply in the sense area for user space. */
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		/* rval reset to 0: the failure is reported to user space
		 * via reply->result, not as a BSG request error. */
		rval = 0;
		bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s completed\n", vha->host_no, type));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		bsg_job->reply->result = DID_OK;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

	/* Cleanup: deliberate fall-through, releasing resources in reverse
	 * order of acquisition. */
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	return rval;
}
482
483static int
484qla84xx_reset(struct fc_bsg_job *bsg_job)
485{
486 struct Scsi_Host *host = bsg_job->shost;
487 scsi_qla_host_t *vha = shost_priv(host);
488 struct qla_hw_data *ha = vha->hw;
489 int rval = 0;
490 uint32_t flag;
491
492 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
493 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
494 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
495 return -EBUSY;
496
497 if (!IS_QLA84XX(ha)) {
498 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
499 "exiting.\n", vha->host_no));
500 return -EINVAL;
501 }
502
503 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
504
505 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
506
507 if (rval) {
508 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
509 "request 84xx reset failed\n", vha->host_no));
510 rval = bsg_job->reply->reply_payload_rcv_len = 0;
511 bsg_job->reply->result = (DID_ERROR << 16);
512
513 } else {
514 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
515 "request 84xx reset completed\n", vha->host_no));
516 bsg_job->reply->result = DID_OK;
517 }
518
519 bsg_job->job_done(bsg_job);
520 return rval;
521}
522
523static int
524qla84xx_updatefw(struct fc_bsg_job *bsg_job)
525{
526 struct Scsi_Host *host = bsg_job->shost;
527 scsi_qla_host_t *vha = shost_priv(host);
528 struct qla_hw_data *ha = vha->hw;
529 struct verify_chip_entry_84xx *mn = NULL;
530 dma_addr_t mn_dma, fw_dma;
531 void *fw_buf = NULL;
532 int rval = 0;
533 uint32_t sg_cnt;
534 uint32_t data_len;
535 uint16_t options;
536 uint32_t flag;
537 uint32_t fw_ver;
538
539 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
540 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
541 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
542 return -EBUSY;
543
544 if (!IS_QLA84XX(ha)) {
545 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
546 "exiting.\n", vha->host_no));
547 return -EINVAL;
548 }
549
550 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
551 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
552 if (!sg_cnt)
553 return -ENOMEM;
554
555 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
556 DEBUG2(printk(KERN_INFO
557 "dma mapping resulted in different sg counts "
558 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
559 bsg_job->request_payload.sg_cnt, sg_cnt));
560 rval = -EAGAIN;
561 goto done_unmap_sg;
562 }
563
564 data_len = bsg_job->request_payload.payload_len;
565 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
566 &fw_dma, GFP_KERNEL);
567 if (!fw_buf) {
568 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
569 "failed for host=%lu\n", __func__, vha->host_no));
570 rval = -ENOMEM;
571 goto done_unmap_sg;
572 }
573
574 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
575 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
576
577 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
578 if (!mn) {
579 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
580 "failed for host=%lu\n", __func__, vha->host_no));
581 rval = -ENOMEM;
582 goto done_free_fw_buf;
583 }
584
585 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
586 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
587
588 memset(mn, 0, sizeof(struct access_chip_84xx));
589 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
590 mn->entry_count = 1;
591
592 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
593 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
594 options |= VCO_DIAG_FW;
595
596 mn->options = cpu_to_le16(options);
597 mn->fw_ver = cpu_to_le32(fw_ver);
598 mn->fw_size = cpu_to_le32(data_len);
599 mn->fw_seq_size = cpu_to_le32(data_len);
600 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
601 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
602 mn->dseg_length = cpu_to_le32(data_len);
603 mn->data_seg_cnt = cpu_to_le16(1);
604
605 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
606
607 if (rval) {
608 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
609 "request 84xx updatefw failed\n", vha->host_no));
610
611 rval = bsg_job->reply->reply_payload_rcv_len = 0;
612 bsg_job->reply->result = (DID_ERROR << 16);
613
614 } else {
615 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
616 "request 84xx updatefw completed\n", vha->host_no));
617
618 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
619 bsg_job->reply->result = DID_OK;
620 }
621
622 bsg_job->job_done(bsg_job);
623 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
624
625done_free_fw_buf:
626 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
627
628done_unmap_sg:
629 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
630 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
631
632 return rval;
633}
634
635static int
636qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
637{
638 struct Scsi_Host *host = bsg_job->shost;
639 scsi_qla_host_t *vha = shost_priv(host);
640 struct qla_hw_data *ha = vha->hw;
641 struct access_chip_84xx *mn = NULL;
642 dma_addr_t mn_dma, mgmt_dma;
643 void *mgmt_b = NULL;
644 int rval = 0;
645 struct qla_bsg_a84_mgmt *ql84_mgmt;
646 uint32_t sg_cnt;
647 uint32_t data_len;
648 uint32_t dma_direction = DMA_NONE;
649
650 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
651 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
652 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
653 return -EBUSY;
654
655 if (!IS_QLA84XX(ha)) {
656 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
657 "exiting.\n", vha->host_no));
658 return -EINVAL;
659 }
660
661 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
662 sizeof(struct fc_bsg_request));
663 if (!ql84_mgmt) {
664 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
665 __func__, vha->host_no));
666 return -EINVAL;
667 }
668
669 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
670 if (!mn) {
671 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
672 "failed for host=%lu\n", __func__, vha->host_no));
673 return -ENOMEM;
674 }
675
676 memset(mn, 0, sizeof(struct access_chip_84xx));
677 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
678 mn->entry_count = 1;
679
680 switch (ql84_mgmt->mgmt.cmd) {
681 case QLA84_MGMT_READ_MEM:
682 case QLA84_MGMT_GET_INFO:
683 sg_cnt = dma_map_sg(&ha->pdev->dev,
684 bsg_job->reply_payload.sg_list,
685 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
686 if (!sg_cnt) {
687 rval = -ENOMEM;
688 goto exit_mgmt;
689 }
690
691 dma_direction = DMA_FROM_DEVICE;
692
693 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
694 DEBUG2(printk(KERN_INFO
695 "dma mapping resulted in different sg counts "
696 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
697 bsg_job->reply_payload.sg_cnt, sg_cnt));
698 rval = -EAGAIN;
699 goto done_unmap_sg;
700 }
701
702 data_len = bsg_job->reply_payload.payload_len;
703
704 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
705 &mgmt_dma, GFP_KERNEL);
706 if (!mgmt_b) {
707 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
708 "failed for host=%lu\n",
709 __func__, vha->host_no));
710 rval = -ENOMEM;
711 goto done_unmap_sg;
712 }
713
714 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
715 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
716 mn->parameter1 =
717 cpu_to_le32(
718 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
719
720 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
721 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
722 mn->parameter1 =
723 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
724
725 mn->parameter2 =
726 cpu_to_le32(
727 ql84_mgmt->mgmt.mgmtp.u.info.context);
728 }
729 break;
730
731 case QLA84_MGMT_WRITE_MEM:
732 sg_cnt = dma_map_sg(&ha->pdev->dev,
733 bsg_job->request_payload.sg_list,
734 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
735
736 if (!sg_cnt) {
737 rval = -ENOMEM;
738 goto exit_mgmt;
739 }
740
741 dma_direction = DMA_TO_DEVICE;
742
743 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
744 DEBUG2(printk(KERN_INFO
745 "dma mapping resulted in different sg counts "
746 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
747 bsg_job->request_payload.sg_cnt, sg_cnt));
748 rval = -EAGAIN;
749 goto done_unmap_sg;
750 }
751
752 data_len = bsg_job->request_payload.payload_len;
753 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
754 &mgmt_dma, GFP_KERNEL);
755 if (!mgmt_b) {
756 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
757 "failed for host=%lu\n",
758 __func__, vha->host_no));
759 rval = -ENOMEM;
760 goto done_unmap_sg;
761 }
762
763 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
764 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
765
766 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
767 mn->parameter1 =
768 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
769 break;
770
771 case QLA84_MGMT_CHNG_CONFIG:
772 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
773 mn->parameter1 =
774 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
775
776 mn->parameter2 =
777 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
778
779 mn->parameter3 =
780 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
781 break;
782
783 default:
784 rval = -EIO;
785 goto exit_mgmt;
786 }
787
788 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
789 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
790 mn->dseg_count = cpu_to_le16(1);
791 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
792 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
793 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
794 }
795
796 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
797
798 if (rval) {
799 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
800 "request 84xx mgmt failed\n", vha->host_no));
801
802 rval = bsg_job->reply->reply_payload_rcv_len = 0;
803 bsg_job->reply->result = (DID_ERROR << 16);
804
805 } else {
806 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
807 "request 84xx mgmt completed\n", vha->host_no));
808
809 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
810 bsg_job->reply->result = DID_OK;
811
812 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
813 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
814 bsg_job->reply->reply_payload_rcv_len =
815 bsg_job->reply_payload.payload_len;
816
817 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
818 bsg_job->reply_payload.sg_cnt, mgmt_b, data_len);
819 }
820 }
821
822 bsg_job->job_done(bsg_job);
823 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
824
825done_unmap_sg:
826 if (dma_direction == DMA_TO_DEVICE)
827 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
828 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
829 else if (dma_direction == DMA_FROM_DEVICE)
830 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
831 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
832
833exit_mgmt:
834 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
835
836 return rval;
837}
838
839static int
840qla24xx_iidma(struct fc_bsg_job *bsg_job)
841{
842 struct Scsi_Host *host = bsg_job->shost;
843 scsi_qla_host_t *vha = shost_priv(host);
844 struct qla_hw_data *ha = vha->hw;
845 int rval = 0;
846 struct qla_port_param *port_param = NULL;
847 fc_port_t *fcport = NULL;
848 uint16_t mb[MAILBOX_REGISTER_COUNT];
849 uint8_t *rsp_ptr = NULL;
850
851 bsg_job->reply->reply_payload_rcv_len = 0;
852
853 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
854 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
855 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
856 return -EBUSY;
857
858 if (!IS_IIDMA_CAPABLE(vha->hw)) {
859 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
860 "supported\n", __func__, vha->host_no));
861 return -EINVAL;
862 }
863
864 port_param = (struct qla_port_param *)((char *)bsg_job->request +
865 sizeof(struct fc_bsg_request));
866 if (!port_param) {
867 DEBUG2(printk("%s(%ld): port_param header not provided, "
868 "exiting.\n", __func__, vha->host_no));
869 return -EINVAL;
870 }
871
872 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
873 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
874 __func__, vha->host_no));
875 return -EINVAL;
876 }
877
878 list_for_each_entry(fcport, &vha->vp_fcports, list) {
879 if (fcport->port_type != FCT_TARGET)
880 continue;
881
882 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
883 fcport->port_name, sizeof(fcport->port_name)))
884 continue;
885 break;
886 }
887
888 if (!fcport) {
889 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
890 __func__, vha->host_no));
891 return -EINVAL;
892 }
893
894 if (port_param->mode)
895 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
896 port_param->speed, mb);
897 else
898 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
899 &port_param->speed, mb);
900
901 if (rval) {
902 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
903 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
904 vha->host_no, fcport->port_name[0],
905 fcport->port_name[1],
906 fcport->port_name[2], fcport->port_name[3],
907 fcport->port_name[4], fcport->port_name[5],
908 fcport->port_name[6], fcport->port_name[7], rval,
909 fcport->fp_speed, mb[0], mb[1]));
910 rval = 0;
911 bsg_job->reply->result = (DID_ERROR << 16);
912
913 } else {
914 if (!port_param->mode) {
915 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
916 sizeof(struct qla_port_param);
917
918 rsp_ptr = ((uint8_t *)bsg_job->reply) +
919 sizeof(struct fc_bsg_reply);
920
921 memcpy(rsp_ptr, port_param,
922 sizeof(struct qla_port_param));
923 }
924
925 bsg_job->reply->result = DID_OK;
926 }
927
928 bsg_job->job_done(bsg_job);
929 return rval;
930}
931
932static int
933qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
934{
935 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
936 case QL_VND_LOOPBACK:
937 return qla2x00_process_loopback(bsg_job);
938
939 case QL_VND_A84_RESET:
940 return qla84xx_reset(bsg_job);
941
942 case QL_VND_A84_UPDATE_FW:
943 return qla84xx_updatefw(bsg_job);
944
945 case QL_VND_A84_MGMT_CMD:
946 return qla84xx_mgmt_cmd(bsg_job);
947
948 case QL_VND_IIDMA:
949 return qla24xx_iidma(bsg_job);
950
951 default:
952 bsg_job->reply->result = (DID_ERROR << 16);
953 bsg_job->job_done(bsg_job);
954 return -ENOSYS;
955 }
956}
957
958int
959qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
960{
961 int ret = -EINVAL;
962
963 switch (bsg_job->request->msgcode) {
964 case FC_BSG_RPT_ELS:
965 case FC_BSG_HST_ELS_NOLOGIN:
966 ret = qla2x00_process_els(bsg_job);
967 break;
968 case FC_BSG_HST_CT:
969 ret = qla2x00_process_ct(bsg_job);
970 break;
971 case FC_BSG_HST_VENDOR:
972 ret = qla2x00_process_vendor_specific(bsg_job);
973 break;
974 case FC_BSG_HST_ADD_RPORT:
975 case FC_BSG_HST_DEL_RPORT:
976 case FC_BSG_RPT_CT:
977 default:
978 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
979 break;
980 }
981 return ret;
982}
983
/*
 * qla24xx_bsg_timeout() - FC transport BSG timeout handler.
 *
 * Locates the SRB belonging to the timed-out bsg_job among the
 * outstanding commands of every request queue, asks the ISP to abort it,
 * and frees the SRB resources.  Always returns 0; the job's outcome is
 * reported through bsg_job->reply->result / bsg_job->req->errors.
 */
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_bsg *sp_bsg;

	/* find the bsg job from the active list of commands */
	/* hardware_lock protects the outstanding_cmds arrays during the
	 * scan and the abort_command() call. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Handle 0 is never assigned to a command; start at 1. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];

			if (sp) {
				sp_bsg = (struct srb_bsg*)sp->ctx;

				/* Only CT and host-ELS SRBs carry a bsg_job
				 * we can match.  NOTE(review): SRB_ELS_CMD_RPT
				 * jobs are not matched here — confirm that is
				 * intended. */
				if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
				    (sp_bsg->ctx.type == SRB_ELS_CMD_HST))
				    && (sp_bsg->bsg_job == bsg_job)) {
					if (ha->isp_ops->abort_command(sp)) {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx abort_command failed\n", vha->host_no));
						bsg_job->req->errors =
						    bsg_job->reply->result = -EIO;
					} else {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx abort_command success\n", vha->host_no));
						bsg_job->req->errors =
						    bsg_job->reply->result = 0;
					}
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld) SRB not found to abort\n", vha->host_no));
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* The dummy fcport is freed only for CT jobs.  NOTE(review):
	 * FC_BSG_HST_ELS_NOLOGIN jobs also allocate an fcport — confirm it
	 * is released elsewhere on this path. */
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
new file mode 100644
index 00000000000..76ed92dd2ef
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -0,0 +1,135 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#ifndef __QLA_BSG_H
8#define __QLA_BSG_H
9
10/* BSG Vendor specific commands */
11#define QL_VND_LOOPBACK 0x01
12#define QL_VND_A84_RESET 0x02
13#define QL_VND_A84_UPDATE_FW 0x03
14#define QL_VND_A84_MGMT_CMD 0x04
15#define QL_VND_IIDMA 0x05
16#define QL_VND_FCP_PRIO_CFG_CMD 0x06
17
18/* BSG definitions for interpreting CommandSent field */
19#define INT_DEF_LB_LOOPBACK_CMD 0
20#define INT_DEF_LB_ECHO_CMD 1
21
22/* BSG Vendor specific definitions */
23#define A84_ISSUE_WRITE_TYPE_CMD 0
24#define A84_ISSUE_READ_TYPE_CMD 1
25#define A84_CLEANUP_CMD 2
26#define A84_ISSUE_RESET_OP_FW 3
27#define A84_ISSUE_RESET_DIAG_FW 4
28#define A84_ISSUE_UPDATE_OPFW_CMD 5
29#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
30
31struct qla84_mgmt_param {
32 union {
33 struct {
34 uint32_t start_addr;
35 } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
36 struct {
37 uint32_t id;
38#define QLA84_MGMT_CONFIG_ID_UIF 1
39#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
40#define QLA84_MGMT_CONFIG_ID_PAUSE 3
41#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
42
43 uint32_t param0;
44 uint32_t param1;
45 } config; /* for QLA84_MGMT_CHNG_CONFIG */
46
47 struct {
48 uint32_t type;
49#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
50#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
51#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
52#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
53#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
54#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
55#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
56
57 uint32_t context;
58/*
59* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
60*/
61#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
62#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
63#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
64#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
65#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
66#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
67#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
68#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
69#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
70#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
71
72/*
73* context definitions for QLA84_MGMT_INFO_PORT_STAT
74*/
75#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
76#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
77#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
78#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
79#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
80#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
81
82
83/*
84* context definitions for QLA84_MGMT_INFO_LIF_STAT
85*/
86#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
87#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
88#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
89#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
90#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
91
92 } info; /* for QLA84_MGMT_GET_INFO */
93 } u;
94};
95
96struct qla84_msg_mgmt {
97 uint16_t cmd;
98#define QLA84_MGMT_READ_MEM 0x00
99#define QLA84_MGMT_WRITE_MEM 0x01
100#define QLA84_MGMT_CHNG_CONFIG 0x02
101#define QLA84_MGMT_GET_INFO 0x03
102 uint16_t rsrvd;
103 struct qla84_mgmt_param mgmtp;/* parameters for cmd */
104 uint32_t len; /* bytes in payload following this struct */
105 uint8_t payload[0]; /* payload for cmd */
106};
107
108struct qla_bsg_a84_mgmt {
109 struct qla84_msg_mgmt mgmt;
110} __attribute__ ((packed));
111
112struct qla_scsi_addr {
113 uint16_t bus;
114 uint16_t target;
115} __attribute__ ((packed));
116
117struct qla_ext_dest_addr {
118 union {
119 uint8_t wwnn[8];
120 uint8_t wwpn[8];
121 uint8_t id[4];
122 struct qla_scsi_addr scsi_addr;
123 } dest_addr;
124 uint16_t dest_type;
125#define EXT_DEF_TYPE_WWPN 2
126 uint16_t lun;
127 uint16_t padding[2];
128} __attribute__ ((packed));
129
130struct qla_port_param {
131 struct qla_ext_dest_addr fc_scsi_addr;
132 uint16_t mode;
133 uint16_t speed;
134} __attribute__ ((packed));
135#endif
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index afa95614aaf..608397bf7e0 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -33,6 +33,7 @@
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_bsg_fc.h> 34#include <scsi/scsi_bsg_fc.h>
35 35
36#include "qla_bsg.h"
36#define QLA2XXX_DRIVER_NAME "qla2xxx" 37#define QLA2XXX_DRIVER_NAME "qla2xxx"
37 38
38/* 39/*
@@ -2797,128 +2798,4 @@ typedef struct scsi_qla_host {
2797#include "qla_inline.h" 2798#include "qla_inline.h"
2798 2799
2799#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 2800#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
2800
2801/*
2802 * BSG Vendor specific commands
2803 */
2804
2805#define QL_VND_LOOPBACK 0x01
2806#define QLA84_RESET 0x02
2807#define QLA84_UPDATE_FW 0x03
2808#define QLA84_MGMT_CMD 0x04
2809
2810/* BSG definations for interpreting CommandSent field */
2811#define INT_DEF_LB_LOOPBACK_CMD 0
2812#define INT_DEF_LB_ECHO_CMD 1
2813
2814/* BSG Vendor specific definations */
2815typedef struct _A84_RESET {
2816 uint16_t Flags;
2817 uint16_t Reserved;
2818#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
2819} __attribute__((packed)) A84_RESET, *PA84_RESET;
2820
2821#define A84_ISSUE_WRITE_TYPE_CMD 0
2822#define A84_ISSUE_READ_TYPE_CMD 1
2823#define A84_CLEANUP_CMD 2
2824#define A84_ISSUE_RESET_OP_FW 3
2825#define A84_ISSUE_RESET_DIAG_FW 4
2826#define A84_ISSUE_UPDATE_OPFW_CMD 5
2827#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
2828
2829struct qla84_mgmt_param {
2830 union {
2831 struct {
2832 uint32_t start_addr;
2833 } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
2834 struct {
2835 uint32_t id;
2836#define QLA84_MGMT_CONFIG_ID_UIF 1
2837#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
2838#define QLA84_MGMT_CONFIG_ID_PAUSE 3
2839#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
2840
2841 uint32_t param0;
2842 uint32_t param1;
2843 } config; /* for QLA84_MGMT_CHNG_CONFIG */
2844
2845 struct {
2846 uint32_t type;
2847#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
2848#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
2849#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
2850#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
2851#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
2852#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
2853#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
2854
2855 uint32_t context;
2856/*
2857* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
2858*/
2859#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
2860#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
2861#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
2862#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
2863#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
2864#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
2865#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
2866#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
2867#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
2868#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
2869
2870/*
2871* context definitions for QLA84_MGMT_INFO_PORT_STAT
2872*/
2873#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
2874#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
2875#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
2876#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
2877#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
2878#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
2879
2880
2881/*
2882* context definitions for QLA84_MGMT_INFO_LIF_STAT
2883*/
2884#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
2885#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
2886#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
2887#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
2888#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
2889
2890 } info; /* for QLA84_MGMT_GET_INFO */
2891 } u;
2892};
2893
2894struct qla84_msg_mgmt {
2895 uint16_t cmd;
2896#define QLA84_MGMT_READ_MEM 0x00
2897#define QLA84_MGMT_WRITE_MEM 0x01
2898#define QLA84_MGMT_CHNG_CONFIG 0x02
2899#define QLA84_MGMT_GET_INFO 0x03
2900 uint16_t rsrvd;
2901 struct qla84_mgmt_param mgmtp;/* parameters for cmd */
2902 uint32_t len; /* bytes in payload following this struct */
2903 uint8_t payload[0]; /* payload for cmd */
2904};
2905
2906struct msg_update_fw {
2907 /*
2908 * diag_fw = 0 operational fw
2909 * otherwise diagnostic fw
2910 * offset, len, fw_len are present to overcome the current limitation
2911 * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
2912 * specifies the byte "offset" where it fits in the fw buffer. The
2913 * number of bytes in each chunk is specified in "len". "fw_len"
2914 * is the total size of fw. The first chunk should start at offset = 0.
2915 * When offset+len == fw_len, the fw is written to the HBA.
2916 */
2917 uint32_t diag_fw;
2918 uint32_t offset;/* start offset */
2919 uint32_t len; /* num bytes in cur xfer */
2920 uint32_t fw_len; /* size of fw in bytes */
2921 uint8_t fw_bytes[0];
2922};
2923
2924#endif 2801#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3a89bc514e2..c1f1736dcda 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -459,4 +459,12 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
459extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 459extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
460extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 460extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
461 461
462/* BSG related functions */
463extern int qla24xx_bsg_request(struct fc_bsg_job *);
464extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
465extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
466extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
467 dma_addr_t, size_t, uint32_t);
468extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
469 uint16_t *, uint16_t *);
462#endif /* _QLA_GBL_H */ 470#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 42eb7ffd594..7f3bc45d2e2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -711,7 +711,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
711 * Context: 711 * Context:
712 * Kernel context. 712 * Kernel context.
713 */ 713 */
714static int 714int
715qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, 715qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
716 dma_addr_t phys_addr, size_t size, uint32_t tov) 716 dma_addr_t phys_addr, size_t size, uint32_t tov)
717{ 717{
@@ -2740,6 +2740,48 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
2740} 2740}
2741 2741
2742int 2742int
2743qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2744 uint16_t *port_speed, uint16_t *mb)
2745{
2746 int rval;
2747 mbx_cmd_t mc;
2748 mbx_cmd_t *mcp = &mc;
2749
2750 if (!IS_IIDMA_CAPABLE(vha->hw))
2751 return QLA_FUNCTION_FAILED;
2752
2753 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2754
2755 mcp->mb[0] = MBC_PORT_PARAMS;
2756 mcp->mb[1] = loop_id;
2757 mcp->mb[2] = mcp->mb[3] = 0;
2758 mcp->mb[9] = vha->vp_idx;
2759 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2760 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2761 mcp->tov = MBX_TOV_SECONDS;
2762 mcp->flags = 0;
2763 rval = qla2x00_mailbox_command(vha, mcp);
2764
2765 /* Return mailbox statuses. */
2766 if (mb != NULL) {
2767 mb[0] = mcp->mb[0];
2768 mb[1] = mcp->mb[1];
2769 mb[3] = mcp->mb[3];
2770 }
2771
2772 if (rval != QLA_SUCCESS) {
2773 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2774 vha->host_no, rval));
2775 } else {
2776 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2777 if (port_speed)
2778 *port_speed = mcp->mb[3];
2779 }
2780
2781 return rval;
2782}
2783
2784int
2743qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 2785qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2744 uint16_t port_speed, uint16_t *mb) 2786 uint16_t port_speed, uint16_t *mb)
2745{ 2787{
@@ -3764,8 +3806,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
3764 return rval; 3806 return rval;
3765} 3807}
3766int 3808int
3767qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic, 3809qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3768 uint16_t *cmd_status)
3769{ 3810{
3770 int rval; 3811 int rval;
3771 mbx_cmd_t mc; 3812 mbx_cmd_t mc;
@@ -3782,8 +3823,6 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
3782 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 3823 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3783 rval = qla2x00_mailbox_command(ha, mcp); 3824 rval = qla2x00_mailbox_command(ha, mcp);
3784 3825
3785 /* Return mailbox statuses. */
3786 *cmd_status = mcp->mb[0];
3787 if (rval != QLA_SUCCESS) 3826 if (rval != QLA_SUCCESS)
3788 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, 3827 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
3789 rval)); 3828 rval));