Diffstat (limited to 'drivers/scsi/qla2xxx/qla_attr.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_attr.c	772
1 file changed, 58 insertions(+), 714 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c7ef55966fb..3b708606b932 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -12,9 +12,7 @@
 #include <linux/delay.h>
 
 static int qla24xx_vport_disable(struct fc_vport *, bool);
-static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
-int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
-static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+
 /* SYSFS attributes --------------------------------------------------------- */
 
 static ssize_t
@@ -43,6 +41,12 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
 	struct qla_hw_data *ha = vha->hw;
 	int reading;
 
+	if (IS_QLA82XX(ha)) {
+		DEBUG2(qla_printk(KERN_INFO, ha,
+		    "Firmware dump not supported for ISP82xx\n"));
+		return count;
+	}
+
 	if (off != 0)
 		return (0);
 
@@ -277,6 +281,12 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
 			return count;
 		}
 
+		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+			qla_printk(KERN_WARNING, ha,
+			    "HBA not online, failing NVRAM update.\n");
+			return -EAGAIN;
+		}
+
 		DEBUG2(qla_printk(KERN_INFO, ha,
 		    "Reading flash region -- 0x%x/0x%x.\n",
 		    ha->optrom_region_start, ha->optrom_region_size));
@@ -315,8 +325,8 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
 		else if (start == (ha->flt_region_boot * 4) ||
 		    start == (ha->flt_region_fw * 4))
 			valid = 1;
-		else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
+		else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
 			valid = 1;
 		if (!valid) {
 			qla_printk(KERN_WARNING, ha,
 			    "Invalid start region 0x%x/0x%x.\n", start, size);
@@ -519,6 +529,7 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	int type;
 
 	if (off != 0)
@@ -553,6 +564,20 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
553 "MPI reset failed on (%ld).\n", vha->host_no); 564 "MPI reset failed on (%ld).\n", vha->host_no);
554 scsi_unblock_requests(vha->host); 565 scsi_unblock_requests(vha->host);
555 break; 566 break;
567 case 0x2025e:
568 if (!IS_QLA82XX(ha) || vha != base_vha) {
569 qla_printk(KERN_INFO, ha,
570 "FCoE ctx reset not supported for host%ld.\n",
571 vha->host_no);
572 return count;
573 }
574
575 qla_printk(KERN_INFO, ha,
576 "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
578 qla2xxx_wake_dpc(vha);
579 qla2x00_wait_for_fcoe_ctx_reset(vha);
580 break;
556 } 581 }
557 return count; 582 return count;
558} 583}
@@ -838,7 +863,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
 			continue;
 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
 			continue;
-		if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
+		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
 			continue;
 
 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -862,7 +887,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
 			continue;
 		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
 			continue;
-		if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
+		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
 			continue;
 
 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -968,7 +993,8 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
 	int len = 0;
 
 	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
-	    atomic_read(&vha->loop_state) == LOOP_DEAD)
+	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
+	    vha->device_flags & DFLG_NO_CABLE)
 		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
 	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
 	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
@@ -1179,15 +1205,15 @@ qla24xx_84xx_fw_version_show(struct device *dev,
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA84XX(ha) && ha->cs84xx) {
-		if (ha->cs84xx->op_fw_version == 0) {
-			rval = qla84xx_verify_chip(vha, status);
-		}
+	if (!IS_QLA84XX(ha))
+		return snprintf(buf, PAGE_SIZE, "\n");
+
+	if (ha->cs84xx && ha->cs84xx->op_fw_version == 0)
+		rval = qla84xx_verify_chip(vha, status);
 
 	if ((rval == QLA_SUCCESS) && (status[0] == 0))
 		return snprintf(buf, PAGE_SIZE, "%u\n",
 		    (uint32_t)ha->cs84xx->op_fw_version);
-	}
 
 	return snprintf(buf, PAGE_SIZE, "\n");
 }
@@ -1237,7 +1263,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 
-	if (!IS_QLA81XX(vha->hw))
+	if (!IS_QLA8XXX_TYPE(vha->hw))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1249,7 +1275,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 
-	if (!IS_QLA81XX(vha->hw))
+	if (!IS_QLA8XXX_TYPE(vha->hw))
 		return snprintf(buf, PAGE_SIZE, "\n");
 
 	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1706,6 +1732,22 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 		fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
 	}
 
+	if (IS_QLA25XX(ha) && ql2xenabledif) {
+		if (ha->fw_attributes & BIT_4) {
+			vha->flags.difdix_supported = 1;
+			DEBUG18(qla_printk(KERN_INFO, ha,
+			    "Registering for DIF/DIX type 1 and 3"
+			    " protection.\n"));
+			scsi_host_set_prot(vha->host,
+			    SHOST_DIF_TYPE1_PROTECTION
+			    | SHOST_DIF_TYPE3_PROTECTION
+			    | SHOST_DIX_TYPE1_PROTECTION
+			    | SHOST_DIX_TYPE3_PROTECTION);
+			scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
+		} else
+			vha->flags.difdix_supported = 0;
+	}
+
 	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
 	    &ha->pdev->dev)) {
 		DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
@@ -1825,582 +1867,6 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
 	return 0;
 }
 
-/* BSG support for ELS/CT pass through */
-inline srb_t *
-qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
-{
-	srb_t *sp;
-	struct qla_hw_data *ha = vha->hw;
-	struct srb_bsg_ctx *ctx;
-
-	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
-	if (!sp)
-		goto done;
-	ctx = kzalloc(size, GFP_KERNEL);
-	if (!ctx) {
-		mempool_free(sp, ha->srb_mempool);
-		goto done;
-	}
-
-	memset(sp, 0, sizeof(*sp));
-	sp->fcport = fcport;
-	sp->ctx = ctx;
-done:
-	return sp;
-}
-
-static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
-{
-	struct fc_rport *rport;
-	fc_port_t *fcport;
-	struct Scsi_Host *host;
-	scsi_qla_host_t *vha;
-	struct qla_hw_data *ha;
-	srb_t *sp;
-	const char *type;
-	int req_sg_cnt, rsp_sg_cnt;
-	int rval = (DRIVER_ERROR << 16);
-	uint16_t nextlid = 0;
-	struct srb_bsg *els;
-
-	/* Multiple SG's are not supported for ELS requests */
-	if (bsg_job->request_payload.sg_cnt > 1 ||
-	    bsg_job->reply_payload.sg_cnt > 1) {
-		DEBUG2(printk(KERN_INFO
-		    "multiple SG's are not supported for ELS requests"
-		    " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
-		    bsg_job->request_payload.sg_cnt,
-		    bsg_job->reply_payload.sg_cnt));
-		rval = -EPERM;
-		goto done;
-	}
-
-	/* ELS request for rport */
-	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
-		rport = bsg_job->rport;
-		fcport = *(fc_port_t **) rport->dd_data;
-		host = rport_to_shost(rport);
-		vha = shost_priv(host);
-		ha = vha->hw;
-		type = "FC_BSG_RPT_ELS";
-
-		/* make sure the rport is logged in,
-		 * if not perform fabric login
-		 */
-		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
-			DEBUG2(qla_printk(KERN_WARNING, ha,
-			    "failed to login port %06X for ELS passthru\n",
-			    fcport->d_id.b24));
-			rval = -EIO;
-			goto done;
-		}
-	} else {
-		host = bsg_job->shost;
-		vha = shost_priv(host);
-		ha = vha->hw;
-		type = "FC_BSG_HST_ELS_NOLOGIN";
-
-		/* Allocate a dummy fcport structure, since functions
-		 * preparing the IOCB and mailbox command retrieves port
-		 * specific information from fcport structure. For Host based
-		 * ELS commands there will be no fcport structure allocated
-		 */
-		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
-		if (!fcport) {
-			rval = -ENOMEM;
-			goto done;
-		}
-
-		/* Initialize all required fields of fcport */
-		fcport->vha = vha;
-		fcport->vp_idx = vha->vp_idx;
-		fcport->d_id.b.al_pa =
-		    bsg_job->request->rqst_data.h_els.port_id[0];
-		fcport->d_id.b.area =
-		    bsg_job->request->rqst_data.h_els.port_id[1];
-		fcport->d_id.b.domain =
-		    bsg_job->request->rqst_data.h_els.port_id[2];
-		fcport->loop_id =
-		    (fcport->d_id.b.al_pa == 0xFD) ?
-		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
-	}
-
-	if (!vha->flags.online) {
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "host not online\n"));
-		rval = -EIO;
-		goto done;
-	}
-
-	req_sg_cnt =
-	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	if (!req_sg_cnt) {
-		rval = -ENOMEM;
-		goto done_free_fcport;
-	}
-	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	if (!rsp_sg_cnt) {
-		rval = -ENOMEM;
-		goto done_free_fcport;
-	}
-
-	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
-	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
-	{
-		DEBUG2(printk(KERN_INFO
-		    "dma mapping resulted in different sg counts \
-		    [request_sg_cnt: %x dma_request_sg_cnt: %x\
-		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
-		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
-		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
-		rval = -EAGAIN;
-		goto done_unmap_sg;
-	}
-
-	/* Alloc SRB structure */
-	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
-	if (!sp) {
-		rval = -ENOMEM;
-		goto done_unmap_sg;
-	}
-
-	els = sp->ctx;
-	els->ctx.type =
-	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
-	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
-	els->bsg_job = bsg_job;
-
-	DEBUG2(qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
-	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
-	    bsg_job->request->rqst_data.h_els.command_code,
-	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-	    fcport->d_id.b.al_pa));
-
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS) {
-		kfree(sp->ctx);
-		mempool_free(sp, ha->srb_mempool);
-		rval = -EIO;
-		goto done_unmap_sg;
-	}
-	return rval;
-
-done_unmap_sg:
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	goto done_free_fcport;
-
-done_free_fcport:
-	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
-		kfree(fcport);
-done:
-	return rval;
-}
-
-static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
-{
-	srb_t *sp;
-	struct Scsi_Host *host = bsg_job->shost;
-	scsi_qla_host_t *vha = shost_priv(host);
-	struct qla_hw_data *ha = vha->hw;
-	int rval = (DRIVER_ERROR << 16);
-	int req_sg_cnt, rsp_sg_cnt;
-	uint16_t loop_id;
-	struct fc_port *fcport;
-	char *type = "FC_BSG_HST_CT";
-	struct srb_bsg *ct;
-
-	/* pass through is supported only for ISP 4Gb or higher */
-	if (!IS_FWI2_CAPABLE(ha)) {
-		DEBUG2(qla_printk(KERN_INFO, ha,
-		    "scsi(%ld):Firmware is not capable to support FC "
-		    "CT pass thru\n", vha->host_no));
-		rval = -EPERM;
-		goto done;
-	}
-
-	req_sg_cnt =
-	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	if (!req_sg_cnt) {
-		rval = -ENOMEM;
-		goto done;
-	}
-
-	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	if (!rsp_sg_cnt) {
-		rval = -ENOMEM;
-		goto done;
-	}
-
-	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
-	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
-	{
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "dma mapping resulted in different sg counts \
-		    [request_sg_cnt: %x dma_request_sg_cnt: %x\
-		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
-		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
-		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
-		rval = -EAGAIN;
-		goto done_unmap_sg;
-	}
-
-	if (!vha->flags.online) {
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "host not online\n"));
-		rval = -EIO;
-		goto done_unmap_sg;
-	}
-
-	loop_id =
-	    (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
-	    >> 24;
-	switch (loop_id) {
-	case 0xFC:
-		loop_id = cpu_to_le16(NPH_SNS);
-		break;
-	case 0xFA:
-		loop_id = vha->mgmt_svr_loop_id;
-		break;
-	default:
-		DEBUG2(qla_printk(KERN_INFO, ha,
-		    "Unknown loop id: %x\n", loop_id));
-		rval = -EINVAL;
-		goto done_unmap_sg;
-	}
-
-	/* Allocate a dummy fcport structure, since functions preparing the
-	 * IOCB and mailbox command retrieves port specific information
-	 * from fcport structure. For Host based ELS commands there will be
-	 * no fcport structure allocated
-	 */
-	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
-	if (!fcport)
-	{
-		rval = -ENOMEM;
-		goto done_unmap_sg;
-	}
-
-	/* Initialize all required fields of fcport */
-	fcport->vha = vha;
-	fcport->vp_idx = vha->vp_idx;
-	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
-	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
-	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
-	fcport->loop_id = loop_id;
-
-	/* Alloc SRB structure */
-	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
-	if (!sp) {
-		rval = -ENOMEM;
-		goto done_free_fcport;
-	}
-
-	ct = sp->ctx;
-	ct->ctx.type = SRB_CT_CMD;
-	ct->bsg_job = bsg_job;
-
-	DEBUG2(qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
-	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
-	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
-	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
-	    fcport->d_id.b.al_pa));
-
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS) {
-		kfree(sp->ctx);
-		mempool_free(sp, ha->srb_mempool);
-		rval = -EIO;
-		goto done_free_fcport;
-	}
-	return rval;
-
-done_free_fcport:
-	kfree(fcport);
-done_unmap_sg:
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-done:
-	return rval;
-}
-
-static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
-{
-	struct Scsi_Host *host = bsg_job->shost;
-	scsi_qla_host_t *vha = shost_priv(host);
-	struct qla_hw_data *ha = vha->hw;
-	int rval;
-	uint8_t command_sent;
-	uint32_t vendor_cmd;
-	char *type;
-	struct msg_echo_lb elreq;
-	uint16_t response[MAILBOX_REGISTER_COUNT];
-	uint8_t* fw_sts_ptr;
-	uint8_t *req_data;
-	dma_addr_t req_data_dma;
-	uint32_t req_data_len;
-	uint8_t *rsp_data;
-	dma_addr_t rsp_data_dma;
-	uint32_t rsp_data_len;
-
-	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
-	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
-	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-		rval = -EBUSY;
-		goto done;
-	}
-
-	if (!vha->flags.online) {
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "host not online\n"));
-		rval = -EIO;
-		goto done;
-	}
-
-	elreq.req_sg_cnt =
-	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	if (!elreq.req_sg_cnt) {
-		rval = -ENOMEM;
-		goto done;
-	}
-	elreq.rsp_sg_cnt =
-	    dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	if (!elreq.rsp_sg_cnt) {
-		rval = -ENOMEM;
-		goto done;
-	}
-
-	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
-	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
-	{
-		DEBUG2(printk(KERN_INFO
-		    "dma mapping resulted in different sg counts \
-		    [request_sg_cnt: %x dma_request_sg_cnt: %x\
-		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
-		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
-		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
-		rval = -EAGAIN;
-		goto done_unmap_sg;
-	}
-	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
-	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
-	    &req_data_dma, GFP_KERNEL);
-
-	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
-	    &rsp_data_dma, GFP_KERNEL);
-
-	/* Copy the request buffer in req_data now */
-	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, req_data,
-	    req_data_len);
-
-	elreq.send_dma = req_data_dma;
-	elreq.rcv_dma = rsp_data_dma;
-	elreq.transfer_size = req_data_len;
-
-	/* Vendor cmd : loopback or ECHO diagnostic
-	 * Options:
-	 * Loopback : Either internal or external loopback
-	 * ECHO: ECHO ELS or Vendor specific FC4 link data
-	 */
-	vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
-	elreq.options =
-	    *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
-	    + 1);
-
-	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
-	case QL_VND_LOOPBACK:
-		if (ha->current_topology != ISP_CFG_F) {
-			type = "FC_BSG_HST_VENDOR_LOOPBACK";
-
-			DEBUG2(qla_printk(KERN_INFO, ha,
-			    "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
-			    vha->host_no, type, vendor_cmd, elreq.options));
-
-			command_sent = INT_DEF_LB_LOOPBACK_CMD;
-			rval = qla2x00_loopback_test(vha, &elreq, response);
-			if (IS_QLA81XX(ha)) {
-				if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
-					DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
-					    "ISP\n", __func__, vha->host_no));
-					set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-					qla2xxx_wake_dpc(vha);
-				}
-			}
-		} else {
-			type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
-			DEBUG2(qla_printk(KERN_INFO, ha,
-			    "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
-			    vha->host_no, type, vendor_cmd, elreq.options));
-
-			command_sent = INT_DEF_LB_ECHO_CMD;
-			rval = qla2x00_echo_test(vha, &elreq, response);
-		}
-		break;
-	case QLA84_RESET:
-		if (!IS_QLA84XX(vha->hw)) {
-			rval = -EINVAL;
-			DEBUG16(printk(
-			    "%s(%ld): 8xxx exiting.\n",
-			    __func__, vha->host_no));
-			return rval;
-		}
-		rval = qla84xx_reset(vha, &elreq, bsg_job);
-		break;
-	case QLA84_MGMT_CMD:
-		if (!IS_QLA84XX(vha->hw)) {
-			rval = -EINVAL;
-			DEBUG16(printk(
-			    "%s(%ld): 8xxx exiting.\n",
-			    __func__, vha->host_no));
-			return rval;
-		}
-		rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
-		break;
-	default:
-		rval = -ENOSYS;
-	}
-
-	if (rval != QLA_SUCCESS) {
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
-		rval = 0;
-		bsg_job->reply->result = (DID_ERROR << 16);
-		bsg_job->reply->reply_payload_rcv_len = 0;
-		fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
-		memcpy( fw_sts_ptr, response, sizeof(response));
-		fw_sts_ptr += sizeof(response);
-		*fw_sts_ptr = command_sent;
-	} else {
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
-		rval = bsg_job->reply->result = 0;
-		bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
-		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
-		fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
-		memcpy(fw_sts_ptr, response, sizeof(response));
-		fw_sts_ptr += sizeof(response);
-		*fw_sts_ptr = command_sent;
-		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
-		    bsg_job->reply_payload.sg_cnt, rsp_data,
-		    rsp_data_len);
-	}
-	bsg_job->job_done(bsg_job);
-
-done_unmap_sg:
-
-	if(req_data)
-		dma_free_coherent(&ha->pdev->dev, req_data_len,
-		    req_data, req_data_dma);
-	dma_unmap_sg(&ha->pdev->dev,
-	    bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	dma_unmap_sg(&ha->pdev->dev,
-	    bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-done:
-	return rval;
-}
-
-static int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
-{
-	int ret = -EINVAL;
-
-	switch (bsg_job->request->msgcode) {
-	case FC_BSG_RPT_ELS:
-	case FC_BSG_HST_ELS_NOLOGIN:
-		ret = qla2x00_process_els(bsg_job);
-		break;
-	case FC_BSG_HST_CT:
-		ret = qla2x00_process_ct(bsg_job);
-		break;
-	case FC_BSG_HST_VENDOR:
-		ret = qla2x00_process_vendor_specific(bsg_job);
-		break;
-	case FC_BSG_HST_ADD_RPORT:
-	case FC_BSG_HST_DEL_RPORT:
-	case FC_BSG_RPT_CT:
-	default:
-		DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
-		break;
-	}
-	return ret;
-}
-
-static int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
-{
-	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
-	struct qla_hw_data *ha = vha->hw;
-	srb_t *sp;
-	int cnt, que;
-	unsigned long flags;
-	struct req_que *req;
-	struct srb_bsg *sp_bsg;
-
-	/* find the bsg job from the active list of commands */
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (que = 0; que < ha->max_req_queues; que++) {
-		req = ha->req_q_map[que];
-		if (!req)
-			continue;
-
-		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
-			sp = req->outstanding_cmds[cnt];
-
-			if (sp) {
-				sp_bsg = (struct srb_bsg*)sp->ctx;
-
-				if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
-				    (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
-				    || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
-				    (sp_bsg->bsg_job == bsg_job)) {
-					if (ha->isp_ops->abort_command(sp)) {
-						DEBUG2(qla_printk(KERN_INFO, ha,
-						    "scsi(%ld): mbx abort_command failed\n", vha->host_no));
-						bsg_job->req->errors = bsg_job->reply->result = -EIO;
-					} else {
-						DEBUG2(qla_printk(KERN_INFO, ha,
-						    "scsi(%ld): mbx abort_command success\n", vha->host_no));
-						bsg_job->req->errors = bsg_job->reply->result = 0;
-					}
-					goto done;
-				}
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	DEBUG2(qla_printk(KERN_INFO, ha,
-	    "scsi(%ld) SRB not found to abort\n", vha->host_no));
-	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
-	return 0;
-
-done:
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
-		kfree(sp->fcport);
-	kfree(sp->ctx);
-	mempool_free(sp, ha->srb_mempool);
-	return 0;
-}
-
 struct fc_function_template qla2xxx_transport_functions = {
 
 	.show_host_node_name = 1,
@@ -2502,7 +1968,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
-	if (IS_QLA81XX(ha))
+	if (IS_QLA8XXX_TYPE(ha))
 		speed = FC_PORTSPEED_10GBIT;
 	else if (IS_QLA25XX(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -2516,125 +1982,3 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
 		speed = FC_PORTSPEED_1GBIT;
 	fc_host_supported_speeds(vha->host) = speed;
 }
-static int
-qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
-	int ret = 0;
-	int cmd;
-	uint16_t cmd_status;
-
-	DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
-
-	cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
-	    == A84_RESET_FLAG_ENABLE_DIAG_FW ?
-	    A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
-	ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
-	    &cmd_status);
-	return ret;
-}
-
-static int
-qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
-	struct access_chip_84xx *mn;
-	dma_addr_t mn_dma, mgmt_dma;
-	void *mgmt_b = NULL;
-	int ret = 0;
-	int rsp_hdr_len, len = 0;
-	struct qla84_msg_mgmt *ql84_mgmt;
-
-	ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
-	ql84_mgmt->cmd =
-	    *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
-	ql84_mgmt->mgmtp.u.mem.start_addr =
-	    *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
-	ql84_mgmt->len =
-	    *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
-	ql84_mgmt->mgmtp.u.config.id =
-	    *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
-	ql84_mgmt->mgmtp.u.config.param0 =
-	    *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
-	ql84_mgmt->mgmtp.u.config.param1 =
-	    *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
-	ql84_mgmt->mgmtp.u.info.type =
-	    *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
-	ql84_mgmt->mgmtp.u.info.context =
-	    *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
-
-	rsp_hdr_len = bsg_job->request_payload.payload_len;
-
-	mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
-	if (mn == NULL) {
-		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
-		    "failed%lu\n", __func__, ha->host_no));
-		return -ENOMEM;
-	}
-
-	memset(mn, 0, sizeof (struct access_chip_84xx));
-
-	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
-	mn->entry_count = 1;
-
-	switch (ql84_mgmt->cmd) {
-	case QLA84_MGMT_READ_MEM:
-		mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
-		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
-		break;
-	case QLA84_MGMT_WRITE_MEM:
-		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
-		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
-		break;
-	case QLA84_MGMT_CHNG_CONFIG:
-		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
-		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
-		mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
-		mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
-		break;
-	case QLA84_MGMT_GET_INFO:
-		mn->options = cpu_to_le16(ACO_REQUEST_INFO);
-		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
-		mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
-		break;
-	default:
-		ret = -EIO;
-		goto exit_mgmt0;
-	}
-
-	if ((len == ql84_mgmt->len) &&
-	    ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
-		mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
-		    &mgmt_dma, GFP_KERNEL);
-		if (mgmt_b == NULL) {
-			DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
-			    "failed%lu\n", __func__, ha->host_no));
-			ret = -ENOMEM;
-			goto exit_mgmt0;
-		}
-		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
-		mn->dseg_count = cpu_to_le16(1);
-		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
-		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
-		mn->dseg_length = cpu_to_le32(len);
-
-		if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
-			memcpy(mgmt_b, ql84_mgmt->payload, len);
-		}
-	}
-
-	ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
-	if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
-	    || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
-		if (ret != QLA_SUCCESS)
-			DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
-			    __func__, ha->host_no));
-	} else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
-	    (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
-	}
-
-	if (mgmt_b)
-		dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
-
-exit_mgmt0:
-	dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
-	return ret;
-}