Diffstat (limited to 'drivers/scsi/qla2xxx/qla_bsg.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_bsg.c	154
1 file changed, 149 insertions(+), 5 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ad54099cb805..39719f892488 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -30,14 +30,31 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	struct qla_hw_data *ha = vha->hw;
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
 
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	if (sp->type == SRB_FXIOCB_BCMD) {
+		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+			dma_unmap_sg(&ha->pdev->dev,
+			    bsg_job->request_payload.sg_list,
+			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+			dma_unmap_sg(&ha->pdev->dev,
+			    bsg_job->reply_payload.sg_list,
+			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	}
 
 	if (sp->type == SRB_CT_CMD ||
+	    sp->type == SRB_FXIOCB_BCMD ||
 	    sp->type == SRB_ELS_CMD_HST)
 		kfree(sp->fcport);
 	qla2x00_rel_sp(vha, sp);
@@ -751,6 +768,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 	elreq.transfer_size = req_data_len;
 
 	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	elreq.iteration_count =
+	    bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
 
 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
 	    (ha->current_topology == ISP_CFG_F ||
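
Note on the loopback hunk above: qla2x00_process_loopback() now reads two parameters out of the vendor command words, the existing options in word 1 and a new iteration count in word 2 (word 0 is the vendor dispatch code handled by qla2x00_process_vendor_specific()). A minimal caller-side sketch of that layout follows; it assumes bsg_req points at an already prepared FC_BSG_HST_VENDOR request and that QL_VND_LOOPBACK is the dispatch code for this path, neither of which is shown in this hunk, and options/iterations are caller-chosen values.

	/* Sketch only: vendor command words as consumed by the hunk above. */
	bsg_req->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_LOOPBACK;	/* dispatch code (assumed) */
	bsg_req->rqst_data.h_vendor.vendor_cmd[1] = options;		/* read into elreq.options */
	bsg_req->rqst_data.h_vendor.vendor_cmd[2] = iterations;		/* read into elreq.iteration_count (new) */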
@@ -1883,6 +1902,128 @@ done:
 }
 
 static int
+qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = bsg_job->shost;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = (DRIVER_ERROR << 16);
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+	srb_t *sp;
+	int req_sg_cnt = 0, rsp_sg_cnt = 0;
+	struct fc_port *fcport;
+	char *type = "FC_BSG_HST_FX_MGMT";
+
+	/* Copy the IOCB specific information */
+	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+	    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+	/* Dump the vendor information */
+	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
+	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x70d0,
+		    "Host is not online.\n");
+		rval = -EIO;
+		goto done;
+	}
+
+	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		    bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+		if (!req_sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x70c7,
+			    "dma_map_sg return %d for request\n", req_sg_cnt);
+			rval = -ENOMEM;
+			goto done;
+		}
+	}
+
+	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		    bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+		if (!rsp_sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x70c8,
+			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+			rval = -ENOMEM;
+			goto done_unmap_req_sg;
+		}
+	}
+
+	ql_dbg(ql_dbg_user, vha, 0x70c9,
+	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+
+	/* Allocate a dummy fcport structure, since the functions preparing
+	 * the IOCB and mailbox command retrieve port specific information
+	 * from the fcport structure. For host based ELS commands there will
+	 * be no fcport structure allocated.
+	 */
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		ql_log(ql_log_warn, vha, 0x70ca,
+		    "Failed to allocate fcport.\n");
+		rval = -ENOMEM;
+		goto done_unmap_rsp_sg;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_warn, vha, 0x70cb,
+		    "qla2x00_get_sp failed.\n");
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+
+	/* Initialize all required fields of fcport */
+	fcport->vha = vha;
+	fcport->loop_id = piocb_rqst->dataword;
+
+	sp->type = SRB_FXIOCB_BCMD;
+	sp->name = "bsg_fx_mgmt";
+	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
+
+	ql_dbg(ql_dbg_user, vha, 0x70cc,
+	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
+	    type, piocb_rqst->func_type, fcport->loop_id);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x70cd,
+		    "qla2x00_start_sp failed=%d.\n", rval);
+		mempool_free(sp, ha->srb_mempool);
+		rval = -EIO;
+		goto done_free_fcport;
+	}
+	return rval;
+
+done_free_fcport:
+	kfree(fcport);
+
+done_unmap_rsp_sg:
+	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+		dma_unmap_sg(&ha->pdev->dev,
+		    bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+		dma_unmap_sg(&ha->pdev->dev,
+		    bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+done:
+	return rval;
+}
+
+static int
 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 {
 	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
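
The new qlafx00_mgmt_cmd() above establishes a simple contract with its caller: vendor command word 0 selects the operation, a struct qla_mt_iocb_rqst_fx00 is overlaid on the words starting at index 1, and the SRB_FXDISC_REQ_DMA_VALID / SRB_FXDISC_RESP_DMA_VALID bits in that structure decide which bsg payloads get DMA-mapped here and unmapped again in qla2x00_bsg_sp_free(). A rough sketch of how a request might be filled in before submission follows; only the fields this diff actually references (flags, func_type, dataword) are touched, the structure and flag macros are driver-private (so a real caller mirrors their layout), and the helper variables (mgmt_func, loop_id, the two booleans) are hypothetical.

	/* Sketch only: populate the IOCB request the way qlafx00_mgmt_cmd()
	 * expects to find it at vendor_cmd[1]. */
	struct qla_mt_iocb_rqst_fx00 rqst;

	memset(&rqst, 0, sizeof(rqst));
	rqst.func_type = mgmt_func;	/* firmware management function (hypothetical value) */
	rqst.dataword  = loop_id;	/* copied into fcport->loop_id by the driver */

	/* Only the flagged directions are DMA-mapped at submit time and
	 * unmapped on completion in qla2x00_bsg_sp_free(). */
	if (has_request_payload)
		rqst.flags |= SRB_FXDISC_REQ_DMA_VALID;
	if (has_reply_payload)
		rqst.flags |= SRB_FXDISC_RESP_DMA_VALID;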
@@ -1928,6 +2069,8 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 	case QL_VND_DIAG_IO_CMD:
 		return qla24xx_process_bidir_cmd(bsg_job);
 
+	case QL_VND_FX00_MGMT_CMD:
+		return qlafx00_mgmt_cmd(bsg_job);
 	default:
 		return -ENOSYS;
 	}
@@ -2007,7 +2150,8 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
 			sp = req->outstanding_cmds[cnt];
 			if (sp) {
 				if (((sp->type == SRB_CT_CMD) ||
-				    (sp->type == SRB_ELS_CMD_HST))
+				    (sp->type == SRB_ELS_CMD_HST) ||
+				    (sp->type == SRB_FXIOCB_BCMD))
 				    && (sp->u.bsg_job == bsg_job)) {
 					spin_unlock_irqrestore(&ha->hardware_lock, flags);
 					if (ha->isp_ops->abort_command(sp)) {
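
For completeness, the new QL_VND_FX00_MGMT_CMD case is reached from userspace the same way as the other qla2xxx vendor commands: through the fc_host bsg node, using the SG_IO ioctl with a struct sg_io_v4 whose request buffer is a struct fc_bsg_request carrying an FC_BSG_HST_VENDOR message. The sketch below is illustrative only and not part of this patch; the bsg node path, the buffer sizing, and the read-direction-only payload are assumptions, QL_VND_FX00_MGMT_CMD lives in the driver's private qla_bsg.h and is therefore passed in as a parameter rather than hard-coded, and error handling is minimal. A write payload would go in dout_xferp/dout_xfer_len and becomes the driver's request_payload.

	/* Illustrative sketch: submit an FX00 management request through the
	 * FC bsg interface. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>		/* SG_IO */
	#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_SCSI */
	#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

	static int send_fx00_mgmt(const char *bsg_node,	/* e.g. "/dev/bsg/fc_host0" (assumed) */
				  uint32_t vnd_cmd,	/* QL_VND_FX00_MGMT_CMD, copied from qla_bsg.h */
				  const void *iocb_rqst, size_t iocb_len,	/* qla_mt_iocb_rqst_fx00 image */
				  void *din, size_t din_len)	/* buffer for the reply payload */
	{
		uint8_t req_buf[sizeof(struct fc_bsg_request) + 4 + 512]
			__attribute__((aligned(4))) = { 0 };
		struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)req_buf;
		struct fc_bsg_reply bsg_rep = { 0 };
		struct sg_io_v4 io = { 0 };
		int fd, ret;

		if (iocb_len > 512)
			return -1;

		bsg_req->msgcode = FC_BSG_HST_VENDOR;
		/* Word 0 is the dispatch code checked in
		 * qla2x00_process_vendor_specific(); the IOCB request starts
		 * at word 1, exactly where qlafx00_mgmt_cmd() reads it. */
		bsg_req->rqst_data.h_vendor.vendor_cmd[0] = vnd_cmd;
		memcpy(&bsg_req->rqst_data.h_vendor.vendor_cmd[1], iocb_rqst, iocb_len);

		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)bsg_req;
		io.request_len = sizeof(*bsg_req) + 4 + iocb_len;
		io.response = (uintptr_t)&bsg_rep;
		io.max_response_len = sizeof(bsg_rep);
		io.din_xferp = (uintptr_t)din;	/* becomes the driver's reply_payload */
		io.din_xfer_len = din_len;
		io.timeout = 30 * 1000;		/* milliseconds */

		fd = open(bsg_node, O_RDWR);
		if (fd < 0)
			return -1;
		ret = ioctl(fd, SG_IO, &io);
		close(fd);
		return ret;
	}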