diff options
author | Bart Van Assche <bvanassche@acm.org> | 2014-10-02 09:29:25 -0400 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2014-11-12 06:05:23 -0500 |
commit | 77f2c1a40e6fed202d08c8ec0bdca36a76dab368 (patch) | |
tree | ab93e43dea23a96b66f5ba872ac87f3ab61810e6 /drivers/infiniband/ulp/srp | |
parent | 509c07bc18500c3ded1a8e6273ace5002136c9d2 (diff) |
IB/srp: Use block layer tags
Since the block layer already contains functionality to assign
a tag to each request, use that functionality instead of
reimplementing that functionality in the SRP initiator driver.
This change makes the free_reqs list superfluous. Hence remove
that list.
[hch: updated to use .use_blk_tags instead of scsi_activate_tcq]
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/infiniband/ulp/srp')
-rw-r--r-- | drivers/infiniband/ulp/srp/ib_srp.c | 48 | ||||
-rw-r--r-- | drivers/infiniband/ulp/srp/ib_srp.h | 3 |
2 files changed, 27 insertions, 24 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index f07a8a614738..42af59f3c8c6 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -821,8 +821,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch) | |||
821 | dma_addr_t dma_addr; | 821 | dma_addr_t dma_addr; |
822 | int i, ret = -ENOMEM; | 822 | int i, ret = -ENOMEM; |
823 | 823 | ||
824 | INIT_LIST_HEAD(&ch->free_reqs); | ||
825 | |||
826 | ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), | 824 | ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), |
827 | GFP_KERNEL); | 825 | GFP_KERNEL); |
828 | if (!ch->req_ring) | 826 | if (!ch->req_ring) |
@@ -853,8 +851,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch) | |||
853 | goto out; | 851 | goto out; |
854 | 852 | ||
855 | req->indirect_dma_addr = dma_addr; | 853 | req->indirect_dma_addr = dma_addr; |
856 | req->index = i; | ||
857 | list_add_tail(&req->list, &ch->free_reqs); | ||
858 | } | 854 | } |
859 | ret = 0; | 855 | ret = 0; |
860 | 856 | ||
@@ -1076,7 +1072,6 @@ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, | |||
1076 | 1072 | ||
1077 | spin_lock_irqsave(&ch->lock, flags); | 1073 | spin_lock_irqsave(&ch->lock, flags); |
1078 | ch->req_lim += req_lim_delta; | 1074 | ch->req_lim += req_lim_delta; |
1079 | list_add_tail(&req->list, &ch->free_reqs); | ||
1080 | spin_unlock_irqrestore(&ch->lock, flags); | 1075 | spin_unlock_irqrestore(&ch->lock, flags); |
1081 | } | 1076 | } |
1082 | 1077 | ||
@@ -1648,8 +1643,11 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) | |||
1648 | ch->tsk_mgmt_status = rsp->data[3]; | 1643 | ch->tsk_mgmt_status = rsp->data[3]; |
1649 | complete(&ch->tsk_mgmt_done); | 1644 | complete(&ch->tsk_mgmt_done); |
1650 | } else { | 1645 | } else { |
1651 | req = &ch->req_ring[rsp->tag]; | 1646 | scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); |
1652 | scmnd = srp_claim_req(ch, req, NULL, NULL); | 1647 | if (scmnd) { |
1648 | req = (void *)scmnd->host_scribble; | ||
1649 | scmnd = srp_claim_req(ch, req, NULL, scmnd); | ||
1650 | } | ||
1653 | if (!scmnd) { | 1651 | if (!scmnd) { |
1654 | shost_printk(KERN_ERR, target->scsi_host, | 1652 | shost_printk(KERN_ERR, target->scsi_host, |
1655 | "Null scmnd for RSP w/tag %016llx\n", | 1653 | "Null scmnd for RSP w/tag %016llx\n", |
@@ -1889,6 +1887,8 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1889 | struct srp_cmd *cmd; | 1887 | struct srp_cmd *cmd; |
1890 | struct ib_device *dev; | 1888 | struct ib_device *dev; |
1891 | unsigned long flags; | 1889 | unsigned long flags; |
1890 | u32 tag; | ||
1891 | u16 idx; | ||
1892 | int len, ret; | 1892 | int len, ret; |
1893 | const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; | 1893 | const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; |
1894 | 1894 | ||
@@ -1905,17 +1905,22 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1905 | if (unlikely(scmnd->result)) | 1905 | if (unlikely(scmnd->result)) |
1906 | goto err; | 1906 | goto err; |
1907 | 1907 | ||
1908 | WARN_ON_ONCE(scmnd->request->tag < 0); | ||
1909 | tag = blk_mq_unique_tag(scmnd->request); | ||
1908 | ch = &target->ch; | 1910 | ch = &target->ch; |
1911 | idx = blk_mq_unique_tag_to_tag(tag); | ||
1912 | WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", | ||
1913 | dev_name(&shost->shost_gendev), tag, idx, | ||
1914 | target->req_ring_size); | ||
1909 | 1915 | ||
1910 | spin_lock_irqsave(&ch->lock, flags); | 1916 | spin_lock_irqsave(&ch->lock, flags); |
1911 | iu = __srp_get_tx_iu(ch, SRP_IU_CMD); | 1917 | iu = __srp_get_tx_iu(ch, SRP_IU_CMD); |
1912 | if (!iu) | ||
1913 | goto err_unlock; | ||
1914 | |||
1915 | req = list_first_entry(&ch->free_reqs, struct srp_request, list); | ||
1916 | list_del(&req->list); | ||
1917 | spin_unlock_irqrestore(&ch->lock, flags); | 1918 | spin_unlock_irqrestore(&ch->lock, flags); |
1918 | 1919 | ||
1920 | if (!iu) | ||
1921 | goto err; | ||
1922 | |||
1923 | req = &ch->req_ring[idx]; | ||
1919 | dev = target->srp_host->srp_dev->dev; | 1924 | dev = target->srp_host->srp_dev->dev; |
1920 | ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, | 1925 | ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, |
1921 | DMA_TO_DEVICE); | 1926 | DMA_TO_DEVICE); |
@@ -1927,7 +1932,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1927 | 1932 | ||
1928 | cmd->opcode = SRP_CMD; | 1933 | cmd->opcode = SRP_CMD; |
1929 | cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); | 1934 | cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); |
1930 | cmd->tag = req->index; | 1935 | cmd->tag = tag; |
1931 | memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); | 1936 | memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); |
1932 | 1937 | ||
1933 | req->scmnd = scmnd; | 1938 | req->scmnd = scmnd; |
@@ -1976,12 +1981,6 @@ err_iu: | |||
1976 | */ | 1981 | */ |
1977 | req->scmnd = NULL; | 1982 | req->scmnd = NULL; |
1978 | 1983 | ||
1979 | spin_lock_irqsave(&ch->lock, flags); | ||
1980 | list_add(&req->list, &ch->free_reqs); | ||
1981 | |||
1982 | err_unlock: | ||
1983 | spin_unlock_irqrestore(&ch->lock, flags); | ||
1984 | |||
1985 | err: | 1984 | err: |
1986 | if (scmnd->result) { | 1985 | if (scmnd->result) { |
1987 | scmnd->scsi_done(scmnd); | 1986 | scmnd->scsi_done(scmnd); |
@@ -2387,6 +2386,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
2387 | { | 2386 | { |
2388 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 2387 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
2389 | struct srp_request *req = (struct srp_request *) scmnd->host_scribble; | 2388 | struct srp_request *req = (struct srp_request *) scmnd->host_scribble; |
2389 | u32 tag; | ||
2390 | struct srp_rdma_ch *ch; | 2390 | struct srp_rdma_ch *ch; |
2391 | int ret; | 2391 | int ret; |
2392 | 2392 | ||
@@ -2395,7 +2395,8 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
2395 | ch = &target->ch; | 2395 | ch = &target->ch; |
2396 | if (!req || !srp_claim_req(ch, req, NULL, scmnd)) | 2396 | if (!req || !srp_claim_req(ch, req, NULL, scmnd)) |
2397 | return SUCCESS; | 2397 | return SUCCESS; |
2398 | if (srp_send_tsk_mgmt(ch, req->index, scmnd->device->lun, | 2398 | tag = blk_mq_unique_tag(scmnd->request); |
2399 | if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, | ||
2399 | SRP_TSK_ABORT_TASK) == 0) | 2400 | SRP_TSK_ABORT_TASK) == 0) |
2400 | ret = SUCCESS; | 2401 | ret = SUCCESS; |
2401 | else if (target->rport->state == SRP_RPORT_LOST) | 2402 | else if (target->rport->state == SRP_RPORT_LOST) |
@@ -2633,7 +2634,8 @@ static struct scsi_host_template srp_template = { | |||
2633 | .this_id = -1, | 2634 | .this_id = -1, |
2634 | .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, | 2635 | .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, |
2635 | .use_clustering = ENABLE_CLUSTERING, | 2636 | .use_clustering = ENABLE_CLUSTERING, |
2636 | .shost_attrs = srp_host_attrs | 2637 | .shost_attrs = srp_host_attrs, |
2638 | .use_blk_tags = 1, | ||
2637 | }; | 2639 | }; |
2638 | 2640 | ||
2639 | static int srp_sdev_count(struct Scsi_Host *host) | 2641 | static int srp_sdev_count(struct Scsi_Host *host) |
@@ -3054,6 +3056,10 @@ static ssize_t srp_create_target(struct device *dev, | |||
3054 | if (ret) | 3056 | if (ret) |
3055 | goto err; | 3057 | goto err; |
3056 | 3058 | ||
3059 | ret = scsi_init_shared_tag_map(target_host, target_host->can_queue); | ||
3060 | if (ret) | ||
3061 | goto err; | ||
3062 | |||
3057 | target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; | 3063 | target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; |
3058 | 3064 | ||
3059 | if (!srp_conn_unique(target->srp_host, target)) { | 3065 | if (!srp_conn_unique(target->srp_host, target)) { |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 74530d9e6391..37aa9f49947a 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
@@ -116,7 +116,6 @@ struct srp_host { | |||
116 | }; | 116 | }; |
117 | 117 | ||
118 | struct srp_request { | 118 | struct srp_request { |
119 | struct list_head list; | ||
120 | struct scsi_cmnd *scmnd; | 119 | struct scsi_cmnd *scmnd; |
121 | struct srp_iu *cmd; | 120 | struct srp_iu *cmd; |
122 | union { | 121 | union { |
@@ -127,7 +126,6 @@ struct srp_request { | |||
127 | struct srp_direct_buf *indirect_desc; | 126 | struct srp_direct_buf *indirect_desc; |
128 | dma_addr_t indirect_dma_addr; | 127 | dma_addr_t indirect_dma_addr; |
129 | short nmdesc; | 128 | short nmdesc; |
130 | short index; | ||
131 | }; | 129 | }; |
132 | 130 | ||
133 | /** | 131 | /** |
@@ -137,7 +135,6 @@ struct srp_request { | |||
137 | struct srp_rdma_ch { | 135 | struct srp_rdma_ch { |
138 | /* These are RW in the hot path, and commonly used together */ | 136 | /* These are RW in the hot path, and commonly used together */ |
139 | struct list_head free_tx; | 137 | struct list_head free_tx; |
140 | struct list_head free_reqs; | ||
141 | spinlock_t lock; | 138 | spinlock_t lock; |
142 | s32 req_lim; | 139 | s32 req_lim; |
143 | 140 | ||