diff options
author | Bart Van Assche <bart.vanassche@wdc.com> | 2018-01-05 11:26:49 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2018-01-06 11:18:00 -0500 |
commit | 68c6e9cd2fa4f0109364834475628b4b1dd12257 (patch) | |
tree | 36fbac99cef67b28e757ae16e2efd85283f4be3d | |
parent | 4442b56fb5151e9a7e21c0f73aba5a071f559dce (diff) |
nvmet/rdma: Use sgl_alloc() and sgl_free()
Use the sgl_alloc() and sgl_free() functions instead of open coding
these functions.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | drivers/nvme/target/Kconfig | 1 | ||||
-rw-r--r-- | drivers/nvme/target/rdma.c | 63 |
2 files changed, 5 insertions(+), 59 deletions(-)
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 4d9715630e21..5f4f8b16685f 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig | |||
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA | |||
29 | tristate "NVMe over Fabrics RDMA target support" | 29 | tristate "NVMe over Fabrics RDMA target support" |
30 | depends on INFINIBAND | 30 | depends on INFINIBAND |
31 | depends on NVME_TARGET | 31 | depends on NVME_TARGET |
32 | select SGL_ALLOC | ||
32 | help | 33 | help |
33 | This enables the NVMe RDMA target support, which allows exporting NVMe | 34 | This enables the NVMe RDMA target support, which allows exporting NVMe |
34 | devices over RDMA. | 35 | devices over RDMA. |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 49912909c298..0e4c15754c58 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) | |||
185 | spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); | 185 | spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); |
186 | } | 186 | } |
187 | 187 | ||
188 | static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents) | ||
189 | { | ||
190 | struct scatterlist *sg; | ||
191 | int count; | ||
192 | |||
193 | if (!sgl || !nents) | ||
194 | return; | ||
195 | |||
196 | for_each_sg(sgl, sg, nents, count) | ||
197 | __free_page(sg_page(sg)); | ||
198 | kfree(sgl); | ||
199 | } | ||
200 | |||
201 | static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, | ||
202 | u32 length) | ||
203 | { | ||
204 | struct scatterlist *sg; | ||
205 | struct page *page; | ||
206 | unsigned int nent; | ||
207 | int i = 0; | ||
208 | |||
209 | nent = DIV_ROUND_UP(length, PAGE_SIZE); | ||
210 | sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL); | ||
211 | if (!sg) | ||
212 | goto out; | ||
213 | |||
214 | sg_init_table(sg, nent); | ||
215 | |||
216 | while (length) { | ||
217 | u32 page_len = min_t(u32, length, PAGE_SIZE); | ||
218 | |||
219 | page = alloc_page(GFP_KERNEL); | ||
220 | if (!page) | ||
221 | goto out_free_pages; | ||
222 | |||
223 | sg_set_page(&sg[i], page, page_len, 0); | ||
224 | length -= page_len; | ||
225 | i++; | ||
226 | } | ||
227 | *sgl = sg; | ||
228 | *nents = nent; | ||
229 | return 0; | ||
230 | |||
231 | out_free_pages: | ||
232 | while (i > 0) { | ||
233 | i--; | ||
234 | __free_page(sg_page(&sg[i])); | ||
235 | } | ||
236 | kfree(sg); | ||
237 | out: | ||
238 | return NVME_SC_INTERNAL; | ||
239 | } | ||
240 | |||
241 | static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev, | 188 | static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev, |
242 | struct nvmet_rdma_cmd *c, bool admin) | 189 | struct nvmet_rdma_cmd *c, bool admin) |
243 | { | 190 | { |
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) | |||
484 | } | 431 | } |
485 | 432 | ||
486 | if (rsp->req.sg != &rsp->cmd->inline_sg) | 433 | if (rsp->req.sg != &rsp->cmd->inline_sg) |
487 | nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt); | 434 | sgl_free(rsp->req.sg); |
488 | 435 | ||
489 | if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) | 436 | if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) |
490 | nvmet_rdma_process_wr_wait_list(queue); | 437 | nvmet_rdma_process_wr_wait_list(queue); |
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, | |||
621 | u32 len = get_unaligned_le24(sgl->length); | 568 | u32 len = get_unaligned_le24(sgl->length); |
622 | u32 key = get_unaligned_le32(sgl->key); | 569 | u32 key = get_unaligned_le32(sgl->key); |
623 | int ret; | 570 | int ret; |
624 | u16 status; | ||
625 | 571 | ||
626 | /* no data command? */ | 572 | /* no data command? */ |
627 | if (!len) | 573 | if (!len) |
628 | return 0; | 574 | return 0; |
629 | 575 | ||
630 | status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, | 576 | rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt); |
631 | len); | 577 | if (!rsp->req.sg) |
632 | if (status) | 578 | return NVME_SC_INTERNAL; |
633 | return status; | ||
634 | 579 | ||
635 | ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, | 580 | ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, |
636 | rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, | 581 | rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, |