author		Sagi Grimberg <sagig@mellanox.com>	2013-07-28 05:35:39 -0400
committer	Roland Dreier <roland@purestorage.com>	2013-08-09 20:18:09 -0400
commit		b4e155ffbbd65cba77207bc5522c7b734a5c8c9d (patch)
tree		8ab7e294205e558e71e0a028be63a8f33bdc6203 /drivers/infiniband/ulp/iser/iser_initiator.c
parent		b7f04513090cf12394de27588a1956d1f97188cb (diff)
IB/iser: Generalize RDMA memory registration
Currently the driver uses FMRs as the only means to register the
memory pointed to by the SG list provided by the SCSI mid-layer with
the RDMA device.

As a preparation step for adding more fast-path memory registration
methods, make the alloc/free and reg/unreg calls function pointers,
which are for now simply set to the existing FMR ones.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
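For reference, a minimal sketch of the hooks this patch introduces in
struct iser_device (declared in iser.h, outside this file's diff). The
field names and call signatures are inferred from the call sites in the
diff below; iser_create_fmr_pool and iser_free_fmr_pool appear in the
removed lines here, while the FMR-backed reg/unreg handler names
(iser_reg_rdma_mem_fmr, iser_unreg_mem_fmr) and the helper
iser_set_reg_ops are illustrative assumptions:

/*
 * Sketch only, not part of this file's diff: registration hooks
 * added to struct iser_device.  Signatures are inferred from the
 * call sites in iser_initiator.c below.
 */
struct iser_device {
	/* ... existing members elided ... */
	int	(*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
					   unsigned cmds_max);
	void	(*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
	int	(*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
				     enum iser_data_dir cmd_dir);
	void	(*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
				       enum iser_data_dir cmd_dir);
};

/*
 * At device-init time the hooks are pointed at the existing FMR code
 * paths; the reg/unreg handler names are assumed for illustration.
 */
static void iser_set_reg_ops(struct iser_device *device)
{
	device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
	device->iser_free_rdma_reg_res  = iser_free_fmr_pool;
	device->iser_reg_rdma_mem       = iser_reg_rdma_mem_fmr; /* assumed */
	device->iser_unreg_rdma_mem     = iser_unreg_mem_fmr;    /* assumed */
}

Future registration methods (e.g. fast registration work requests) can
then plug in by assigning their own handlers, with no change to the
call sites touched below.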
Diffstat (limited to 'drivers/infiniband/ulp/iser/iser_initiator.c')
-rw-r--r--	drivers/infiniband/ulp/iser/iser_initiator.c	34
1 file changed, 16 insertions, 18 deletions
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5c2b142840de..bdc38f423ca2 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -49,6 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
+	struct iser_device *device = iser_task->iser_conn->ib_conn->device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -69,7 +70,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
+	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
@@ -98,6 +99,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 			       unsigned int edtl)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
+	struct iser_device *device = iser_task->iser_conn->ib_conn->device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -119,7 +121,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
+	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
 	if (err != 0) {
 		iser_err("Failed to register write cmd RDMA mem\n");
 		return err;
@@ -253,8 +255,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *s
 	ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
 	ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;
 
-	if (iser_create_fmr_pool(ib_conn, session->scsi_cmds_max))
-		goto create_fmr_pool_failed;
+	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+		goto create_rdma_reg_res_failed;
 
 	if (iser_alloc_login_buf(ib_conn))
 		goto alloc_login_buf_fail;
@@ -293,8 +295,8 @@ rx_desc_dma_map_failed:
 rx_desc_alloc_fail:
 	iser_free_login_buf(ib_conn);
 alloc_login_buf_fail:
-	iser_free_fmr_pool(ib_conn);
-create_fmr_pool_failed:
+	device->iser_free_rdma_reg_res(ib_conn);
+create_rdma_reg_res_failed:
 	iser_err("failed allocating rx descriptors / data buffers\n");
 	return -ENOMEM;
 }
@@ -308,6 +310,9 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
 	if (!ib_conn->rx_descs)
 		goto free_login_buf;
 
+	if (device && device->iser_free_rdma_reg_res)
+		device->iser_free_rdma_reg_res(ib_conn);
+
 	rx_desc = ib_conn->rx_descs;
 	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
@@ -318,7 +323,6 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
 
 free_login_buf:
 	iser_free_login_buf(ib_conn);
-	iser_free_fmr_pool(ib_conn);
 }
 
 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
@@ -629,8 +633,8 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
+	struct iser_device *device = iser_task->iser_conn->ib_conn->device;
 	int is_rdma_aligned = 1;
-	struct iser_regd_buf *regd;
 
 	/* if we were reading, copy back to unaligned sglist,
 	 * anyway dma_unmap and free the copy
@@ -644,17 +648,11 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
 	}
 
-	if (iser_task->dir[ISER_DIR_IN]) {
-		regd = &iser_task->rdma_regd[ISER_DIR_IN];
-		if (regd->reg.is_fmr)
-			iser_unreg_mem(&regd->reg);
-	}
+	if (iser_task->dir[ISER_DIR_IN])
+		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
 
-	if (iser_task->dir[ISER_DIR_OUT]) {
-		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
-		if (regd->reg.is_fmr)
-			iser_unreg_mem(&regd->reg);
-	}
+	if (iser_task->dir[ISER_DIR_OUT])
+		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
 
 	/* if the data was unaligned, it was already unmapped and then copied */
 	if (is_rdma_aligned)