author     Erez Zilber <erezz@voltaire.com>        2006-09-11 05:26:33 -0400
committer  Roland Dreier <rolandd@cisco.com>       2006-09-22 18:22:52 -0400
commit     d81110285f7f6c07a0ce8f99a5ff158a647cd649
tree       7ecbae2d81a7464114a7d57e1a7d0230aaa21071 /drivers/infiniband/ulp
parent     e981f1d4b8288072ba7cf6b7141cd4aefb404383
IB/iser: Do not use FMR for a single dma entry sg
Fast Memory Registration (FMR) is used to register for RDMA an SG whose
elements are not linearly sequential after DMA mapping.
The IB verbs layer provides an "all DMA memory" MR (memory region) which
can be used for RDMA-ing a linearly sequential DMA buffer.
Change the code to use the DMA MR instead of doing FMR when DMA mapping
produces an SG with a single DMA entry.
Signed-off-by: Erez Zilber <erezz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
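
For context, a minimal sketch of the registration choice this patch introduces is shown below. This is an illustrative reduction, not the driver code itself: struct sketch_mem_reg, sketch_get_dma_mr() and sketch_reg_rdma() are invented names for the example, while ib_get_dma_mr(), sg_dma_address(), sg_dma_len() and the lkey/rkey/va/len/is_fmr fields correspond to the 2.6.18-era verbs API and the structure members touched by the diff below.

/*
 * Illustrative sketch only -- not the actual iSER code. It shows how a
 * ULP can cover a DMA-contiguous buffer with the device-wide DMA MR
 * instead of mapping an FMR. "sketch_mem_reg" and the helper names are
 * invented for this example; the verbs calls are the 2.6.18-era API.
 */
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

struct sketch_mem_reg {		/* cut-down analogue of iser_mem_reg */
	u32 lkey;
	u32 rkey;
	u64 va;
	u64 len;
	int is_fmr;		/* 0: DMA MR, nothing to unregister */
};

/* Done once per device: one MR that spans all DMA-able memory. */
static struct ib_mr *sketch_get_dma_mr(struct ib_pd *pd)
{
	return ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
}

/*
 * If dma_map_sg() collapsed the buffer to one DMA entry, describe it
 * with the DMA MR's keys; otherwise the caller falls back to the FMR
 * path (not shown here).
 */
static int sketch_reg_rdma(struct ib_mr *dma_mr, struct scatterlist *sg,
			   int dma_nents, struct sketch_mem_reg *reg)
{
	if (dma_nents != 1)
		return -EAGAIN;	/* caller would use FMR registration */

	reg->lkey   = dma_mr->lkey;
	reg->rkey   = dma_mr->rkey;
	reg->va     = sg_dma_address(&sg[0]);
	reg->len    = sg_dma_len(&sg[0]);
	reg->is_fmr = 0;
	return 0;
}

The point of the design is that a buffer which is already DMA-contiguous needs no FMR map/unmap round trip; the pre-registered DMA MR already covers it, which is also why the release path below only calls iser_unreg_mem() when is_fmr is set.
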
Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/iser/iscsi_iser.h  |  1
 drivers/infiniband/ulp/iser/iser_memory.c | 48
 drivers/infiniband/ulp/iser/iser_verbs.c  |  6
3 files changed, 39 insertions, 16 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0ba02abb0414..7e1a411db2a3 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -175,6 +175,7 @@ struct iser_mem_reg {
 	u64  va;
 	u64  len;
 	void *mem_h;
+	int  is_fmr;
 };
 
 struct iser_regd_buf {
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 8fea0bce5042..d0b03f426581 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -56,7 +56,7 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
 	if ((atomic_read(&regd_buf->ref_count) == 0) ||
 	    atomic_dec_and_test(&regd_buf->ref_count)) {
 		/* if we used the dma mr, unreg is just NOP */
-		if (regd_buf->reg.rkey != 0)
+		if (regd_buf->reg.is_fmr)
 			iser_unreg_mem(&regd_buf->reg);
 
 		if (regd_buf->dma_addr) {
@@ -91,9 +91,9 @@ void iser_reg_single(struct iser_device *device,
 	BUG_ON(dma_mapping_error(dma_addr));
 
 	regd_buf->reg.lkey = device->mr->lkey;
-	regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */
 	regd_buf->reg.len = regd_buf->data_size;
 	regd_buf->reg.va = dma_addr;
+	regd_buf->reg.is_fmr = 0;
 
 	regd_buf->dma_addr = dma_addr;
 	regd_buf->direction = direction;
@@ -379,11 +379,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		      enum iser_data_dir cmd_dir)
 {
 	struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
 	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
 	int err;
 	int i;
+	struct scatterlist *sg;
 
 	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
 
@@ -399,19 +401,37 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		mem = &iser_ctask->data_copy[cmd_dir];
 	}
 
-	iser_page_vec_build(mem, ib_conn->page_vec);
-	err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
-	if (err) {
-		iser_data_buf_dump(mem);
-		iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
-			 ntoh24(iser_ctask->desc.iscsi_header.dlength));
-		iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
-			 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
-			 ib_conn->page_vec->offset);
-		for (i=0 ; i<ib_conn->page_vec->length ; i++) {
-			iser_err("page_vec[%d] = 0x%lx\n", i, ib_conn->page_vec->pages[i]);
+	/* if there a single dma entry, FMR is not needed */
+	if (mem->dma_nents == 1) {
+		sg = (struct scatterlist *)mem->buf;
+
+		regd_buf->reg.lkey = device->mr->lkey;
+		regd_buf->reg.rkey = device->mr->rkey;
+		regd_buf->reg.len = sg_dma_len(&sg[0]);
+		regd_buf->reg.va = sg_dma_address(&sg[0]);
+		regd_buf->reg.is_fmr = 0;
+
+		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
+			 "va: 0x%08lX sz: %ld]\n",
+			 (unsigned int)regd_buf->reg.lkey,
+			 (unsigned int)regd_buf->reg.rkey,
+			 (unsigned long)regd_buf->reg.va,
+			 (unsigned long)regd_buf->reg.len);
+	} else { /* use FMR for multiple dma entries */
+		iser_page_vec_build(mem, ib_conn->page_vec);
+		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+		if (err) {
+			iser_data_buf_dump(mem);
+			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
+				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
+			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+				 ib_conn->page_vec->offset);
+			for (i=0 ; i<ib_conn->page_vec->length ; i++)
+				iser_err("page_vec[%d] = 0x%llx\n", i,
+					 (unsigned long long) ib_conn->page_vec->pages[i]);
+			return err;
 		}
-		return err;
 	}
 
 	/* take a reference on this regd buf such that it will not be released *
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 9b27a7c26aa8..ecdca7fc1e4c 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -88,8 +88,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
 		     iser_cq_tasklet_fn,
 		     (unsigned long)device);
 
-	device->mr = ib_get_dma_mr(device->pd,
-				   IB_ACCESS_LOCAL_WRITE);
+	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
+				   IB_ACCESS_REMOTE_WRITE |
+				   IB_ACCESS_REMOTE_READ);
 	if (IS_ERR(device->mr))
 		goto dma_mr_err;
 
@@ -606,6 +607,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
 	mem_reg->rkey = mem->fmr->rkey;
 	mem_reg->len = page_vec->length * SIZE_4K;
 	mem_reg->va = io_addr;
+	mem_reg->is_fmr = 1;
 	mem_reg->mem_h = (void *)mem;
 
 	mem_reg->va += page_vec->offset;