author		Sagi Grimberg <sagig@mellanox.com>	2014-03-05 12:43:40 -0500
committer	Roland Dreier <roland@purestorage.com>	2014-03-18 01:33:57 -0400
commit		d11ec4ecf022f49df33a784f0cf445638573f577 (patch)
tree		cddcf71139442be958ff177f7e5ed73cbcec20e7
parent		7306b8fad467c4c3c1e3fc68b237427cac1533a7 (diff)
IB/iser: Push the decision what memory key to use into fast_reg_mr routine
This is a preparation step for T10-PI offload support. We prefer to push the
decision of which mkey to use (global or fastreg) into iser_fast_reg_mr. We
do this because in T10-PI we may also need to register protection buffers,
and in that case we want the key-selection logic to live in iser_fast_reg_mr
rather than be repeated at each call site. This patch does not change any
functionality.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Alex Tabachnik <alext@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
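To make the refactoring concrete, the key-selection pattern this patch
centralizes can be modeled in ordinary user-space C. The sketch below is an
illustration only, under the assumption that a single-entry buffer maps to
the device's global DMA MR and anything else to a fast-registration MR;
struct sge_model, reg_mem(), and the key constants are hypothetical
stand-ins, not the kernel's ib_sge or iser_fast_reg_mr():

	/*
	 * Self-contained model of the decision iser_fast_reg_mr() now
	 * makes: a buffer with a single DMA entry uses the pre-registered
	 * global DMA MR (no work request posted); a scattered buffer goes
	 * through a fast-registration MR.  All types and keys are stand-ins.
	 */
	#include <stdio.h>

	struct sge_model {			/* stands in for struct ib_sge */
		unsigned int lkey;
		unsigned long long addr;
		unsigned int length;
	};

	enum reg_kind { REG_GLOBAL_DMA_MR, REG_FASTREG_MR };

	/* Stand-in for iser_fast_reg_mr(): the helper, not the caller,
	 * picks the memory key and reports the result through *sge. */
	static enum reg_kind reg_mem(int dma_nents, unsigned long long addr,
				     unsigned int len, struct sge_model *sge)
	{
		if (dma_nents == 1) {
			/* single DMA entry: global DMA MR suffices */
			sge->lkey = 0x100;	/* models device->mr->lkey */
			sge->addr = addr;
			sge->length = len;
			return REG_GLOBAL_DMA_MR;
		}
		/* scattered buffer: would build a page vector and post
		 * IB_WR_FAST_REG_MR; modeled here by a different key */
		sge->lkey = 0x200;		/* models desc->data_mr->lkey */
		sge->addr = addr;
		sge->length = len;
		return REG_FASTREG_MR;
	}

	int main(void)
	{
		struct sge_model sge;

		/* the caller never inspects dma_nents itself; it just
		 * consumes the filled sge, as iser_reg_rdma_mem_fastreg()
		 * does after this patch */
		if (reg_mem(1, 0x1000, 4096, &sge) == REG_GLOBAL_DMA_MR)
			printf("1 entry   -> global MR,  lkey=0x%x\n", sge.lkey);
		if (reg_mem(4, 0x2000, 16384, &sge) == REG_FASTREG_MR)
			printf("4 entries -> fastreg MR, lkey=0x%x\n", sge.lkey);
		return 0;
	}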
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	101
1 file changed, 59 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 6e9b7bcbc562..d25587e2f296 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -444,16 +444,40 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 	return 0;
 }
 
-static int iser_fast_reg_mr(struct fast_reg_descriptor *desc,
-			    struct iser_conn *ib_conn,
+static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 			    struct iser_regd_buf *regd_buf,
-			    u32 offset, unsigned int data_size,
-			    unsigned int page_list_len)
+			    struct iser_data_buf *mem,
+			    struct ib_sge *sge)
 {
+	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
+	struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
+	struct ib_device *ibdev = device->ib_device;
 	struct ib_send_wr fastreg_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	u8 key;
-	int ret;
+	int ret, offset, size, plen;
+
+	/* if there a single dma entry, dma mr suffices */
+	if (mem->dma_nents == 1) {
+		struct scatterlist *sg = (struct scatterlist *)mem->buf;
+
+		sge->lkey = device->mr->lkey;
+		sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
+		sge->length = ib_sg_dma_len(ibdev, &sg[0]);
+
+		iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
+			 sge->lkey, sge->addr, sge->length);
+		return 0;
+	}
+
+	plen = iser_sg_to_page_vec(mem, device->ib_device,
+				   desc->data_frpl->page_list,
+				   &offset, &size);
+	if (plen * SIZE_4K < size) {
+		iser_err("fast reg page_list too short to hold this SG\n");
+		return -EINVAL;
+	}
 
 	if (!desc->valid) {
 		memset(&inv_wr, 0, sizeof(inv_wr));
@@ -472,9 +496,9 @@ static int iser_fast_reg_mr(struct fast_reg_descriptor *desc,
 	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
 	fastreg_wr.wr.fast_reg.iova_start = desc->data_frpl->page_list[0] + offset;
 	fastreg_wr.wr.fast_reg.page_list = desc->data_frpl;
-	fastreg_wr.wr.fast_reg.page_list_len = page_list_len;
+	fastreg_wr.wr.fast_reg.page_list_len = plen;
 	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
-	fastreg_wr.wr.fast_reg.length = data_size;
+	fastreg_wr.wr.fast_reg.length = size;
 	fastreg_wr.wr.fast_reg.rkey = desc->data_mr->rkey;
 	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
 					       IB_ACCESS_REMOTE_WRITE |
@@ -492,12 +516,9 @@ static int iser_fast_reg_mr(struct fast_reg_descriptor *desc,
 	}
 	desc->valid = false;
 
-	regd_buf->reg.mem_h = desc;
-	regd_buf->reg.lkey = desc->data_mr->lkey;
-	regd_buf->reg.rkey = desc->data_mr->rkey;
-	regd_buf->reg.va = desc->data_frpl->page_list[0] + offset;
-	regd_buf->reg.len = data_size;
-	regd_buf->reg.is_mr = 1;
+	sge->lkey = desc->data_mr->lkey;
+	sge->addr = desc->data_frpl->page_list[0] + offset;
+	sge->length = size;
 
 	return ret;
 }
@@ -516,11 +537,10 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
-	struct fast_reg_descriptor *desc;
-	unsigned int data_size, page_list_len;
+	struct fast_reg_descriptor *desc = NULL;
+	struct ib_sge data_sge;
 	int err, aligned_len;
 	unsigned long flags;
-	u32 offset;
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
@@ -533,41 +553,38 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 		mem = &iser_task->data_copy[cmd_dir];
 	}
 
-	/* if there a single dma entry, dma mr suffices */
-	if (mem->dma_nents == 1) {
-		struct scatterlist *sg = (struct scatterlist *)mem->buf;
-
-		regd_buf->reg.lkey = device->mr->lkey;
-		regd_buf->reg.rkey = device->mr->rkey;
-		regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
-		regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
-		regd_buf->reg.is_mr = 0;
-	} else {
+	if (mem->dma_nents != 1) {
 		spin_lock_irqsave(&ib_conn->lock, flags);
 		desc = list_first_entry(&ib_conn->fastreg.pool,
 					struct fast_reg_descriptor, list);
 		list_del(&desc->list);
 		spin_unlock_irqrestore(&ib_conn->lock, flags);
-		page_list_len = iser_sg_to_page_vec(mem, device->ib_device,
-						    desc->data_frpl->page_list,
-						    &offset, &data_size);
-
-		if (page_list_len * SIZE_4K < data_size) {
-			iser_err("fast reg page_list too short to hold this SG\n");
-			err = -EINVAL;
-			goto err_reg;
-		}
+		regd_buf->reg.mem_h = desc;
+	}
 
-		err = iser_fast_reg_mr(desc, ib_conn, regd_buf,
-				       offset, data_size, page_list_len);
-		if (err)
-			goto err_reg;
+	err = iser_fast_reg_mr(iser_task, regd_buf, mem, &data_sge);
+	if (err)
+		goto err_reg;
+
+	if (desc) {
+		regd_buf->reg.rkey = desc->data_mr->rkey;
+		regd_buf->reg.is_mr = 1;
+	} else {
+		regd_buf->reg.rkey = device->mr->rkey;
+		regd_buf->reg.is_mr = 0;
 	}
 
+	regd_buf->reg.lkey = data_sge.lkey;
+	regd_buf->reg.va = data_sge.addr;
+	regd_buf->reg.len = data_sge.length;
+
 	return 0;
 err_reg:
-	spin_lock_irqsave(&ib_conn->lock, flags);
-	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
-	spin_unlock_irqrestore(&ib_conn->lock, flags);
+	if (desc) {
+		spin_lock_irqsave(&ib_conn->lock, flags);
+		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+		spin_unlock_irqrestore(&ib_conn->lock, flags);
+	}
+
 	return err;
 }
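A detail worth noting in the error path above: the fast-reg descriptor is
now pulled from the pool only for multi-entry buffers, so desc starts out
NULL and err_reg must check it before returning it to the pool. A minimal
stand-alone C model of that guard, with hypothetical pool_get()/pool_put()
helpers standing in for the spinlock-protected list operations:

	#include <stdio.h>

	struct desc_model { int taken; };	/* stands in for fast_reg_descriptor */

	static struct desc_model pool_slot;

	static struct desc_model *pool_get(void) { pool_slot.taken = 1; return &pool_slot; }
	static void pool_put(struct desc_model *d) { d->taken = 0; }

	/* Models iser_reg_rdma_mem_fastreg(): the descriptor is optional,
	 * so the cleanup path releases it only when one was taken. */
	static int register_mem(int dma_nents, int fail)
	{
		struct desc_model *desc = NULL;
		int err;

		if (dma_nents != 1)
			desc = pool_get();	/* only scattered buffers need one */

		err = fail ? -22 : 0;		/* stands in for iser_fast_reg_mr() */
		if (err)
			goto err_reg;

		return 0;
	err_reg:
		if (desc)	/* may be NULL in the single-entry case */
			pool_put(desc);
		return err;
	}

	int main(void)
	{
		printf("single entry, failure: %d (pool untouched: %d)\n",
		       register_mem(1, 1), pool_slot.taken == 0);
		printf("four entries, failure: %d (desc returned:  %d)\n",
		       register_mem(4, 1), pool_slot.taken == 0);
		return 0;
	}

Routing the registration result through a caller-supplied ib_sge, while the
caller keeps only the rkey/is_mr bookkeeping, is also what should let the
T10-PI work the commit message anticipates call iser_fast_reg_mr() again
for protection buffers without duplicating any of this logic.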