author		Christoph Hellwig <hch@lst.de>	2016-05-03 12:01:13 -0400
committer	Doug Ledford <dledford@redhat.com>	2016-05-13 13:37:20 -0400
commit		38a2d0d429f1d87315c55d9139b8bdf66d51c4f4 (patch)
tree		65bbfeab6a05664c0bfa871b95cbff6ce701cd76
parent		0e353e34e1e740fe575eb479ca0f2a723a4ef51c (diff)
IB/isert: convert to the generic RDMA READ/WRITE API
Replace the homegrown RDMA READ/WRITE code in isert with the generic API, which also adds iWarp support to the I/O path as a side effect. Note that full iWarp operation will need a few additional patches from Steve.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.c	841
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.h	69
2 files changed, 85 insertions(+), 825 deletions(-)
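For orientation: the generic API adopted here (drivers/infiniband/core/rw.c, added by the parent series) drives a transfer in three steps. rdma_rw_ctx_init() maps the scatterlist and builds the RDMA READ/WRITE work requests against the remote address and rkey, rdma_rw_ctx_post() posts the chain (optionally followed by a send WR such as the iSCSI response), and rdma_rw_ctx_destroy() tears the mapping down from the completion path. Below is a minimal sketch of one non-signature cycle, assuming placeholder qp/cm_id/sg/remote_addr/rkey parameters and a placeholder completion handler; none of this is isert code:

#include <rdma/rw.h>
#include <rdma/rdma_cm.h>

/* Placeholder completion handler. */
static void example_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* a real ULP would container_of(wc->wr_cqe, ...) back to its
	 * command, call rdma_rw_ctx_destroy() and complete the command */
}

static int example_rdma_write(struct ib_qp *qp, struct rdma_cm_id *cm_id,
			      struct rdma_rw_ctx *ctx, struct ib_cqe *cqe,
			      struct scatterlist *sg, u32 sg_cnt,
			      u64 remote_addr, u32 rkey)
{
	int ret;

	/* map the SG list and build the RDMA WRITE work requests; the
	 * core may register an MR internally when the device needs it */
	ret = rdma_rw_ctx_init(ctx, qp, cm_id->port_num, sg, sg_cnt,
			       0 /* sg offset */, remote_addr, rkey,
			       DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	cqe->done = example_write_done;

	/* post the WR chain; the last WR carries the cqe, and a chain_wr
	 * (e.g. the iSCSI response SEND) could replace the NULL here */
	ret = rdma_rw_ctx_post(ctx, qp, cm_id->port_num, cqe, NULL);
	if (ret < 0)
		rdma_rw_ctx_destroy(ctx, qp, cm_id->port_num, sg, sg_cnt,
				    DMA_TO_DEVICE);
	return ret;
}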
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a44a73639cba..897b5a4993e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -33,7 +33,8 @@
 
 #define ISERT_MAX_CONN		8
 #define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
-#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN \
+	((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
 #define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
 				 ISERT_MAX_CONN)
 
@@ -46,14 +47,6 @@ static LIST_HEAD(device_list);
 static struct workqueue_struct *isert_comp_wq;
 static struct workqueue_struct *isert_release_wq;
 
-static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
-static int
-isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
-static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
-static int
-isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 static int
@@ -142,6 +135,7 @@ isert_create_qp(struct isert_conn *isert_conn,
 	attr.recv_cq = comp->cq;
 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
+	attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
 	attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
 	isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
 				  device->ib_device->attrs.max_sge_rd);
@@ -270,9 +264,9 @@ isert_alloc_comps(struct isert_device *device)
 				device->ib_device->num_comp_vectors));
 
 	isert_info("Using %d CQs, %s supports %d vectors support "
-		   "Fast registration %d pi_capable %d\n",
+		   "pi_capable %d\n",
 		   device->comps_used, device->ib_device->name,
-		   device->ib_device->num_comp_vectors, device->use_fastreg,
+		   device->ib_device->num_comp_vectors,
 		   device->pi_capable);
 
 	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
@@ -313,18 +307,6 @@ isert_create_device_ib_res(struct isert_device *device)
 	isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
 	isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
 
-	/* asign function handlers */
-	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
-	    ib_dev->attrs.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
-		device->use_fastreg = 1;
-		device->reg_rdma_mem = isert_reg_rdma;
-		device->unreg_rdma_mem = isert_unreg_rdma;
-	} else {
-		device->use_fastreg = 0;
-		device->reg_rdma_mem = isert_map_rdma;
-		device->unreg_rdma_mem = isert_unmap_cmd;
-	}
-
 	ret = isert_alloc_comps(device);
 	if (ret)
 		goto out;
@@ -417,146 +399,6 @@ isert_device_get(struct rdma_cm_id *cma_id)
 }
 
 static void
-isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
-{
-	struct fast_reg_descriptor *fr_desc, *tmp;
-	int i = 0;
-
-	if (list_empty(&isert_conn->fr_pool))
-		return;
-
-	isert_info("Freeing conn %p fastreg pool", isert_conn);
-
-	list_for_each_entry_safe(fr_desc, tmp,
-				 &isert_conn->fr_pool, list) {
-		list_del(&fr_desc->list);
-		ib_dereg_mr(fr_desc->data_mr);
-		if (fr_desc->pi_ctx) {
-			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-			ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
-			kfree(fr_desc->pi_ctx);
-		}
-		kfree(fr_desc);
-		++i;
-	}
-
-	if (i < isert_conn->fr_pool_size)
-		isert_warn("Pool still has %d regions registered\n",
-			isert_conn->fr_pool_size - i);
-}
-
-static int
-isert_create_pi_ctx(struct fast_reg_descriptor *desc,
-		    struct ib_device *device,
-		    struct ib_pd *pd)
-{
-	struct pi_context *pi_ctx;
-	int ret;
-
-	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
-	if (!pi_ctx) {
-		isert_err("Failed to allocate pi context\n");
-		return -ENOMEM;
-	}
-
-	pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-				      ISCSI_ISER_SG_TABLESIZE);
-	if (IS_ERR(pi_ctx->prot_mr)) {
-		isert_err("Failed to allocate prot frmr err=%ld\n",
-			  PTR_ERR(pi_ctx->prot_mr));
-		ret = PTR_ERR(pi_ctx->prot_mr);
-		goto err_pi_ctx;
-	}
-	desc->ind |= ISERT_PROT_KEY_VALID;
-
-	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
-	if (IS_ERR(pi_ctx->sig_mr)) {
-		isert_err("Failed to allocate signature enabled mr err=%ld\n",
-			  PTR_ERR(pi_ctx->sig_mr));
-		ret = PTR_ERR(pi_ctx->sig_mr);
-		goto err_prot_mr;
-	}
-
-	desc->pi_ctx = pi_ctx;
-	desc->ind |= ISERT_SIG_KEY_VALID;
-	desc->ind &= ~ISERT_PROTECTED;
-
-	return 0;
-
-err_prot_mr:
-	ib_dereg_mr(pi_ctx->prot_mr);
-err_pi_ctx:
-	kfree(pi_ctx);
-
-	return ret;
-}
-
-static int
-isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-		     struct fast_reg_descriptor *fr_desc)
-{
-	fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-				       ISCSI_ISER_SG_TABLESIZE);
-	if (IS_ERR(fr_desc->data_mr)) {
-		isert_err("Failed to allocate data frmr err=%ld\n",
-			  PTR_ERR(fr_desc->data_mr));
-		return PTR_ERR(fr_desc->data_mr);
-	}
-	fr_desc->ind |= ISERT_DATA_KEY_VALID;
-
-	isert_dbg("Created fr_desc %p\n", fr_desc);
-
-	return 0;
-}
-
-static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
-{
-	struct fast_reg_descriptor *fr_desc;
-	struct isert_device *device = isert_conn->device;
-	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
-	struct se_node_acl *se_nacl = se_sess->se_node_acl;
-	int i, ret, tag_num;
-	/*
-	 * Setup the number of FRMRs based upon the number of tags
-	 * available to session in iscsi_target_locate_portal().
-	 */
-	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
-	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
-
-	isert_conn->fr_pool_size = 0;
-	for (i = 0; i < tag_num; i++) {
-		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
-		if (!fr_desc) {
-			isert_err("Failed to allocate fast_reg descriptor\n");
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		ret = isert_create_fr_desc(device->ib_device,
-					   device->pd, fr_desc);
-		if (ret) {
-			isert_err("Failed to create fastreg descriptor err=%d\n",
-			       ret);
-			kfree(fr_desc);
-			goto err;
-		}
-
-		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
-		isert_conn->fr_pool_size++;
-	}
-
-	isert_dbg("Creating conn %p fastreg pool size=%d",
-		  isert_conn, isert_conn->fr_pool_size);
-
-	return 0;
-
-err:
-	isert_conn_free_fastreg_pool(isert_conn);
-	return ret;
-}
-
-static void
 isert_init_conn(struct isert_conn *isert_conn)
 {
 	isert_conn->state = ISER_CONN_INIT;
@@ -565,8 +407,6 @@ isert_init_conn(struct isert_conn *isert_conn)
 	init_completion(&isert_conn->login_req_comp);
 	kref_init(&isert_conn->kref);
 	mutex_init(&isert_conn->mutex);
-	spin_lock_init(&isert_conn->pool_lock);
-	INIT_LIST_HEAD(&isert_conn->fr_pool);
 	INIT_WORK(&isert_conn->release_work, isert_release_work);
 }
 
@@ -739,9 +579,6 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	BUG_ON(!device);
 
-	if (device->use_fastreg)
-		isert_conn_free_fastreg_pool(isert_conn);
-
 	isert_free_rx_descriptors(isert_conn);
 	if (isert_conn->cm_id)
 		rdma_destroy_id(isert_conn->cm_id);
@@ -1080,7 +917,6 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 {
 	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
 
-	isert_cmd->iser_ib_op = ISER_IB_SEND;
 	tx_desc->tx_cqe.done = isert_send_done;
 	send_wr->wr_cqe = &tx_desc->tx_cqe;
 
@@ -1160,16 +996,6 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 	}
 	if (!login->login_failed) {
 		if (login->login_complete) {
-			if (!conn->sess->sess_ops->SessionType &&
-			    isert_conn->device->use_fastreg) {
-				ret = isert_conn_create_fastreg_pool(isert_conn);
-				if (ret) {
-					isert_err("Conn: %p failed to create"
-					       " fastreg pool\n", isert_conn);
-					return ret;
-				}
-			}
-
 			ret = isert_alloc_rx_descriptors(isert_conn);
 			if (ret)
 				return ret;
@@ -1633,97 +1459,26 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 }
 
-static int
-isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
-		   enum iser_ib_op_code op, struct isert_data_buf *data)
-{
-	struct ib_device *ib_dev = isert_conn->cm_id->device;
-
-	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
-			  DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
-	data->len = length - offset;
-	data->offset = offset;
-	data->sg_off = data->offset / PAGE_SIZE;
-
-	data->sg = &sg[data->sg_off];
-	data->nents = min_t(unsigned int, nents - data->sg_off,
-					  ISCSI_ISER_SG_TABLESIZE);
-	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
-					PAGE_SIZE);
-
-	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
-					data->dma_dir);
-	if (unlikely(!data->dma_nents)) {
-		isert_err("Cmd: unable to dma map SGs %p\n", sg);
-		return -EINVAL;
-	}
-
-	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
-
-	return 0;
-}
-
 static void
-isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
 {
-	struct ib_device *ib_dev = isert_conn->cm_id->device;
-
-	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
-	memset(data, 0, sizeof(*data));
-}
-
-
-
-static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
-{
-	isert_dbg("Cmd %p\n", isert_cmd);
+	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
 
-	if (isert_cmd->data.sg) {
-		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
-		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
-	}
-
-	if (isert_cmd->rdma_wr) {
-		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
-		kfree(isert_cmd->rdma_wr);
-		isert_cmd->rdma_wr = NULL;
-	}
+	if (!cmd->rw.nr_ops)
+		return;
 
-	if (isert_cmd->ib_sge) {
-		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
-		kfree(isert_cmd->ib_sge);
-		isert_cmd->ib_sge = NULL;
-	}
-}
-
-static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
-{
-	isert_dbg("Cmd %p\n", isert_cmd);
-
-	if (isert_cmd->fr_desc) {
-		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
-		if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
-			isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
-			isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
-		}
-		spin_lock_bh(&isert_conn->pool_lock);
-		list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
-		spin_unlock_bh(&isert_conn->pool_lock);
-		isert_cmd->fr_desc = NULL;
-	}
-
-	if (isert_cmd->data.sg) {
-		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
-		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
+	if (isert_prot_cmd(conn, se_cmd)) {
+		rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
+				conn->cm_id->port_num, se_cmd->t_data_sg,
+				se_cmd->t_data_nents, se_cmd->t_prot_sg,
+				se_cmd->t_prot_nents, dir);
+	} else {
+		rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
+				se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
 	}
 
-	isert_cmd->ib_sge = NULL;
-	isert_cmd->rdma_wr = NULL;
+	cmd->rw.nr_ops = 0;
 }
 
 static void
@@ -1732,7 +1487,6 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct iscsi_conn *conn = isert_conn->conn;
-	struct isert_device *device = isert_conn->device;
 	struct iscsi_text_rsp *hdr;
 
 	isert_dbg("Cmd %p\n", isert_cmd);
@@ -1760,7 +1514,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 			}
 		}
 
-		device->unreg_rdma_mem(isert_cmd, isert_conn);
+		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 		transport_generic_free_cmd(&cmd->se_cmd, 0);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
@@ -1894,14 +1648,9 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
-		ret = isert_check_pi_status(cmd,
-				isert_cmd->fr_desc->pi_ctx->sig_mr);
-		isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
-	}
+	ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
+	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	isert_cmd->rdma_wr_num = 0;
 	if (ret)
 		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
 	else
@@ -1929,16 +1678,12 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
-		ret = isert_check_pi_status(se_cmd,
-					    isert_cmd->fr_desc->pi_ctx->sig_mr);
-		isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
-	}
-
 	iscsit_stop_dataout_timer(cmd);
-	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	cmd->write_data_done = isert_cmd->data.len;
-	isert_cmd->rdma_wr_num = 0;
+
+	if (isert_prot_cmd(isert_conn, se_cmd))
+		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
+	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+	cmd->write_data_done = 0;
 
 	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
 	spin_lock_bh(&cmd->istate_lock);
@@ -2111,7 +1856,6 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
-	struct isert_device *device = isert_conn->device;
 
 	spin_lock_bh(&conn->cmd_lock);
 	if (!list_empty(&cmd->i_conn_node))
@@ -2120,8 +1864,7 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 
 	if (cmd->data_direction == DMA_TO_DEVICE)
 		iscsit_stop_dataout_timer(cmd);
-
-	device->unreg_rdma_mem(isert_cmd, isert_conn);
+	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 }
 
 static enum target_prot_op
@@ -2274,234 +2017,6 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	return isert_post_response(isert_conn, isert_cmd);
 }
 
-static int
-isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-		    struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
-		    u32 data_left, u32 offset)
-{
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-	struct scatterlist *sg_start, *tmp_sg;
-	struct isert_device *device = isert_conn->device;
-	struct ib_device *ib_dev = device->ib_device;
-	u32 sg_off, page_off;
-	int i = 0, sg_nents;
-
-	sg_off = offset / PAGE_SIZE;
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
-	page_off = offset % PAGE_SIZE;
-
-	rdma_wr->wr.sg_list = ib_sge;
-	rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
-
-	/*
-	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
-	 */
-	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
-		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
-			  "page_off: %u\n",
-			  (unsigned long long)tmp_sg->dma_address,
-			  tmp_sg->length, page_off);
-
-		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
-		ib_sge->length = min_t(u32, data_left,
-				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
-		ib_sge->lkey = device->pd->local_dma_lkey;
-
-		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
-			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
-		page_off = 0;
-		data_left -= ib_sge->length;
-		if (!data_left)
-			break;
-		ib_sge++;
-		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
-	}
-
-	rdma_wr->wr.num_sge = ++i;
-	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
-		  rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);
-
-	return rdma_wr->wr.num_sge;
-}
-
-static int
-isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
-{
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_conn *isert_conn = conn->context;
-	struct isert_data_buf *data = &isert_cmd->data;
-	struct ib_rdma_wr *rdma_wr;
-	struct ib_sge *ib_sge;
-	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
-	int ret = 0, i, ib_sge_cnt;
-
-	offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
-			cmd->write_data_done : 0;
-	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
-				 se_cmd->t_data_nents, se_cmd->data_length,
-				 offset, isert_cmd->iser_ib_op,
-				 &isert_cmd->data);
-	if (ret)
-		return ret;
-
-	data_left = data->len;
-	offset = data->offset;
-
-	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
-	if (!ib_sge) {
-		isert_warn("Unable to allocate ib_sge\n");
-		ret = -ENOMEM;
-		goto unmap_cmd;
-	}
-	isert_cmd->ib_sge = ib_sge;
-
-	isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
-	isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
-				isert_cmd->rdma_wr_num, GFP_KERNEL);
-	if (!isert_cmd->rdma_wr) {
-		isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
-		ret = -ENOMEM;
-		goto unmap_cmd;
-	}
-
-	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-
-	for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
-		rdma_wr = &isert_cmd->rdma_wr[i];
-		data_len = min(data_left, rdma_write_max);
-
-		rdma_wr->wr.send_flags = 0;
-		if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
-			isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
-
-			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
-			rdma_wr->remote_addr = isert_cmd->read_va + offset;
-			rdma_wr->rkey = isert_cmd->read_stag;
-			if (i + 1 == isert_cmd->rdma_wr_num)
-				rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
-			else
-				rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
-		} else {
-			isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-
-			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
-			rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
-			rdma_wr->rkey = isert_cmd->write_stag;
-			if (i + 1 == isert_cmd->rdma_wr_num)
-				rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
-			else
-				rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
-		}
-
-		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
-						 rdma_wr, data_len, offset);
-		ib_sge += ib_sge_cnt;
-
-		offset += data_len;
-		va_offset += data_len;
-		data_left -= data_len;
-	}
-
-	return 0;
-unmap_cmd:
-	isert_unmap_data_buf(isert_conn, data);
-
-	return ret;
-}
-
-static inline void
-isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
-{
-	u32 rkey;
-
-	memset(inv_wr, 0, sizeof(*inv_wr));
-	inv_wr->wr_cqe = NULL;
-	inv_wr->opcode = IB_WR_LOCAL_INV;
-	inv_wr->ex.invalidate_rkey = mr->rkey;
-
-	/* Bump the key */
-	rkey = ib_inc_rkey(mr->rkey);
-	ib_update_fast_reg_key(mr, rkey);
-}
-
-static int
-isert_fast_reg_mr(struct isert_conn *isert_conn,
-		  struct fast_reg_descriptor *fr_desc,
-		  struct isert_data_buf *mem,
-		  enum isert_indicator ind,
-		  struct ib_sge *sge)
-{
-	struct isert_device *device = isert_conn->device;
-	struct ib_device *ib_dev = device->ib_device;
-	struct ib_mr *mr;
-	struct ib_reg_wr reg_wr;
-	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
-	int ret, n;
-
-	if (mem->dma_nents == 1) {
-		sge->lkey = device->pd->local_dma_lkey;
-		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
-		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
-		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
-			  sge->addr, sge->length, sge->lkey);
-		return 0;
-	}
-
-	if (ind == ISERT_DATA_KEY_VALID)
-		/* Registering data buffer */
-		mr = fr_desc->data_mr;
-	else
-		/* Registering protection buffer */
-		mr = fr_desc->pi_ctx->prot_mr;
-
-	if (!(fr_desc->ind & ind)) {
-		isert_inv_rkey(&inv_wr, mr);
-		wr = &inv_wr;
-	}
-
-	n = ib_map_mr_sg(mr, mem->sg, mem->nents, 0, PAGE_SIZE);
-	if (unlikely(n != mem->nents)) {
-		isert_err("failed to map mr sg (%d/%d)\n",
-			 n, mem->nents);
-		return n < 0 ? n : -EINVAL;
-	}
-
-	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
-		  fr_desc, mem->nents, mem->offset);
-
-	reg_wr.wr.next = NULL;
-	reg_wr.wr.opcode = IB_WR_REG_MR;
-	reg_wr.wr.wr_cqe = NULL;
-	reg_wr.wr.send_flags = 0;
-	reg_wr.wr.num_sge = 0;
-	reg_wr.mr = mr;
-	reg_wr.key = mr->lkey;
-	reg_wr.access = IB_ACCESS_LOCAL_WRITE;
-
-	if (!wr)
-		wr = &reg_wr.wr;
-	else
-		wr->next = &reg_wr.wr;
-
-	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
-	if (ret) {
-		isert_err("fast registration failed, ret:%d\n", ret);
-		return ret;
-	}
-	fr_desc->ind &= ~ind;
-
-	sge->lkey = mr->lkey;
-	sge->addr = mr->iova;
-	sge->length = mr->length;
-
-	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
-		  sge->addr, sge->length, sge->lkey);
-
-	return ret;
-}
-
 static inline void
 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
 		     struct ib_sig_domain *domain)
@@ -2526,6 +2041,8 @@ isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
 static int
 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
 {
+	memset(sig_attrs, 0, sizeof(*sig_attrs));
+
 	switch (se_cmd->prot_op) {
 	case TARGET_PROT_DIN_INSERT:
 	case TARGET_PROT_DOUT_STRIP:
@@ -2547,228 +2064,59 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
 		return -EINVAL;
 	}
 
+	sig_attrs->check_mask =
+	       (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
+	       (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+	       (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
 	return 0;
 }
 
-static inline u8
-isert_set_prot_checks(u8 prot_checks)
-{
-	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
-	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
-	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
-}
-
-static int
-isert_reg_sig_mr(struct isert_conn *isert_conn,
-		 struct isert_cmd *isert_cmd,
-		 struct fast_reg_descriptor *fr_desc)
-{
-	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
-	struct ib_sig_handover_wr sig_wr;
-	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
-	struct pi_context *pi_ctx = fr_desc->pi_ctx;
-	struct ib_sig_attrs sig_attrs;
-	int ret;
-
-	memset(&sig_attrs, 0, sizeof(sig_attrs));
-	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
-	if (ret)
-		goto err;
-
-	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
-
-	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
-		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
-		wr = &inv_wr;
-	}
-
-	memset(&sig_wr, 0, sizeof(sig_wr));
-	sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
-	sig_wr.wr.wr_cqe = NULL;
-	sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
-	sig_wr.wr.num_sge = 1;
-	sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
-	sig_wr.sig_attrs = &sig_attrs;
-	sig_wr.sig_mr = pi_ctx->sig_mr;
-	if (se_cmd->t_prot_sg)
-		sig_wr.prot = &isert_cmd->ib_sg[PROT];
-
-	if (!wr)
-		wr = &sig_wr.wr;
-	else
-		wr->next = &sig_wr.wr;
-
-	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
-	if (ret) {
-		isert_err("fast registration failed, ret:%d\n", ret);
-		goto err;
-	}
-	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
-
-	isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
-	isert_cmd->ib_sg[SIG].addr = 0;
-	isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
-	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
-	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
-		/*
-		 * We have protection guards on the wire
-		 * so we need to set a larget transfer
-		 */
-		isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
-
-	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
-		  isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
-		  isert_cmd->ib_sg[SIG].lkey);
-err:
-	return ret;
-}
-
 static int
-isert_handle_prot_cmd(struct isert_conn *isert_conn,
-		      struct isert_cmd *isert_cmd)
+isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
+		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
 {
-	struct isert_device *device = isert_conn->device;
-	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
+	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
+	u8 port_num = conn->cm_id->port_num;
+	u64 addr;
+	u32 rkey, offset;
 	int ret;
 
-	if (!isert_cmd->fr_desc->pi_ctx) {
-		ret = isert_create_pi_ctx(isert_cmd->fr_desc,
-					  device->ib_device,
-					  device->pd);
-		if (ret) {
-			isert_err("conn %p failed to allocate pi_ctx\n",
-				  isert_conn);
-			return ret;
-		}
-	}
-
-	if (se_cmd->t_prot_sg) {
-		ret = isert_map_data_buf(isert_conn, isert_cmd,
-					 se_cmd->t_prot_sg,
-					 se_cmd->t_prot_nents,
-					 se_cmd->prot_length,
-					 0,
-					 isert_cmd->iser_ib_op,
-					 &isert_cmd->prot);
-		if (ret) {
-			isert_err("conn %p failed to map protection buffer\n",
-				  isert_conn);
-			return ret;
-		}
-
-		memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
-		ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
-					&isert_cmd->prot,
-					ISERT_PROT_KEY_VALID,
-					&isert_cmd->ib_sg[PROT]);
-		if (ret) {
-			isert_err("conn %p failed to fast reg mr\n",
-				  isert_conn);
-			goto unmap_prot_cmd;
-		}
-	}
-
-	ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
-	if (ret) {
-		isert_err("conn %p failed to fast reg mr\n",
-			  isert_conn);
-		goto unmap_prot_cmd;
-	}
-	isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
-
-	return 0;
-
-unmap_prot_cmd:
-	if (se_cmd->t_prot_sg)
-		isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
-
-	return ret;
-}
-
-static int
-isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
-{
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_conn *isert_conn = conn->context;
-	struct fast_reg_descriptor *fr_desc = NULL;
-	struct ib_rdma_wr *rdma_wr;
-	struct ib_sge *ib_sg;
-	u32 offset;
-	int ret = 0;
-	unsigned long flags;
-
-	offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
-			cmd->write_data_done : 0;
-	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
-				 se_cmd->t_data_nents, se_cmd->data_length,
-				 offset, isert_cmd->iser_ib_op,
-				 &isert_cmd->data);
-	if (ret)
-		return ret;
-
-	if (isert_cmd->data.dma_nents != 1 ||
-	    isert_prot_cmd(isert_conn, se_cmd)) {
-		spin_lock_irqsave(&isert_conn->pool_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->fr_pool,
-					   struct fast_reg_descriptor, list);
-		list_del(&fr_desc->list);
-		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
-		isert_cmd->fr_desc = fr_desc;
-	}
-
-	ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
-				ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
-	if (ret)
-		goto unmap_cmd;
-
-	if (isert_prot_cmd(isert_conn, se_cmd)) {
-		ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
-		if (ret)
-			goto unmap_cmd;
-
-		ib_sg = &isert_cmd->ib_sg[SIG];
+	if (dir == DMA_FROM_DEVICE) {
+		addr = cmd->write_va;
+		rkey = cmd->write_stag;
+		offset = cmd->iscsi_cmd->write_data_done;
 	} else {
-		ib_sg = &isert_cmd->ib_sg[DATA];
+		addr = cmd->read_va;
+		rkey = cmd->read_stag;
+		offset = 0;
 	}
 
-	memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
-	isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
-	isert_cmd->rdma_wr_num = 1;
-	memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
-	isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
+	if (isert_prot_cmd(conn, se_cmd)) {
+		struct ib_sig_attrs sig_attrs;
 
-	rdma_wr = &isert_cmd->s_rdma_wr;
-	rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
-	rdma_wr->wr.num_sge = 1;
-	rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
-	if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+		if (ret)
+			return ret;
 
-		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
-		rdma_wr->remote_addr = isert_cmd->read_va;
-		rdma_wr->rkey = isert_cmd->read_stag;
-		rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
-				      0 : IB_SEND_SIGNALED;
+		WARN_ON_ONCE(offset);
+		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
+				se_cmd->t_data_sg, se_cmd->t_data_nents,
+				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
+				&sig_attrs, addr, rkey, dir);
 	} else {
-		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-
-		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
-		rdma_wr->remote_addr = isert_cmd->write_va;
-		rdma_wr->rkey = isert_cmd->write_stag;
-		rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
+		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
+				se_cmd->t_data_sg, se_cmd->t_data_nents,
+				offset, addr, rkey, dir);
 	}
-
-	return 0;
-
-unmap_cmd:
-	if (fr_desc) {
-		spin_lock_irqsave(&isert_conn->pool_lock, flags);
-		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
-		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
+	if (ret < 0) {
+		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
+		return ret;
 	}
-	isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 
+	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
+	if (ret < 0)
+		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
 	return ret;
 }
 
@@ -2778,21 +2126,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
-	struct isert_device *device = isert_conn->device;
-	struct ib_send_wr *wr_failed;
+	struct ib_cqe *cqe = NULL;
+	struct ib_send_wr *chain_wr = NULL;
 	int rc;
 
 	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
 		  isert_cmd, se_cmd->data_length);
 
-	isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
-	rc = device->reg_rdma_mem(isert_cmd, conn);
-	if (rc) {
-		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
-		return rc;
-	}
-
-	if (!isert_prot_cmd(isert_conn, se_cmd)) {
+	if (isert_prot_cmd(isert_conn, se_cmd)) {
+		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+		cqe = &isert_cmd->tx_desc.tx_cqe;
+	} else {
 		/*
 		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
 		 */
@@ -2803,56 +2147,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
 		isert_init_send_wr(isert_conn, isert_cmd,
 				   &isert_cmd->tx_desc.send_wr);
-		isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
-		isert_cmd->rdma_wr_num += 1;
 
 		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
 		if (rc) {
 			isert_err("ib_post_recv failed with %d\n", rc);
 			return rc;
 		}
-	}
 
-	rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
-	if (rc)
-		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-
-	if (!isert_prot_cmd(isert_conn, se_cmd))
-		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
-			  "READ\n", isert_cmd);
-	else
-		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
-			  isert_cmd);
+		chain_wr = &isert_cmd->tx_desc.send_wr;
+	}
 
+	isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
 	return 1;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
-	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_conn *isert_conn = conn->context;
-	struct isert_device *device = isert_conn->device;
-	struct ib_send_wr *wr_failed;
-	int rc;
 
 	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
-		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
-	isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
-	rc = device->reg_rdma_mem(isert_cmd, conn);
-	if (rc) {
-		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
-		return rc;
-	}
+		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
-	rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
-	if (rc)
-		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+	isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+			&isert_cmd->tx_desc.tx_cqe, NULL);
 
 	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
 		  isert_cmd);
-
 	return 0;
 }
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 147900cbb578..e512ba941f2f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -3,6 +3,7 @@
 #include <linux/in6.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
 #include <scsi/iser.h>
 
 
@@ -53,10 +54,7 @@
 
 #define ISERT_MIN_POSTED_RX	(ISCSI_DEF_XMIT_CMDS_MAX >> 2)
 
-#define ISERT_INFLIGHT_DATAOUTS	8
-
-#define ISERT_QP_MAX_REQ_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX *	\
-				(1 + ISERT_INFLIGHT_DATAOUTS) +	\
+#define ISERT_QP_MAX_REQ_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX +	\
 				ISERT_MAX_TX_MISC_PDUS	+	\
 				ISERT_MAX_RX_MISC_PDUS)
 
@@ -71,13 +69,6 @@ enum isert_desc_type {
 	ISCSI_TX_DATAIN
 };
 
-enum iser_ib_op_code {
-	ISER_IB_RECV,
-	ISER_IB_SEND,
-	ISER_IB_RDMA_WRITE,
-	ISER_IB_RDMA_READ,
-};
-
 enum iser_conn_state {
 	ISER_CONN_INIT,
 	ISER_CONN_UP,
@@ -118,42 +109,6 @@ static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
 	return container_of(cqe, struct iser_tx_desc, tx_cqe);
 }
 
-
-enum isert_indicator {
-	ISERT_PROTECTED		= 1 << 0,
-	ISERT_DATA_KEY_VALID	= 1 << 1,
-	ISERT_PROT_KEY_VALID	= 1 << 2,
-	ISERT_SIG_KEY_VALID	= 1 << 3,
-};
-
-struct pi_context {
-	struct ib_mr		*prot_mr;
-	struct ib_mr		*sig_mr;
-};
-
-struct fast_reg_descriptor {
-	struct list_head	list;
-	struct ib_mr		*data_mr;
-	u8			ind;
-	struct pi_context	*pi_ctx;
-};
-
-struct isert_data_buf {
-	struct scatterlist	*sg;
-	int			nents;
-	u32			sg_off;
-	u32			len; /* cur_rdma_length */
-	u32			offset;
-	unsigned int		dma_nents;
-	enum dma_data_direction	dma_dir;
-};
-
-enum {
-	DATA = 0,
-	PROT = 1,
-	SIG = 2,
-};
-
 struct isert_cmd {
 	uint32_t		read_stag;
 	uint32_t		write_stag;
@@ -166,16 +121,7 @@ struct isert_cmd {
 	struct iscsi_cmd	*iscsi_cmd;
 	struct iser_tx_desc	tx_desc;
 	struct iser_rx_desc	*rx_desc;
-	enum iser_ib_op_code	iser_ib_op;
-	struct ib_sge		*ib_sge;
-	struct ib_sge		s_ib_sge;
-	int			rdma_wr_num;
-	struct ib_rdma_wr	*rdma_wr;
-	struct ib_rdma_wr	s_rdma_wr;
-	struct ib_sge		ib_sg[3];
-	struct isert_data_buf	data;
-	struct isert_data_buf	prot;
-	struct fast_reg_descriptor *fr_desc;
+	struct rdma_rw_ctx	rw;
 	struct work_struct	comp_work;
 	struct scatterlist	sg;
 };
@@ -210,10 +156,6 @@ struct isert_conn {
 	struct isert_device	*device;
 	struct mutex		mutex;
 	struct kref		kref;
-	struct list_head	fr_pool;
-	int			fr_pool_size;
-	/* lock to protect fastreg pool */
-	spinlock_t		pool_lock;
 	struct work_struct	release_work;
 	bool			logout_posted;
 	bool			snd_w_inv;
@@ -236,7 +178,6 @@ struct isert_comp {
 };
 
 struct isert_device {
-	int			use_fastreg;
 	bool			pi_capable;
 	int			refcount;
 	struct ib_device	*ib_device;
@@ -244,10 +185,6 @@ struct isert_device {
 	struct isert_comp	*comps;
 	int			comps_used;
 	struct list_head	dev_node;
-	int			(*reg_rdma_mem)(struct isert_cmd *isert_cmd,
-						struct iscsi_conn *conn);
-	void			(*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
-						  struct isert_conn *isert_conn);
 };
 
 struct isert_np {
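For protection-information (T10 PI) commands the same cycle runs through the signature variants, mirroring the isert_rdma_rw_ctx_post()/isert_rdma_rw_ctx_destroy() pairing in the diff above. A hedged sketch under the same assumptions as the earlier example; every name other than the rdma_rw_* calls and struct ib_sig_attrs is a placeholder:

static int example_sig_write(struct ib_qp *qp, u8 port_num,
			     struct rdma_rw_ctx *ctx, struct ib_cqe *cqe,
			     struct scatterlist *sg, u32 sg_cnt,
			     struct scatterlist *prot_sg, u32 prot_sg_cnt,
			     struct ib_sig_attrs *sig_attrs,
			     u64 remote_addr, u32 rkey)
{
	int ret;

	/* one signature MR covers data + protection scatterlists; note
	 * that isert WARNs if a nonzero offset reaches this path */
	ret = rdma_rw_ctx_signature_init(ctx, qp, port_num, sg, sg_cnt,
					 prot_sg, prot_sg_cnt, sig_attrs,
					 remote_addr, rkey, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	ret = rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
	if (ret < 0)
		rdma_rw_ctx_destroy_signature(ctx, qp, port_num, sg, sg_cnt,
					      prot_sg, prot_sg_cnt,
					      DMA_TO_DEVICE);
	return ret;
}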