author		Sagi Grimberg <sagig@mellanox.com>	2014-02-19 10:50:25 -0500
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2014-04-07 04:48:47 -0400
commit		9e961ae73c2ce81387e9b375231d4aefe5ffa13e (patch)
tree		334920c6098f208f72393135e5b8054a3ffc4750
parent		f93f3a70da9175b4641f93d466d779675eb83fa2 (diff)
IB/isert: Support T10-PI protected transactions
In case the Target core passed transport a T10 protection operation:

1. Register data buffer (data memory region)
2. Register protection buffer if it exists (prot memory region)
3. Register signature region (signature memory region)
   - use work request IB_WR_REG_SIG_MR
4. Execute RDMA
5. Upon RDMA completion check the signature status
   - if succeeded send good SCSI response
   - if failed send SCSI bad response with appropriate sense buffer

(Fix up compile error in isert_reg_sig_mr, and fix up incorrect
 se_cmd->prot_type -> TARGET_PROT_NORMAL comparison - nab)
(Fix failed sector assignment in isert_completion_rdma_* - Sagi + nab)
(Fix enum assignments for protection type - Sagi)
(Fix division on 32-bit in isert_completion_rdma_* - Sagi + Fengguang)
(Fix context change for v3.14-rc6 code - nab)
(Fix iscsit_build_rsp_pdu inc_statsn flag usage - nab)

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.c	339
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.h	1
2 files changed, 311 insertions, 29 deletions
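For readers stepping through the hunks below, step 5 of the flow above hinges on the ib_check_mr_status() verb: after the RDMA completes, the target queries the signature MR and, on a detected T10-PI failure, maps the reported error onto a target-core sense code and the offending sector. The following is a minimal standalone sketch of that check; the helper name example_check_sig_mr and its factoring into a separate function are illustrative only, since the patch itself open-codes this logic inside isert_completion_rdma_write() and isert_completion_rdma_read().

#include <linux/kernel.h>		/* pr_err() */
#include <asm/div64.h>			/* do_div() */
#include <rdma/ib_verbs.h>		/* ib_check_mr_status(), struct ib_mr_status */
#include <target/target_core_base.h>	/* struct se_cmd, TCM_* sense codes */

/*
 * Illustrative helper (not part of the patch): ask the HCA whether the
 * signature MR latched a T10-PI error while the RDMA ran, and if so
 * translate it into a target-core sense code plus the failing sector.
 * Returns 0 if the transfer verified cleanly, 1 if a PI error was found,
 * negative if the status query itself failed.
 */
static int example_check_sig_mr(struct ib_mr *sig_mr, struct se_cmd *se_cmd)
{
	struct ib_mr_status mr_status;
	/* each protection interval covers one block plus an 8-byte DIF tuple */
	u32 pi_interval = se_cmd->se_dev->dev_attrib.block_size + 8;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		return ret;
	}
	if (!(mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
		return 0;		/* guard/ref/app tags all verified */

	switch (mr_status.sig_err.err_type) {
	case IB_SIG_BAD_GUARD:
		se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		break;
	case IB_SIG_BAD_REFTAG:
		se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
		break;
	case IB_SIG_BAD_APPTAG:
		se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
		break;
	}
	/* byte offset of the failure -> logical block, 32-bit safe via do_div() */
	se_cmd->bad_sector = mr_status.sig_err.sig_err_offset;
	do_div(se_cmd->bad_sector, pi_interval);

	return 1;
}

On the Data-IN (RDMA_WRITE) completion path a non-zero result makes the patch send a CHECK CONDITION via transport_send_check_condition_and_sense(se_cmd, se_cmd->pi_err, 0) instead of the usual good SCSI response.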
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2eb07b943ed9..f82fe3dccabe 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1552,6 +1552,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 	if (wr->fr_desc) {
 		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
+		if (wr->fr_desc->ind & ISERT_PROTECTED) {
+			isert_unmap_data_buf(isert_conn, &wr->prot);
+			wr->fr_desc->ind &= ~ISERT_PROTECTED;
+		}
 		spin_lock_bh(&isert_conn->conn_lock);
 		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
@@ -1657,12 +1661,55 @@ static void
 isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
 			    struct isert_cmd *isert_cmd)
 {
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct isert_device *device = isert_conn->conn_device;
+	struct ib_mr_status mr_status;
+	int ret = 0;
+
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = ib_check_mr_status(wr->fr_desc->pi_ctx->sig_mr,
+					 IB_MR_CHECK_SIG_STATUS, &mr_status);
+		if (ret) {
+			pr_err("ib_check_mr_status failed, ret %d\n", ret);
+			goto fail_mr_status;
+		}
+		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+			u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+			switch (mr_status.sig_err.err_type) {
+			case IB_SIG_BAD_GUARD:
+				se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+				break;
+			case IB_SIG_BAD_REFTAG:
+				se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+				break;
+			case IB_SIG_BAD_APPTAG:
+				se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+				break;
+			}
+			se_cmd->bad_sector = mr_status.sig_err.sig_err_offset;
+			do_div(se_cmd->bad_sector, block_size);
+
+			pr_err("isert: PI error found type %d at sector 0x%llx "
+			       "expected 0x%x vs actual 0x%x\n",
+			       mr_status.sig_err.err_type,
+			       (unsigned long long)se_cmd->bad_sector,
+			       mr_status.sig_err.expected,
+			       mr_status.sig_err.actual);
+			ret = 1;
+		}
+	}
 
+fail_mr_status:
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	isert_put_response(isert_conn->conn, cmd);
+	if (ret)
+		transport_send_check_condition_and_sense(se_cmd,
+							 se_cmd->pi_err, 0);
+	else
+		isert_put_response(isert_conn->conn, cmd);
 }
 
 static void
@@ -1674,7 +1721,43 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct isert_device *device = isert_conn->conn_device;
+	struct ib_mr_status mr_status;
+	int ret;
 
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = ib_check_mr_status(wr->fr_desc->pi_ctx->sig_mr,
+					 IB_MR_CHECK_SIG_STATUS, &mr_status);
+		if (ret) {
+			pr_err("ib_check_mr_status failed, ret %d\n", ret);
+			goto fail_mr_status;
+		}
+		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+			u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+			switch (mr_status.sig_err.err_type) {
+			case IB_SIG_BAD_GUARD:
+				se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+				break;
+			case IB_SIG_BAD_REFTAG:
+				se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+				break;
+			case IB_SIG_BAD_APPTAG:
+				se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+				break;
+			}
+			se_cmd->bad_sector = mr_status.sig_err.sig_err_offset;
+			do_div(se_cmd->bad_sector, block_size);
+
+			pr_err("isert: PI error found type %d at sector 0x%llx "
+			       "expected 0x%x vs actual 0x%x\n",
+			       mr_status.sig_err.err_type,
+			       (unsigned long long)se_cmd->bad_sector,
+			       mr_status.sig_err.expected,
+			       mr_status.sig_err.actual);
+		}
+	}
+
+fail_mr_status:
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
 	cmd->write_data_done = wr->data.len;
@@ -2349,9 +2432,12 @@ static int
 isert_fast_reg_mr(struct isert_conn *isert_conn,
 		  struct fast_reg_descriptor *fr_desc,
 		  struct isert_data_buf *mem,
+		  enum isert_indicator ind,
 		  struct ib_sge *sge)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_mr *mr;
+	struct ib_fast_reg_page_list *frpl;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	int ret, pagelist_len;
@@ -2362,39 +2448,51 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 		sge->lkey = isert_conn->conn_mr->lkey;
 		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
 		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+		pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+			 __func__, __LINE__, sge->addr, sge->length,
+			 sge->lkey);
 		return 0;
 	}
 
+	if (ind == ISERT_DATA_KEY_VALID) {
+		/* Registering data buffer */
+		mr = fr_desc->data_mr;
+		frpl = fr_desc->data_frpl;
+	} else {
+		/* Registering protection buffer */
+		mr = fr_desc->pi_ctx->prot_mr;
+		frpl = fr_desc->pi_ctx->prot_frpl;
+	}
+
 	page_off = mem->offset % PAGE_SIZE;
 
 	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
 		 fr_desc, mem->nents, mem->offset);
 
 	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
-					     &fr_desc->data_frpl->page_list[0]);
+					     &frpl->page_list[0]);
 
-	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
+	if (!(fr_desc->ind & ind)) {
 		memset(&inv_wr, 0, sizeof(inv_wr));
 		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
 		inv_wr.opcode = IB_WR_LOCAL_INV;
-		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+		inv_wr.ex.invalidate_rkey = mr->rkey;
 		wr = &inv_wr;
 		/* Bump the key */
-		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
-		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+		key = (u8)(mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(mr, ++key);
 	}
 
 	/* Prepare FASTREG WR */
 	memset(&fr_wr, 0, sizeof(fr_wr));
 	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
 	fr_wr.opcode = IB_WR_FAST_REG_MR;
-	fr_wr.wr.fast_reg.iova_start =
-		fr_desc->data_frpl->page_list[0] + page_off;
-	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
+	fr_wr.wr.fast_reg.page_list = frpl;
 	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
 	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	fr_wr.wr.fast_reg.length = mem->len;
-	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+	fr_wr.wr.fast_reg.rkey = mr->rkey;
 	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
 	if (!wr)
@@ -2407,18 +2505,158 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 		pr_err("fast registration failed, ret:%d\n", ret);
 		return ret;
 	}
-	fr_desc->ind &= ~ISERT_DATA_KEY_VALID;
+	fr_desc->ind &= ~ind;
 
-	sge->lkey = fr_desc->data_mr->lkey;
-	sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
+	sge->lkey = mr->lkey;
+	sge->addr = frpl->page_list[0] + page_off;
 	sge->length = mem->len;
 
-	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
-		 sge->addr, sge->length, sge->lkey);
+	pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+		 __func__, __LINE__, sge->addr, sge->length,
+		 sge->lkey);
 
 	return ret;
 }
 
+static inline enum ib_t10_dif_type
+se2ib_prot_type(enum target_prot_type prot_type)
+{
+	switch (prot_type) {
+	case TARGET_DIF_TYPE0_PROT:
+		return IB_T10DIF_NONE;
+	case TARGET_DIF_TYPE1_PROT:
+		return IB_T10DIF_TYPE1;
+	case TARGET_DIF_TYPE2_PROT:
+		return IB_T10DIF_TYPE2;
+	case TARGET_DIF_TYPE3_PROT:
+		return IB_T10DIF_TYPE3;
+	default:
+		return IB_T10DIF_NONE;
+	}
+}
+
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+	enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
+
+	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->mem.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+	sig_attrs->wire.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_STRIP:
+		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_STRIP:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+		break;
+	case TARGET_PROT_DIN_PASS:
+	case TARGET_PROT_DOUT_PASS:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	default:
+		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline u8
+isert_set_prot_checks(u8 prot_checks)
+{
+	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+}
+
+static int
+isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+		 struct fast_reg_descriptor *fr_desc,
+		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
+		 struct ib_sge *sig_sge)
+{
+	struct ib_send_wr sig_wr, inv_wr;
+	struct ib_send_wr *bad_wr, *wr = NULL;
+	struct pi_context *pi_ctx = fr_desc->pi_ctx;
+	struct ib_sig_attrs sig_attrs;
+	int ret;
+	u32 key;
+
+	memset(&sig_attrs, 0, sizeof(sig_attrs));
+	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+	if (ret)
+		goto err;
+
+	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+
+	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
+		memset(&inv_wr, 0, sizeof(inv_wr));
+		inv_wr.opcode = IB_WR_LOCAL_INV;
+		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+		wr = &inv_wr;
+		/* Bump the key */
+		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+	}
+
+	memset(&sig_wr, 0, sizeof(sig_wr));
+	sig_wr.opcode = IB_WR_REG_SIG_MR;
+	sig_wr.sg_list = data_sge;
+	sig_wr.num_sge = 1;
+	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+	if (se_cmd->t_prot_sg)
+		sig_wr.wr.sig_handover.prot = prot_sge;
+
+	if (!wr)
+		wr = &sig_wr;
+	else
+		wr->next = &sig_wr;
+
+	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+	if (ret) {
+		pr_err("fast registration failed, ret:%d\n", ret);
+		goto err;
+	}
+	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+
+	sig_sge->lkey = pi_ctx->sig_mr->lkey;
+	sig_sge->addr = 0;
+	sig_sge->length = se_cmd->data_length;
+	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+		/*
+		 * We have protection guards on the wire
+		 * so we need to set a larger transfer
+		 */
+		sig_sge->length += se_cmd->prot_length;
+
+	pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+		 sig_sge->addr, sig_sge->length,
+		 sig_sge->lkey);
+err:
+	return ret;
+}
+
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr)
@@ -2426,6 +2664,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
+	struct ib_sge data_sge;
 	struct ib_send_wr *send_wr;
 	struct fast_reg_descriptor *fr_desc = NULL;
 	u32 offset;
@@ -2441,7 +2680,8 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (ret)
 		return ret;
 
-	if (wr->data.dma_nents != 1) {
+	if (wr->data.dma_nents != 1 ||
+	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
 		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
 					   struct fast_reg_descriptor, list);
@@ -2450,10 +2690,39 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		wr->fr_desc = fr_desc;
 	}
 
-	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, &wr->s_ib_sge);
+	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+				ISERT_DATA_KEY_VALID, &data_sge);
 	if (ret)
 		goto unmap_cmd;
 
+	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+		struct ib_sge prot_sge, sig_sge;
+
+		if (se_cmd->t_prot_sg) {
+			ret = isert_map_data_buf(isert_conn, isert_cmd,
+						 se_cmd->t_prot_sg,
+						 se_cmd->t_prot_nents,
+						 se_cmd->prot_length,
+						 0, wr->iser_ib_op, &wr->prot);
+			if (ret)
+				goto unmap_cmd;
+
+			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+						ISERT_PROT_KEY_VALID, &prot_sge);
+			if (ret)
+				goto unmap_prot_cmd;
+		}
+
+		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+				       &data_sge, &prot_sge, &sig_sge);
+		if (ret)
+			goto unmap_prot_cmd;
+
+		fr_desc->ind |= ISERT_PROTECTED;
+		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+	} else
+		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+
 	wr->ib_sge = &wr->s_ib_sge;
 	wr->send_wr_num = 1;
 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
@@ -2468,8 +2737,8 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		send_wr->opcode = IB_WR_RDMA_WRITE;
 		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
 		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-		send_wr->send_flags = 0;
-		send_wr->next = &isert_cmd->tx_desc.send_wr;
+		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+				      0 : IB_SEND_SIGNALED;
 	} else {
 		send_wr->opcode = IB_WR_RDMA_READ;
 		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2478,6 +2747,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	return 0;
+unmap_prot_cmd:
+	if (se_cmd->t_prot_sg)
+		isert_unmap_data_buf(isert_conn, &wr->prot);
 unmap_cmd:
 	if (fr_desc) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
@@ -2509,15 +2781,19 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		return rc;
 	}
 
-	/*
-	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
-	 */
-	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
-			     &isert_cmd->tx_desc.iscsi_header);
-	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_conn, isert_cmd,
-			   &isert_cmd->tx_desc.send_wr, true);
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+		/*
+		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
+		 */
+		isert_create_send_desc(isert_conn, isert_cmd,
+				       &isert_cmd->tx_desc);
+		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+				     &isert_cmd->tx_desc.iscsi_header);
+		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+		isert_init_send_wr(isert_conn, isert_cmd,
+				   &isert_cmd->tx_desc.send_wr, true);
+		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+	}
 
 	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
@@ -2526,8 +2802,13 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 	}
-	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
-		 isert_cmd);
+
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
+		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+			 "READ\n", isert_cmd);
+	else
+		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+			 isert_cmd);
 
 	return 1;
 }
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index a75b75fbc9d1..4c072ae34c01 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -91,6 +91,7 @@ struct isert_rdma_wr {
 	struct ib_send_wr	*send_wr;
 	struct ib_send_wr	s_send_wr;
 	struct isert_data_buf	data;
+	struct isert_data_buf	prot;
 	struct fast_reg_descriptor *fr_desc;
 };
 