Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 747
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h |  26
2 files changed, 545 insertions(+), 228 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3f62041222f2..3591855cc5b5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * This file contains iSCSI extentions for RDMA (iSER) Verbs 2 * This file contains iSCSI extentions for RDMA (iSER) Verbs
3 * 3 *
4 * (c) Copyright 2013 RisingTide Systems LLC. 4 * (c) Copyright 2013 Datera, Inc.
5 * 5 *
6 * Nicholas A. Bellinger <nab@linux-iscsi.org> 6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
7 * 7 *
@@ -39,7 +39,17 @@ static DEFINE_MUTEX(device_list_mutex);
39static LIST_HEAD(device_list); 39static LIST_HEAD(device_list);
40static struct workqueue_struct *isert_rx_wq; 40static struct workqueue_struct *isert_rx_wq;
41static struct workqueue_struct *isert_comp_wq; 41static struct workqueue_struct *isert_comp_wq;
42static struct kmem_cache *isert_cmd_cache; 42
43static void
44isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
45static int
46isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
47 struct isert_rdma_wr *wr);
48static void
49isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
50static int
51isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
52 struct isert_rdma_wr *wr);
43 53
44static void 54static void
45isert_qp_event_callback(struct ib_event *e, void *context) 55isert_qp_event_callback(struct ib_event *e, void *context)
@@ -80,14 +90,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
80{ 90{
81 struct isert_device *device = isert_conn->conn_device; 91 struct isert_device *device = isert_conn->conn_device;
82 struct ib_qp_init_attr attr; 92 struct ib_qp_init_attr attr;
83 struct ib_device_attr devattr;
84 int ret, index, min_index = 0; 93 int ret, index, min_index = 0;
85 94
86 memset(&devattr, 0, sizeof(struct ib_device_attr));
87 ret = isert_query_device(cma_id->device, &devattr);
88 if (ret)
89 return ret;
90
91 mutex_lock(&device_list_mutex); 95 mutex_lock(&device_list_mutex);
92 for (index = 0; index < device->cqs_used; index++) 96 for (index = 0; index < device->cqs_used; index++)
93 if (device->cq_active_qps[index] < 97 if (device->cq_active_qps[index] <
@@ -108,7 +112,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
108 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 112 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
109 * work-around for RDMA_READ.. 113 * work-around for RDMA_READ..
110 */ 114 */
111 attr.cap.max_send_sge = devattr.max_sge - 2; 115 attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
112 isert_conn->max_sge = attr.cap.max_send_sge; 116 isert_conn->max_sge = attr.cap.max_send_sge;
113 117
114 attr.cap.max_recv_sge = 1; 118 attr.cap.max_recv_sge = 1;
@@ -210,14 +214,31 @@ isert_create_device_ib_res(struct isert_device *device)
210{ 214{
211 struct ib_device *ib_dev = device->ib_device; 215 struct ib_device *ib_dev = device->ib_device;
212 struct isert_cq_desc *cq_desc; 216 struct isert_cq_desc *cq_desc;
217 struct ib_device_attr *dev_attr;
213 int ret = 0, i, j; 218 int ret = 0, i, j;
214 219
220 dev_attr = &device->dev_attr;
221 ret = isert_query_device(ib_dev, dev_attr);
222 if (ret)
223 return ret;
224
 225 /* assign function handlers */
226 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
227 device->use_frwr = 1;
228 device->reg_rdma_mem = isert_reg_rdma_frwr;
229 device->unreg_rdma_mem = isert_unreg_rdma_frwr;
230 } else {
231 device->use_frwr = 0;
232 device->reg_rdma_mem = isert_map_rdma;
233 device->unreg_rdma_mem = isert_unmap_cmd;
234 }
235
215 device->cqs_used = min_t(int, num_online_cpus(), 236 device->cqs_used = min_t(int, num_online_cpus(),
216 device->ib_device->num_comp_vectors); 237 device->ib_device->num_comp_vectors);
217 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 238 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
218 pr_debug("Using %d CQs, device %s supports %d vectors\n", 239 pr_debug("Using %d CQs, device %s supports %d vectors, FRWR support: %d\n",
219 device->cqs_used, device->ib_device->name, 240 device->cqs_used, device->ib_device->name,
220 device->ib_device->num_comp_vectors); 241 device->ib_device->num_comp_vectors, device->use_frwr);
221 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 242 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
222 device->cqs_used, GFP_KERNEL); 243 device->cqs_used, GFP_KERNEL);
223 if (!device->cq_desc) { 244 if (!device->cq_desc) {
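
A note on the hunk above: the registration strategy is chosen once per ib_device from IB_DEVICE_MEM_MGT_EXTENSIONS and stored as function pointers in struct isert_device, so the I/O path never re-tests the capability. A minimal caller-side sketch (the helper name is hypothetical; the real callers are isert_put_datain()/isert_get_dataout() later in this patch):

/* Hypothetical helper showing how the per-device ops are consumed:
 * callers set the RDMA direction and call through reg_rdma_mem(),
 * which resolves to isert_reg_rdma_frwr() or isert_map_rdma(). */
static int isert_prepare_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			      enum iser_ib_op_code op)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->conn_device;

	wr->iser_ib_op = op;	/* ISER_IB_RDMA_WRITE or ISER_IB_RDMA_READ */
	return device->reg_rdma_mem(conn, cmd, wr);
}
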
@@ -363,6 +384,85 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
363 return device; 384 return device;
364} 385}
365 386
387static void
388isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
389{
390 struct fast_reg_descriptor *fr_desc, *tmp;
391 int i = 0;
392
393 if (list_empty(&isert_conn->conn_frwr_pool))
394 return;
395
 396 pr_debug("Freeing conn %p frwr pool\n", isert_conn);
397
398 list_for_each_entry_safe(fr_desc, tmp,
399 &isert_conn->conn_frwr_pool, list) {
400 list_del(&fr_desc->list);
401 ib_free_fast_reg_page_list(fr_desc->data_frpl);
402 ib_dereg_mr(fr_desc->data_mr);
403 kfree(fr_desc);
404 ++i;
405 }
406
407 if (i < isert_conn->conn_frwr_pool_size)
408 pr_warn("Pool still has %d regions registered\n",
409 isert_conn->conn_frwr_pool_size - i);
410}
411
412static int
413isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
414{
415 struct fast_reg_descriptor *fr_desc;
416 struct isert_device *device = isert_conn->conn_device;
417 int i, ret;
418
419 INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
420 isert_conn->conn_frwr_pool_size = 0;
421 for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
422 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
423 if (!fr_desc) {
424 pr_err("Failed to allocate fast_reg descriptor\n");
425 ret = -ENOMEM;
426 goto err;
427 }
428
429 fr_desc->data_frpl =
430 ib_alloc_fast_reg_page_list(device->ib_device,
431 ISCSI_ISER_SG_TABLESIZE);
432 if (IS_ERR(fr_desc->data_frpl)) {
433 pr_err("Failed to allocate fr_pg_list err=%ld\n",
434 PTR_ERR(fr_desc->data_frpl));
435 ret = PTR_ERR(fr_desc->data_frpl);
436 goto err;
437 }
438
439 fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
440 ISCSI_ISER_SG_TABLESIZE);
441 if (IS_ERR(fr_desc->data_mr)) {
442 pr_err("Failed to allocate frmr err=%ld\n",
443 PTR_ERR(fr_desc->data_mr));
444 ret = PTR_ERR(fr_desc->data_mr);
445 ib_free_fast_reg_page_list(fr_desc->data_frpl);
446 goto err;
447 }
448 pr_debug("Create fr_desc %p page_list %p\n",
449 fr_desc, fr_desc->data_frpl->page_list);
450
451 fr_desc->valid = true;
452 list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
453 isert_conn->conn_frwr_pool_size++;
454 }
455
 456 pr_debug("Created conn %p frwr pool size=%d\n",
457 isert_conn, isert_conn->conn_frwr_pool_size);
458
459 return 0;
460
461err:
462 isert_conn_free_frwr_pool(isert_conn);
463 return ret;
464}
465
366static int 466static int
367isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 467isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
368{ 468{
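
The pool created above holds ISCSI_DEF_XMIT_CMDS_MAX descriptors, one per potentially outstanding command, each carrying a fast-reg MR and page list sized for ISCSI_ISER_SG_TABLESIZE pages. Descriptors are checked out in isert_reg_rdma_frwr() and returned in isert_unreg_rdma_frwr(), two functions far apart in the file; a condensed sketch of that lifecycle under conn_lock (note the checkout assumes the pool never runs dry, which the sizing is meant to guarantee):

/* Checkout (condensed from isert_reg_rdma_frwr() below) */
spin_lock_irqsave(&isert_conn->conn_lock, flags);
fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
			   struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);

/* ... fast-register, run the RDMA, then on command release ... */

/* Return (condensed from isert_unreg_rdma_frwr() below) */
spin_lock_bh(&isert_conn->conn_lock);
list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
spin_unlock_bh(&isert_conn->conn_lock);
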
@@ -389,6 +489,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
389 kref_init(&isert_conn->conn_kref); 489 kref_init(&isert_conn->conn_kref);
390 kref_get(&isert_conn->conn_kref); 490 kref_get(&isert_conn->conn_kref);
391 mutex_init(&isert_conn->conn_mutex); 491 mutex_init(&isert_conn->conn_mutex);
492 spin_lock_init(&isert_conn->conn_lock);
392 493
393 cma_id->context = isert_conn; 494 cma_id->context = isert_conn;
394 isert_conn->conn_cm_id = cma_id; 495 isert_conn->conn_cm_id = cma_id;
@@ -446,6 +547,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
446 isert_conn->conn_pd = device->dev_pd; 547 isert_conn->conn_pd = device->dev_pd;
447 isert_conn->conn_mr = device->dev_mr; 548 isert_conn->conn_mr = device->dev_mr;
448 549
550 if (device->use_frwr) {
551 ret = isert_conn_create_frwr_pool(isert_conn);
552 if (ret) {
553 pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
554 goto out_frwr;
555 }
556 }
557
449 ret = isert_conn_setup_qp(isert_conn, cma_id); 558 ret = isert_conn_setup_qp(isert_conn, cma_id);
450 if (ret) 559 if (ret)
451 goto out_conn_dev; 560 goto out_conn_dev;
@@ -459,6 +568,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
459 return 0; 568 return 0;
460 569
461out_conn_dev: 570out_conn_dev:
571 if (device->use_frwr)
572 isert_conn_free_frwr_pool(isert_conn);
573out_frwr:
462 isert_device_try_release(device); 574 isert_device_try_release(device);
463out_rsp_dma_map: 575out_rsp_dma_map:
464 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, 576 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -482,6 +594,9 @@ isert_connect_release(struct isert_conn *isert_conn)
482 594
483 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
484 596
597 if (device->use_frwr)
598 isert_conn_free_frwr_pool(isert_conn);
599
485 if (isert_conn->conn_qp) { 600 if (isert_conn->conn_qp) {
486 cq_index = ((struct isert_cq_desc *) 601 cq_index = ((struct isert_cq_desc *)
487 isert_conn->conn_qp->recv_cq->cq_context)->cq_index; 602 isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
@@ -869,46 +984,37 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
869 size, rx_buflen, MAX_KEY_VALUE_PAIRS); 984 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
870 memcpy(login->req_buf, &rx_desc->data[0], size); 985 memcpy(login->req_buf, &rx_desc->data[0], size);
871 986
872 complete(&isert_conn->conn_login_comp); 987 if (login->first_request) {
873} 988 complete(&isert_conn->conn_login_comp);
874 989 return;
875static void 990 }
876isert_release_cmd(struct iscsi_cmd *cmd) 991 schedule_delayed_work(&conn->login_work, 0);
877{
878 struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
879 iscsi_cmd);
880
881 pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);
882
883 kfree(cmd->buf_ptr);
884 kfree(cmd->tmr_req);
885
886 kmem_cache_free(isert_cmd_cache, isert_cmd);
887} 992}
888 993
889static struct iscsi_cmd 994static struct iscsi_cmd
890*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp) 995*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
891{ 996{
892 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 997 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
893 struct isert_cmd *isert_cmd; 998 struct isert_cmd *isert_cmd;
999 struct iscsi_cmd *cmd;
894 1000
895 isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp); 1001 cmd = iscsit_allocate_cmd(conn, gfp);
896 if (!isert_cmd) { 1002 if (!cmd) {
897 pr_err("Unable to allocate isert_cmd\n"); 1003 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
898 return NULL; 1004 return NULL;
899 } 1005 }
1006 isert_cmd = iscsit_priv_cmd(cmd);
900 isert_cmd->conn = isert_conn; 1007 isert_cmd->conn = isert_conn;
901 isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd; 1008 isert_cmd->iscsi_cmd = cmd;
902 1009
903 return &isert_cmd->iscsi_cmd; 1010 return cmd;
904} 1011}
905 1012
906static int 1013static int
907isert_handle_scsi_cmd(struct isert_conn *isert_conn, 1014isert_handle_scsi_cmd(struct isert_conn *isert_conn,
908 struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc, 1015 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
909 unsigned char *buf) 1016 struct iser_rx_desc *rx_desc, unsigned char *buf)
910{ 1017{
911 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
912 struct iscsi_conn *conn = isert_conn->conn; 1018 struct iscsi_conn *conn = isert_conn->conn;
913 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1019 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
914 struct scatterlist *sg; 1020 struct scatterlist *sg;
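
The allocation change above leans on the new .priv_size field registered on iser_target_transport (see the hunk near the end of this file's diff): iscsit_allocate_cmd() co-allocates the driver's per-command state behind struct iscsi_cmd, and iscsit_priv_cmd() recovers it, replacing the driver-private isert_cmd_cache and the container_of() conversions. A sketch of the layout, assuming iscsit_priv_cmd() is the usual "point past the core struct" inline:

/*
 * Layout with .priv_size = sizeof(struct isert_cmd):
 *
 *   +------------------+--------------------+
 *   | struct iscsi_cmd | struct isert_cmd   |
 *   +------------------+--------------------+
 *   ^ cmd               ^ iscsit_priv_cmd(cmd)
 *
 * Assumed accessor (matches the common inline definition):
 */
static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
{
	return (void *)(cmd + 1);
}
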
@@ -1015,9 +1121,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1015 1121
1016static int 1122static int
1017isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1123isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1018 struct iser_rx_desc *rx_desc, unsigned char *buf) 1124 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1125 unsigned char *buf)
1019{ 1126{
1020 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1021 struct iscsi_conn *conn = isert_conn->conn; 1127 struct iscsi_conn *conn = isert_conn->conn;
1022 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1128 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1023 int rc; 1129 int rc;
@@ -1034,9 +1140,9 @@ isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1034 1140
1035static int 1141static int
1036isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1142isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1037 struct iser_rx_desc *rx_desc, struct iscsi_text *hdr) 1143 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1144 struct iscsi_text *hdr)
1038{ 1145{
1039 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1040 struct iscsi_conn *conn = isert_conn->conn; 1146 struct iscsi_conn *conn = isert_conn->conn;
1041 u32 payload_length = ntoh24(hdr->dlength); 1147 u32 payload_length = ntoh24(hdr->dlength);
1042 int rc; 1148 int rc;
@@ -1081,26 +1187,26 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1081 1187
1082 switch (opcode) { 1188 switch (opcode) {
1083 case ISCSI_OP_SCSI_CMD: 1189 case ISCSI_OP_SCSI_CMD:
1084 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1190 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1085 if (!cmd) 1191 if (!cmd)
1086 break; 1192 break;
1087 1193
1088 isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd); 1194 isert_cmd = iscsit_priv_cmd(cmd);
1089 isert_cmd->read_stag = read_stag; 1195 isert_cmd->read_stag = read_stag;
1090 isert_cmd->read_va = read_va; 1196 isert_cmd->read_va = read_va;
1091 isert_cmd->write_stag = write_stag; 1197 isert_cmd->write_stag = write_stag;
1092 isert_cmd->write_va = write_va; 1198 isert_cmd->write_va = write_va;
1093 1199
1094 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, 1200 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1095 rx_desc, (unsigned char *)hdr); 1201 rx_desc, (unsigned char *)hdr);
1096 break; 1202 break;
1097 case ISCSI_OP_NOOP_OUT: 1203 case ISCSI_OP_NOOP_OUT:
1098 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1204 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1099 if (!cmd) 1205 if (!cmd)
1100 break; 1206 break;
1101 1207
1102 isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd); 1208 isert_cmd = iscsit_priv_cmd(cmd);
1103 ret = isert_handle_nop_out(isert_conn, isert_cmd, 1209 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1104 rx_desc, (unsigned char *)hdr); 1210 rx_desc, (unsigned char *)hdr);
1105 break; 1211 break;
1106 case ISCSI_OP_SCSI_DATA_OUT: 1212 case ISCSI_OP_SCSI_DATA_OUT:
@@ -1108,7 +1214,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1108 (unsigned char *)hdr); 1214 (unsigned char *)hdr);
1109 break; 1215 break;
1110 case ISCSI_OP_SCSI_TMFUNC: 1216 case ISCSI_OP_SCSI_TMFUNC:
1111 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1217 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1112 if (!cmd) 1218 if (!cmd)
1113 break; 1219 break;
1114 1220
@@ -1116,7 +1222,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1116 (unsigned char *)hdr); 1222 (unsigned char *)hdr);
1117 break; 1223 break;
1118 case ISCSI_OP_LOGOUT: 1224 case ISCSI_OP_LOGOUT:
1119 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1225 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1120 if (!cmd) 1226 if (!cmd)
1121 break; 1227 break;
1122 1228
@@ -1127,12 +1233,12 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1127 HZ); 1233 HZ);
1128 break; 1234 break;
1129 case ISCSI_OP_TEXT: 1235 case ISCSI_OP_TEXT:
1130 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1236 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1131 if (!cmd) 1237 if (!cmd)
1132 break; 1238 break;
1133 1239
1134 isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd); 1240 isert_cmd = iscsit_priv_cmd(cmd);
1135 ret = isert_handle_text_cmd(isert_conn, isert_cmd, 1241 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1136 rx_desc, (struct iscsi_text *)hdr); 1242 rx_desc, (struct iscsi_text *)hdr);
1137 break; 1243 break;
1138 default: 1244 default:
@@ -1243,26 +1349,65 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1243 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1349 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1244 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1350 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1245 1351
1246 pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n"); 1352 pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1353 if (wr->sge) {
1354 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1355 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1356 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1357 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1358 wr->sge = NULL;
1359 }
1360
1361 if (wr->send_wr) {
1362 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1363 kfree(wr->send_wr);
1364 wr->send_wr = NULL;
1365 }
1366
1367 if (wr->ib_sge) {
1368 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1369 kfree(wr->ib_sge);
1370 wr->ib_sge = NULL;
1371 }
1372}
1373
1374static void
1375isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1376{
1377 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1378 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1379 LIST_HEAD(unmap_list);
1380
1381 pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
1382
1383 if (wr->fr_desc) {
1384 pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
1385 isert_cmd, wr->fr_desc);
1386 spin_lock_bh(&isert_conn->conn_lock);
1387 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
1388 spin_unlock_bh(&isert_conn->conn_lock);
1389 wr->fr_desc = NULL;
1390 }
1247 1391
1248 if (wr->sge) { 1392 if (wr->sge) {
1249 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE); 1393 pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
1394 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1395 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1396 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1250 wr->sge = NULL; 1397 wr->sge = NULL;
1251 } 1398 }
1252 1399
1253 kfree(wr->send_wr); 1400 wr->ib_sge = NULL;
1254 wr->send_wr = NULL; 1401 wr->send_wr = NULL;
1255
1256 kfree(isert_cmd->ib_sge);
1257 isert_cmd->ib_sge = NULL;
1258} 1402}
1259 1403
1260static void 1404static void
1261isert_put_cmd(struct isert_cmd *isert_cmd) 1405isert_put_cmd(struct isert_cmd *isert_cmd)
1262{ 1406{
1263 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1407 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1264 struct isert_conn *isert_conn = isert_cmd->conn; 1408 struct isert_conn *isert_conn = isert_cmd->conn;
1265 struct iscsi_conn *conn = isert_conn->conn; 1409 struct iscsi_conn *conn = isert_conn->conn;
1410 struct isert_device *device = isert_conn->conn_device;
1266 1411
1267 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); 1412 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1268 1413
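
Both unmap paths above derive the DMA direction from the saved wr->iser_ib_op instead of hard-coding DMA_TO_DEVICE as the old code did, which was wrong for the RDMA_READ (Data-Out) case. A hypothetical helper, not in the patch, stating the rule once:

/* Hypothetical helper capturing the direction rule used above:
 * an RDMA_WRITE pushes local data toward the initiator, so the
 * scatterlist was mapped DMA_TO_DEVICE; an RDMA_READ pulls
 * Data-Out from the initiator, so it was mapped DMA_FROM_DEVICE. */
static inline enum dma_data_direction
isert_dma_dir(const struct isert_rdma_wr *wr)
{
	return (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
	       DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
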
@@ -1276,7 +1421,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1276 if (cmd->data_direction == DMA_TO_DEVICE) 1421 if (cmd->data_direction == DMA_TO_DEVICE)
1277 iscsit_stop_dataout_timer(cmd); 1422 iscsit_stop_dataout_timer(cmd);
1278 1423
1279 isert_unmap_cmd(isert_cmd, isert_conn); 1424 device->unreg_rdma_mem(isert_cmd, isert_conn);
1280 transport_generic_free_cmd(&cmd->se_cmd, 0); 1425 transport_generic_free_cmd(&cmd->se_cmd, 0);
1281 break; 1426 break;
1282 case ISCSI_OP_SCSI_TMFUNC: 1427 case ISCSI_OP_SCSI_TMFUNC:
@@ -1311,7 +1456,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1311 * Fall-through 1456 * Fall-through
1312 */ 1457 */
1313 default: 1458 default:
1314 isert_release_cmd(cmd); 1459 iscsit_release_cmd(cmd);
1315 break; 1460 break;
1316 } 1461 }
1317} 1462}
@@ -1347,27 +1492,16 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1347 struct isert_cmd *isert_cmd) 1492 struct isert_cmd *isert_cmd)
1348{ 1493{
1349 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1494 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1350 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1495 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1351 struct se_cmd *se_cmd = &cmd->se_cmd; 1496 struct se_cmd *se_cmd = &cmd->se_cmd;
1352 struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device; 1497 struct isert_conn *isert_conn = isert_cmd->conn;
1498 struct isert_device *device = isert_conn->conn_device;
1353 1499
1354 iscsit_stop_dataout_timer(cmd); 1500 iscsit_stop_dataout_timer(cmd);
1501 device->unreg_rdma_mem(isert_cmd, isert_conn);
1502 cmd->write_data_done = wr->cur_rdma_length;
1355 1503
1356 if (wr->sge) { 1504 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1357 pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
1358 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
1359 wr->sge = NULL;
1360 }
1361
1362 if (isert_cmd->ib_sge) {
1363 pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
1364 kfree(isert_cmd->ib_sge);
1365 isert_cmd->ib_sge = NULL;
1366 }
1367
1368 cmd->write_data_done = se_cmd->data_length;
1369
1370 pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
1371 spin_lock_bh(&cmd->istate_lock); 1505 spin_lock_bh(&cmd->istate_lock);
1372 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1506 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1373 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1507 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1383,7 +1517,7 @@ isert_do_control_comp(struct work_struct *work)
1383 struct isert_cmd, comp_work); 1517 struct isert_cmd, comp_work);
1384 struct isert_conn *isert_conn = isert_cmd->conn; 1518 struct isert_conn *isert_conn = isert_cmd->conn;
1385 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1519 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1386 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1520 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1387 1521
1388 switch (cmd->i_state) { 1522 switch (cmd->i_state) {
1389 case ISTATE_SEND_TASKMGTRSP: 1523 case ISTATE_SEND_TASKMGTRSP:
@@ -1429,7 +1563,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1429 struct isert_conn *isert_conn, 1563 struct isert_conn *isert_conn,
1430 struct ib_device *ib_dev) 1564 struct ib_device *ib_dev)
1431{ 1565{
1432 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1566 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1433 1567
1434 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || 1568 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1435 cmd->i_state == ISTATE_SEND_LOGOUTRSP || 1569 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1621,8 +1755,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1621static int 1755static int
1622isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1756isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1623{ 1757{
1624 struct isert_cmd *isert_cmd = container_of(cmd, 1758 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1625 struct isert_cmd, iscsi_cmd);
1626 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1759 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1627 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1760 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1628 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) 1761 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
@@ -1671,8 +1804,7 @@ static int
1671isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 1804isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1672 bool nopout_response) 1805 bool nopout_response)
1673{ 1806{
1674 struct isert_cmd *isert_cmd = container_of(cmd, 1807 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1675 struct isert_cmd, iscsi_cmd);
1676 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1808 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1677 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1809 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1678 1810
@@ -1691,8 +1823,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1691static int 1823static int
1692isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1824isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1693{ 1825{
1694 struct isert_cmd *isert_cmd = container_of(cmd, 1826 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1695 struct isert_cmd, iscsi_cmd);
1696 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1827 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1697 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1828 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1698 1829
@@ -1710,8 +1841,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1710static int 1841static int
1711isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1842isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1712{ 1843{
1713 struct isert_cmd *isert_cmd = container_of(cmd, 1844 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1714 struct isert_cmd, iscsi_cmd);
1715 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1845 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1716 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1846 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1717 1847
@@ -1729,8 +1859,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1729static int 1859static int
1730isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1860isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1731{ 1861{
1732 struct isert_cmd *isert_cmd = container_of(cmd, 1862 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1733 struct isert_cmd, iscsi_cmd);
1734 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1863 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1735 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1864 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1736 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1865 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
@@ -1762,8 +1891,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1762static int 1891static int
1763isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 1892isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1764{ 1893{
1765 struct isert_cmd *isert_cmd = container_of(cmd, 1894 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1766 struct isert_cmd, iscsi_cmd);
1767 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1895 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1768 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; 1896 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1769 struct iscsi_text_rsp *hdr = 1897 struct iscsi_text_rsp *hdr =
@@ -1805,7 +1933,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1805 struct ib_sge *ib_sge, struct ib_send_wr *send_wr, 1933 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1806 u32 data_left, u32 offset) 1934 u32 data_left, u32 offset)
1807{ 1935{
1808 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; 1936 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1809 struct scatterlist *sg_start, *tmp_sg; 1937 struct scatterlist *sg_start, *tmp_sg;
1810 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1938 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1811 u32 sg_off, page_off; 1939 u32 sg_off, page_off;
@@ -1832,8 +1960,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1832 ib_sg_dma_len(ib_dev, tmp_sg) - page_off); 1960 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
1833 ib_sge->lkey = isert_conn->conn_mr->lkey; 1961 ib_sge->lkey = isert_conn->conn_mr->lkey;
1834 1962
1835 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n", 1963 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
1836 ib_sge->addr, ib_sge->length); 1964 ib_sge->addr, ib_sge->length, ib_sge->lkey);
1837 page_off = 0; 1965 page_off = 0;
1838 data_left -= ib_sge->length; 1966 data_left -= ib_sge->length;
1839 ib_sge++; 1967 ib_sge++;
@@ -1847,200 +1975,373 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1847} 1975}
1848 1976
1849static int 1977static int
1850isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) 1978isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1979 struct isert_rdma_wr *wr)
1851{ 1980{
1852 struct se_cmd *se_cmd = &cmd->se_cmd; 1981 struct se_cmd *se_cmd = &cmd->se_cmd;
1853 struct isert_cmd *isert_cmd = container_of(cmd, 1982 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1854 struct isert_cmd, iscsi_cmd);
1855 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1856 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 1983 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1857 struct ib_send_wr *wr_failed, *send_wr;
1858 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1984 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1985 struct ib_send_wr *send_wr;
1859 struct ib_sge *ib_sge; 1986 struct ib_sge *ib_sge;
1860 struct scatterlist *sg; 1987 struct scatterlist *sg_start;
1861 u32 offset = 0, data_len, data_left, rdma_write_max; 1988 u32 sg_off = 0, sg_nents;
1862 int rc, ret = 0, count, sg_nents, i, ib_sge_cnt; 1989 u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
1863 1990 int ret = 0, count, i, ib_sge_cnt;
1864 pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length); 1991
1992 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
1993 data_left = se_cmd->data_length;
1994 iscsit_increment_maxcmdsn(cmd, conn->sess);
1995 cmd->stat_sn = conn->stat_sn++;
1996 } else {
1997 sg_off = cmd->write_data_done / PAGE_SIZE;
1998 data_left = se_cmd->data_length - cmd->write_data_done;
1999 offset = cmd->write_data_done;
2000 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2001 }
1865 2002
1866 sg = &se_cmd->t_data_sg[0]; 2003 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1867 sg_nents = se_cmd->t_data_nents; 2004 sg_nents = se_cmd->t_data_nents - sg_off;
1868 2005
1869 count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE); 2006 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2007 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2008 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1870 if (unlikely(!count)) { 2009 if (unlikely(!count)) {
1871 pr_err("Unable to map put_datain SGs\n"); 2010 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
1872 return -EINVAL; 2011 return -EINVAL;
1873 } 2012 }
1874 wr->sge = sg; 2013 wr->sge = sg_start;
1875 wr->num_sge = sg_nents; 2014 wr->num_sge = sg_nents;
1876 pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n", 2015 wr->cur_rdma_length = data_left;
1877 count, sg, sg_nents); 2016 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2017 isert_cmd, count, sg_start, sg_nents, data_left);
1878 2018
1879 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); 2019 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
1880 if (!ib_sge) { 2020 if (!ib_sge) {
1881 pr_warn("Unable to allocate datain ib_sge\n"); 2021 pr_warn("Unable to allocate ib_sge\n");
1882 ret = -ENOMEM; 2022 ret = -ENOMEM;
1883 goto unmap_sg; 2023 goto unmap_sg;
1884 } 2024 }
1885 isert_cmd->ib_sge = ib_sge; 2025 wr->ib_sge = ib_sge;
1886
1887 pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
1888 ib_sge, se_cmd->t_data_nents);
1889 2026
1890 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); 2027 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
1891 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2028 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
1892 GFP_KERNEL); 2029 GFP_KERNEL);
1893 if (!wr->send_wr) { 2030 if (!wr->send_wr) {
1894 pr_err("Unable to allocate wr->send_wr\n"); 2031 pr_debug("Unable to allocate wr->send_wr\n");
1895 ret = -ENOMEM; 2032 ret = -ENOMEM;
1896 goto unmap_sg; 2033 goto unmap_sg;
1897 } 2034 }
1898 pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
1899 wr->send_wr, wr->send_wr_num);
1900
1901 iscsit_increment_maxcmdsn(cmd, conn->sess);
1902 cmd->stat_sn = conn->stat_sn++;
1903 2035
1904 wr->isert_cmd = isert_cmd; 2036 wr->isert_cmd = isert_cmd;
1905 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2037 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
1906 data_left = se_cmd->data_length;
1907 2038
1908 for (i = 0; i < wr->send_wr_num; i++) { 2039 for (i = 0; i < wr->send_wr_num; i++) {
1909 send_wr = &isert_cmd->rdma_wr.send_wr[i]; 2040 send_wr = &isert_cmd->rdma_wr.send_wr[i];
1910 data_len = min(data_left, rdma_write_max); 2041 data_len = min(data_left, rdma_write_max);
1911 2042
1912 send_wr->opcode = IB_WR_RDMA_WRITE;
1913 send_wr->send_flags = 0; 2043 send_wr->send_flags = 0;
1914 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; 2044 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
1915 send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2045 send_wr->opcode = IB_WR_RDMA_WRITE;
2046 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2047 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2048 if (i + 1 == wr->send_wr_num)
2049 send_wr->next = &isert_cmd->tx_desc.send_wr;
2050 else
2051 send_wr->next = &wr->send_wr[i + 1];
2052 } else {
2053 send_wr->opcode = IB_WR_RDMA_READ;
2054 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2055 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2056 if (i + 1 == wr->send_wr_num)
2057 send_wr->send_flags = IB_SEND_SIGNALED;
2058 else
2059 send_wr->next = &wr->send_wr[i + 1];
2060 }
1916 2061
1917 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2062 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
1918 send_wr, data_len, offset); 2063 send_wr, data_len, offset);
1919 ib_sge += ib_sge_cnt; 2064 ib_sge += ib_sge_cnt;
1920 2065
1921 if (i + 1 == wr->send_wr_num)
1922 send_wr->next = &isert_cmd->tx_desc.send_wr;
1923 else
1924 send_wr->next = &wr->send_wr[i + 1];
1925
1926 offset += data_len; 2066 offset += data_len;
2067 va_offset += data_len;
1927 data_left -= data_len; 2068 data_left -= data_len;
1928 } 2069 }
1929 /*
1930 * Build isert_conn->tx_desc for iSCSI response PDU and attach
1931 */
1932 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1933 iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
1934 &isert_cmd->tx_desc.iscsi_header);
1935 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1936 isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
1937 2070
1938 atomic_inc(&isert_conn->post_send_buf_count); 2071 return 0;
2072unmap_sg:
2073 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2074 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2075 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2076 return ret;
2077}
1939 2078
1940 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2079static int
1941 if (rc) { 2080isert_map_fr_pagelist(struct ib_device *ib_dev,
1942 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2081 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
1943 atomic_dec(&isert_conn->post_send_buf_count); 2082{
2083 u64 start_addr, end_addr, page, chunk_start = 0;
2084 struct scatterlist *tmp_sg;
2085 int i = 0, new_chunk, last_ent, n_pages;
2086
2087 n_pages = 0;
2088 new_chunk = 1;
2089 last_ent = sg_nents - 1;
2090 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2091 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2092 if (new_chunk)
2093 chunk_start = start_addr;
2094 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2095
2096 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2097 i, (unsigned long long)tmp_sg->dma_address,
2098 tmp_sg->length);
2099
2100 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2101 new_chunk = 0;
2102 continue;
2103 }
2104 new_chunk = 1;
2105
2106 page = chunk_start & PAGE_MASK;
2107 do {
2108 fr_pl[n_pages++] = page;
2109 pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2110 n_pages - 1, page);
2111 page += PAGE_SIZE;
2112 } while (page < end_addr);
1944 } 2113 }
1945 pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
1946 return 1;
1947 2114
1948unmap_sg: 2115 return n_pages;
1949 ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE); 2116}
2117
2118static int
2119isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2120 struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
2121 struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
2122{
2123 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2124 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2125 struct scatterlist *sg_start;
2126 u32 sg_off, page_off;
2127 struct ib_send_wr fr_wr, inv_wr;
2128 struct ib_send_wr *bad_wr, *wr = NULL;
2129 u8 key;
2130 int ret, sg_nents, pagelist_len;
2131
2132 sg_off = offset / PAGE_SIZE;
2133 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2134 sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
2135 ISCSI_ISER_SG_TABLESIZE);
2136 page_off = offset % PAGE_SIZE;
2137
2138 pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
2139 isert_cmd, fr_desc, sg_nents, sg_off, offset);
2140
2141 pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
2142 &fr_desc->data_frpl->page_list[0]);
2143
2144 if (!fr_desc->valid) {
2145 memset(&inv_wr, 0, sizeof(inv_wr));
2146 inv_wr.opcode = IB_WR_LOCAL_INV;
2147 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
2148 wr = &inv_wr;
2149 /* Bump the key */
2150 key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
2151 ib_update_fast_reg_key(fr_desc->data_mr, ++key);
2152 }
2153
2154 /* Prepare FASTREG WR */
2155 memset(&fr_wr, 0, sizeof(fr_wr));
2156 fr_wr.opcode = IB_WR_FAST_REG_MR;
2157 fr_wr.wr.fast_reg.iova_start =
2158 fr_desc->data_frpl->page_list[0] + page_off;
2159 fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
2160 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2161 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2162 fr_wr.wr.fast_reg.length = data_len;
2163 fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
2164 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2165
2166 if (!wr)
2167 wr = &fr_wr;
2168 else
2169 wr->next = &fr_wr;
2170
2171 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2172 if (ret) {
2173 pr_err("fast registration failed, ret:%d\n", ret);
2174 return ret;
2175 }
2176 fr_desc->valid = false;
2177
2178 ib_sge->lkey = fr_desc->data_mr->lkey;
2179 ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2180 ib_sge->length = data_len;
2181
2182 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
2183 ib_sge->addr, ib_sge->length, ib_sge->lkey);
2184
1950 return ret; 2185 return ret;
1951} 2186}
1952 2187
1953static int 2188static int
1954isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) 2189isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2190 struct isert_rdma_wr *wr)
1955{ 2191{
1956 struct se_cmd *se_cmd = &cmd->se_cmd; 2192 struct se_cmd *se_cmd = &cmd->se_cmd;
1957 struct isert_cmd *isert_cmd = container_of(cmd, 2193 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1958 struct isert_cmd, iscsi_cmd);
1959 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1960 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2194 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1961 struct ib_send_wr *wr_failed, *send_wr;
1962 struct ib_sge *ib_sge;
1963 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 2195 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2196 struct ib_send_wr *send_wr;
2197 struct ib_sge *ib_sge;
1964 struct scatterlist *sg_start; 2198 struct scatterlist *sg_start;
1965 u32 sg_off, sg_nents, page_off, va_offset = 0; 2199 struct fast_reg_descriptor *fr_desc;
2200 u32 sg_off = 0, sg_nents;
1966 u32 offset = 0, data_len, data_left, rdma_write_max; 2201 u32 offset = 0, data_len, data_left, rdma_write_max;
1967 int rc, ret = 0, count, i, ib_sge_cnt; 2202 int ret = 0, count;
2203 unsigned long flags;
1968 2204
1969 pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n", 2205 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
1970 se_cmd->data_length, cmd->write_data_done); 2206 data_left = se_cmd->data_length;
2207 iscsit_increment_maxcmdsn(cmd, conn->sess);
2208 cmd->stat_sn = conn->stat_sn++;
2209 } else {
2210 sg_off = cmd->write_data_done / PAGE_SIZE;
2211 data_left = se_cmd->data_length - cmd->write_data_done;
2212 offset = cmd->write_data_done;
2213 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2214 }
1971 2215
1972 sg_off = cmd->write_data_done / PAGE_SIZE;
1973 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 2216 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1974 page_off = cmd->write_data_done % PAGE_SIZE;
1975
1976 pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
1977 sg_off, sg_start, page_off);
1978
1979 data_left = se_cmd->data_length - cmd->write_data_done;
1980 sg_nents = se_cmd->t_data_nents - sg_off; 2217 sg_nents = se_cmd->t_data_nents - sg_off;
1981 2218
1982 pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n", 2219 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
1983 data_left, sg_nents); 2220 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1984 2221 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1985 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
1986 if (unlikely(!count)) { 2222 if (unlikely(!count)) {
1987 pr_err("Unable to map get_dataout SGs\n"); 2223 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
1988 return -EINVAL; 2224 return -EINVAL;
1989 } 2225 }
1990 wr->sge = sg_start; 2226 wr->sge = sg_start;
1991 wr->num_sge = sg_nents; 2227 wr->num_sge = sg_nents;
1992 pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n", 2228 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1993 count, sg_start, sg_nents); 2229 isert_cmd, count, sg_start, sg_nents, data_left);
1994 2230
1995 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); 2231 memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
1996 if (!ib_sge) { 2232 ib_sge = &wr->s_ib_sge;
1997 pr_warn("Unable to allocate dataout ib_sge\n"); 2233 wr->ib_sge = ib_sge;
1998 ret = -ENOMEM; 2234
1999 goto unmap_sg; 2235 wr->send_wr_num = 1;
2236 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2237 wr->send_wr = &wr->s_send_wr;
2238
2239 wr->isert_cmd = isert_cmd;
2240 rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
2241
2242 send_wr = &isert_cmd->rdma_wr.s_send_wr;
2243 send_wr->sg_list = ib_sge;
2244 send_wr->num_sge = 1;
2245 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2246 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2247 send_wr->opcode = IB_WR_RDMA_WRITE;
2248 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2249 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2250 send_wr->send_flags = 0;
2251 send_wr->next = &isert_cmd->tx_desc.send_wr;
2252 } else {
2253 send_wr->opcode = IB_WR_RDMA_READ;
2254 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2255 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2256 send_wr->send_flags = IB_SEND_SIGNALED;
2000 } 2257 }
2001 isert_cmd->ib_sge = ib_sge;
2002 2258
2003 pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n", 2259 data_len = min(data_left, rdma_write_max);
2004 ib_sge, sg_nents); 2260 wr->cur_rdma_length = data_len;
2005 2261
2006 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); 2262 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2007 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2263 fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
2008 GFP_KERNEL); 2264 struct fast_reg_descriptor, list);
2009 if (!wr->send_wr) { 2265 list_del(&fr_desc->list);
2010 pr_debug("Unable to allocate wr->send_wr\n"); 2266 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2011 ret = -ENOMEM; 2267 wr->fr_desc = fr_desc;
2268
2269 ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
2270 ib_sge, offset, data_len);
2271 if (ret) {
2272 list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
2012 goto unmap_sg; 2273 goto unmap_sg;
2013 } 2274 }
2014 pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
2015 wr->send_wr, wr->send_wr_num);
2016 2275
2017 isert_cmd->tx_desc.isert_cmd = isert_cmd; 2276 return 0;
2018 2277
2019 wr->iser_ib_op = ISER_IB_RDMA_READ; 2278unmap_sg:
2020 wr->isert_cmd = isert_cmd; 2279 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2021 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2280 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2022 offset = cmd->write_data_done; 2281 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2282 return ret;
2283}
2023 2284
2024 for (i = 0; i < wr->send_wr_num; i++) { 2285static int
2025 send_wr = &isert_cmd->rdma_wr.send_wr[i]; 2286isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2026 data_len = min(data_left, rdma_write_max); 2287{
2288 struct se_cmd *se_cmd = &cmd->se_cmd;
2289 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2290 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2291 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2292 struct isert_device *device = isert_conn->conn_device;
2293 struct ib_send_wr *wr_failed;
2294 int rc;
2027 2295
2028 send_wr->opcode = IB_WR_RDMA_READ; 2296 pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
2029 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; 2297 isert_cmd, se_cmd->data_length);
2030 send_wr->wr.rdma.rkey = isert_cmd->write_stag; 2298 wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2299 rc = device->reg_rdma_mem(conn, cmd, wr);
2300 if (rc) {
2301 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2302 return rc;
2303 }
2031 2304
2032 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2305 /*
2033 send_wr, data_len, offset); 2306 * Build isert_conn->tx_desc for iSCSI response PDU and attach
2034 ib_sge += ib_sge_cnt; 2307 */
2308 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2309 iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
2310 &isert_cmd->tx_desc.iscsi_header);
2311 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2312 isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
2035 2313
2036 if (i + 1 == wr->send_wr_num) 2314 atomic_inc(&isert_conn->post_send_buf_count);
2037 send_wr->send_flags = IB_SEND_SIGNALED;
2038 else
2039 send_wr->next = &wr->send_wr[i + 1];
2040 2315
2041 offset += data_len; 2316 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2042 va_offset += data_len; 2317 if (rc) {
2043 data_left -= data_len; 2318 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2319 atomic_dec(&isert_conn->post_send_buf_count);
2320 }
2321 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
2322 isert_cmd);
2323
2324 return 1;
2325}
2326
2327static int
2328isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2329{
2330 struct se_cmd *se_cmd = &cmd->se_cmd;
2331 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2332 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2333 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2334 struct isert_device *device = isert_conn->conn_device;
2335 struct ib_send_wr *wr_failed;
2336 int rc;
2337
2338 pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2339 isert_cmd, se_cmd->data_length, cmd->write_data_done);
2340 wr->iser_ib_op = ISER_IB_RDMA_READ;
2341 rc = device->reg_rdma_mem(conn, cmd, wr);
2342 if (rc) {
2343 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2344 return rc;
2044 } 2345 }
2045 2346
2046 atomic_inc(&isert_conn->post_send_buf_count); 2347 atomic_inc(&isert_conn->post_send_buf_count);
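
isert_fast_reg_mr() above is the heart of the FRWR path: if the descriptor's previous registration is still live (!fr_desc->valid), an IB_WR_LOCAL_INV is chained ahead of the IB_WR_FAST_REG_MR, and the low byte of the rkey is rotated with ib_update_fast_reg_key() so the refreshed registration cannot be confused with a stale key still held by the peer. A condensed sketch of the chain, posted with a single ib_post_send():

/* Condensed from isert_fast_reg_mr(); error handling trimmed. */
struct ib_send_wr inv_wr, fr_wr, *first = &fr_wr, *bad_wr;
u8 key;

memset(&fr_wr, 0, sizeof(fr_wr));
if (!fr_desc->valid) {
	memset(&inv_wr, 0, sizeof(inv_wr));
	inv_wr.opcode = IB_WR_LOCAL_INV;
	inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
	inv_wr.next = &fr_wr;
	first = &inv_wr;
	/* rotate the 8-bit key so the refreshed rkey differs */
	key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(fr_desc->data_mr, ++key);
}

fr_wr.opcode = IB_WR_FAST_REG_MR;
fr_wr.wr.fast_reg.iova_start = fr_desc->data_frpl->page_list[0] + page_off;
fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
fr_wr.wr.fast_reg.page_list_len = pagelist_len;
fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
fr_wr.wr.fast_reg.length = data_len;
fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

ret = ib_post_send(isert_conn->conn_qp, first, &bad_wr);
if (ret)
	return ret;
fr_desc->valid = false;	/* in use until invalidated again */
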
@@ -2050,12 +2351,10 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2050 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2351 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2051 atomic_dec(&isert_conn->post_send_buf_count); 2352 atomic_dec(&isert_conn->post_send_buf_count);
2052 } 2353 }
2053 pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n"); 2354 pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2054 return 0; 2355 isert_cmd);
2055 2356
2056unmap_sg: 2357 return 0;
2057 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
2058 return ret;
2059} 2358}
2060 2359
2061static int 2360static int
@@ -2224,6 +2523,14 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2224 int ret; 2523 int ret;
2225 2524
2226 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); 2525 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2526 /*
2527 * For login requests after the first PDU, isert_rx_login_req() will
2528 * kick schedule_delayed_work(&conn->login_work) as the packet is
2529 * received, which turns this callback from iscsi_target_do_login_rx()
2530 * into a NOP.
2531 */
2532 if (!login->first_request)
2533 return 0;
2227 2534
2228 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); 2535 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2229 if (ret) 2536 if (ret)
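
Together with the isert_rx_login_req() hunk earlier, this splits login RX into two modes: the first request completes conn_login_comp to wake the blocked caller of this function, while every later request schedules conn->login_work from the receive path itself, making this callback a NOP. A comment-style sketch of the resulting flow, as far as the hunks here show it:

/*
 * First login PDU:
 *   isert_rx_login_req() copies PDU into login->req_buf,
 *   then complete(&conn_login_comp) wakes isert_get_login_rx().
 *
 * Subsequent login PDUs:
 *   isert_rx_login_req() calls schedule_delayed_work(&conn->login_work),
 *   and isert_get_login_rx() returns 0 immediately via the
 *   !login->first_request early-out above.
 */
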
@@ -2393,12 +2700,12 @@ static void isert_free_conn(struct iscsi_conn *conn)
2393static struct iscsit_transport iser_target_transport = { 2700static struct iscsit_transport iser_target_transport = {
2394 .name = "IB/iSER", 2701 .name = "IB/iSER",
2395 .transport_type = ISCSI_INFINIBAND, 2702 .transport_type = ISCSI_INFINIBAND,
2703 .priv_size = sizeof(struct isert_cmd),
2396 .owner = THIS_MODULE, 2704 .owner = THIS_MODULE,
2397 .iscsit_setup_np = isert_setup_np, 2705 .iscsit_setup_np = isert_setup_np,
2398 .iscsit_accept_np = isert_accept_np, 2706 .iscsit_accept_np = isert_accept_np,
2399 .iscsit_free_np = isert_free_np, 2707 .iscsit_free_np = isert_free_np,
2400 .iscsit_free_conn = isert_free_conn, 2708 .iscsit_free_conn = isert_free_conn,
2401 .iscsit_alloc_cmd = isert_alloc_cmd,
2402 .iscsit_get_login_rx = isert_get_login_rx, 2709 .iscsit_get_login_rx = isert_get_login_rx,
2403 .iscsit_put_login_tx = isert_put_login_tx, 2710 .iscsit_put_login_tx = isert_put_login_tx,
2404 .iscsit_immediate_queue = isert_immediate_queue, 2711 .iscsit_immediate_queue = isert_immediate_queue,
@@ -2425,21 +2732,10 @@ static int __init isert_init(void)
2425 goto destroy_rx_wq; 2732 goto destroy_rx_wq;
2426 } 2733 }
2427 2734
2428 isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
2429 sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
2430 0, NULL);
2431 if (!isert_cmd_cache) {
2432 pr_err("Unable to create isert_cmd_cache\n");
2433 ret = -ENOMEM;
2434 goto destroy_tx_cq;
2435 }
2436
2437 iscsit_register_transport(&iser_target_transport); 2735 iscsit_register_transport(&iser_target_transport);
2438 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); 2736 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2439 return 0; 2737 return 0;
2440 2738
2441destroy_tx_cq:
2442 destroy_workqueue(isert_comp_wq);
2443destroy_rx_wq: 2739destroy_rx_wq:
2444 destroy_workqueue(isert_rx_wq); 2740 destroy_workqueue(isert_rx_wq);
2445 return ret; 2741 return ret;
@@ -2447,7 +2743,6 @@ destroy_rx_wq:
2447 2743
2448static void __exit isert_exit(void) 2744static void __exit isert_exit(void)
2449{ 2745{
2450 kmem_cache_destroy(isert_cmd_cache);
2451 destroy_workqueue(isert_comp_wq); 2746 destroy_workqueue(isert_comp_wq);
2452 destroy_workqueue(isert_rx_wq); 2747 destroy_workqueue(isert_rx_wq);
2453 iscsit_unregister_transport(&iser_target_transport); 2748 iscsit_unregister_transport(&iser_target_transport);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 191117b5b508..631f2090f0b8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -5,6 +5,7 @@
5#include <rdma/rdma_cm.h> 5#include <rdma/rdma_cm.h>
6 6
7#define ISERT_RDMA_LISTEN_BACKLOG 10 7#define ISERT_RDMA_LISTEN_BACKLOG 10
8#define ISCSI_ISER_SG_TABLESIZE 256
8 9
9enum isert_desc_type { 10enum isert_desc_type {
10 ISCSI_TX_CONTROL, 11 ISCSI_TX_CONTROL,
@@ -45,15 +46,26 @@ struct iser_tx_desc {
45 struct ib_send_wr send_wr; 46 struct ib_send_wr send_wr;
46} __packed; 47} __packed;
47 48
49struct fast_reg_descriptor {
50 struct list_head list;
51 struct ib_mr *data_mr;
52 struct ib_fast_reg_page_list *data_frpl;
53 bool valid;
54};
55
48struct isert_rdma_wr { 56struct isert_rdma_wr {
49 struct list_head wr_list; 57 struct list_head wr_list;
50 struct isert_cmd *isert_cmd; 58 struct isert_cmd *isert_cmd;
51 enum iser_ib_op_code iser_ib_op; 59 enum iser_ib_op_code iser_ib_op;
52 struct ib_sge *ib_sge; 60 struct ib_sge *ib_sge;
61 struct ib_sge s_ib_sge;
53 int num_sge; 62 int num_sge;
54 struct scatterlist *sge; 63 struct scatterlist *sge;
55 int send_wr_num; 64 int send_wr_num;
56 struct ib_send_wr *send_wr; 65 struct ib_send_wr *send_wr;
66 struct ib_send_wr s_send_wr;
67 u32 cur_rdma_length;
68 struct fast_reg_descriptor *fr_desc;
57}; 69};
58 70
59struct isert_cmd { 71struct isert_cmd {
@@ -67,8 +79,7 @@ struct isert_cmd {
67 u32 write_va_off; 79 u32 write_va_off;
68 u32 rdma_wr_num; 80 u32 rdma_wr_num;
69 struct isert_conn *conn; 81 struct isert_conn *conn;
70 struct iscsi_cmd iscsi_cmd; 82 struct iscsi_cmd *iscsi_cmd;
71 struct ib_sge *ib_sge;
72 struct iser_tx_desc tx_desc; 83 struct iser_tx_desc tx_desc;
73 struct isert_rdma_wr rdma_wr; 84 struct isert_rdma_wr rdma_wr;
74 struct work_struct comp_work; 85 struct work_struct comp_work;
@@ -106,6 +117,10 @@ struct isert_conn {
106 wait_queue_head_t conn_wait; 117 wait_queue_head_t conn_wait;
107 wait_queue_head_t conn_wait_comp_err; 118 wait_queue_head_t conn_wait_comp_err;
108 struct kref conn_kref; 119 struct kref conn_kref;
120 struct list_head conn_frwr_pool;
121 int conn_frwr_pool_size;
122 /* lock to protect frwr_pool */
123 spinlock_t conn_lock;
109}; 124};
110 125
111#define ISERT_MAX_CQ 64 126#define ISERT_MAX_CQ 64
@@ -118,6 +133,7 @@ struct isert_cq_desc {
118}; 133};
119 134
120struct isert_device { 135struct isert_device {
136 int use_frwr;
121 int cqs_used; 137 int cqs_used;
122 int refcount; 138 int refcount;
123 int cq_active_qps[ISERT_MAX_CQ]; 139 int cq_active_qps[ISERT_MAX_CQ];
@@ -128,6 +144,12 @@ struct isert_device {
128 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; 144 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
129 struct isert_cq_desc *cq_desc; 145 struct isert_cq_desc *cq_desc;
130 struct list_head dev_node; 146 struct list_head dev_node;
147 struct ib_device_attr dev_attr;
148 int (*reg_rdma_mem)(struct iscsi_conn *conn,
149 struct iscsi_cmd *cmd,
150 struct isert_rdma_wr *wr);
151 void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
152 struct isert_conn *isert_conn);
131}; 153};
132 154
133struct isert_np { 155struct isert_np {