 drivers/infiniband/ulp/isert/ib_isert.c | 367 +++++++++++++++++++++++++++++--
 drivers/infiniband/ulp/isert/ib_isert.h |  17 ++++-
 2 files changed, 371 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 4c3d66009237..51c3bed6c12b 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -45,6 +45,11 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                struct isert_rdma_wr *wr);
+static void
+isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+static int
+isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+                    struct isert_rdma_wr *wr);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -85,14 +90,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 {
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
-       struct ib_device_attr devattr;
        int ret, index, min_index = 0;
 
-       memset(&devattr, 0, sizeof(struct ib_device_attr));
-       ret = isert_query_device(cma_id->device, &devattr);
-       if (ret)
-               return ret;
-
        mutex_lock(&device_list_mutex);
        for (index = 0; index < device->cqs_used; index++)
                if (device->cq_active_qps[index] <
@@ -113,7 +112,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READ..
         */
-       attr.cap.max_send_sge = devattr.max_sge - 2;
+       attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
        isert_conn->max_sge = attr.cap.max_send_sge;
 
        attr.cap.max_recv_sge = 1;
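
The per-connection isert_query_device() call is gone: QP setup now reads attributes cached in struct isert_device, queried once when the device resources are created (next hunk). A standalone sketch of the query-once-and-cache pattern, with purely illustrative names rather than kernel APIs:

    /* Sketch: query device attributes once at device-init time and cache
     * them for every later connection setup. Illustrative userspace
     * analogue; none of these names come from the patch itself. */
    #include <stdio.h>

    struct dev_attr { int max_sge; };

    struct device {
            struct dev_attr dev_attr;   /* filled at init, read afterwards */
    };

    static int query_device(struct dev_attr *attr)
    {
            attr->max_sge = 32;         /* stand-in for a slow HCA query */
            return 0;
    }

    static int device_init(struct device *d)
    {
            return query_device(&d->dev_attr);  /* once per device */
    }

    static void conn_setup_qp(const struct device *d)
    {
            /* the per-connection path only reads the cache */
            int max_send_sge = d->dev_attr.max_sge - 2;

            printf("max_send_sge = %d\n", max_send_sge);
    }

    int main(void)
    {
            struct device d;

            if (device_init(&d))
                    return 1;
            conn_setup_qp(&d);
            return 0;
    }

The same cached attributes also let isert_create_device_ib_res() test capability bits below without issuing another query.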
@@ -215,18 +214,31 @@ isert_create_device_ib_res(struct isert_device *device)
 {
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
+       struct ib_device_attr *dev_attr;
        int ret = 0, i, j;
 
+       dev_attr = &device->dev_attr;
+       ret = isert_query_device(ib_dev, dev_attr);
+       if (ret)
+               return ret;
+
        /* asign function handlers */
-       device->reg_rdma_mem = isert_map_rdma;
-       device->unreg_rdma_mem = isert_unmap_cmd;
+       if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+               device->use_frwr = 1;
+               device->reg_rdma_mem = isert_reg_rdma_frwr;
+               device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+       } else {
+               device->use_frwr = 0;
+               device->reg_rdma_mem = isert_map_rdma;
+               device->unreg_rdma_mem = isert_unmap_cmd;
+       }
 
        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-       pr_debug("Using %d CQs, device %s supports %d vectors\n",
+       pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
                 device->cqs_used, device->ib_device->name,
-                device->ib_device->num_comp_vectors);
+                device->ib_device->num_comp_vectors, device->use_frwr);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
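
The registration strategy is now selected once per device: when the HCA advertises IB_DEVICE_MEM_MGT_EXTENSIONS, the reg_rdma_mem/unreg_rdma_mem hooks bind to the new FRWR routines, otherwise to the existing map/unmap path, so per-command code never re-tests the capability. A minimal userspace sketch of that function-pointer dispatch (all names illustrative, not the kernel API):

    /* Sketch: pick a memory-registration backend once from a capability
     * bit, then dispatch through function pointers on the fast path. */
    #include <stdio.h>

    #define CAP_MEM_MGT_EXTENSIONS (1u << 21)   /* illustrative bit value */

    struct dev;
    typedef int (*reg_fn)(struct dev *d);

    struct dev {
            unsigned int cap_flags;
            reg_fn reg_mem;
    };

    static int reg_frwr(struct dev *d) { (void)d; puts("fast registration"); return 0; }
    static int reg_map(struct dev *d)  { (void)d; puts("plain dma mapping"); return 0; }

    static void bind_backend(struct dev *d)
    {
            d->reg_mem = (d->cap_flags & CAP_MEM_MGT_EXTENSIONS) ?
                         reg_frwr : reg_map;
    }

    int main(void)
    {
            struct dev d = { .cap_flags = CAP_MEM_MGT_EXTENSIONS };

            bind_backend(&d);
            return d.reg_mem(&d);   /* callers never re-check the flag */
    }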
@@ -372,6 +384,85 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
        return device;
 }
 
+static void
+isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+{
+       struct fast_reg_descriptor *fr_desc, *tmp;
+       int i = 0;
+
+       if (list_empty(&isert_conn->conn_frwr_pool))
+               return;
+
+       pr_debug("Freeing conn %p frwr pool", isert_conn);
+
+       list_for_each_entry_safe(fr_desc, tmp,
+                                &isert_conn->conn_frwr_pool, list) {
+               list_del(&fr_desc->list);
+               ib_free_fast_reg_page_list(fr_desc->data_frpl);
+               ib_dereg_mr(fr_desc->data_mr);
+               kfree(fr_desc);
+               ++i;
+       }
+
+       if (i < isert_conn->conn_frwr_pool_size)
+               pr_warn("Pool still has %d regions registered\n",
+                       isert_conn->conn_frwr_pool_size - i);
+}
+
+static int
+isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+{
+       struct fast_reg_descriptor *fr_desc;
+       struct isert_device *device = isert_conn->conn_device;
+       int i, ret;
+
+       INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
+       isert_conn->conn_frwr_pool_size = 0;
+       for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+               fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
+               if (!fr_desc) {
+                       pr_err("Failed to allocate fast_reg descriptor\n");
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               fr_desc->data_frpl =
+                       ib_alloc_fast_reg_page_list(device->ib_device,
+                                                   ISCSI_ISER_SG_TABLESIZE);
+               if (IS_ERR(fr_desc->data_frpl)) {
+                       pr_err("Failed to allocate fr_pg_list err=%ld\n",
+                              PTR_ERR(fr_desc->data_frpl));
+                       ret = PTR_ERR(fr_desc->data_frpl);
+                       goto err;
+               }
+
+               fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
+                                                       ISCSI_ISER_SG_TABLESIZE);
+               if (IS_ERR(fr_desc->data_mr)) {
+                       pr_err("Failed to allocate frmr err=%ld\n",
+                              PTR_ERR(fr_desc->data_mr));
+                       ret = PTR_ERR(fr_desc->data_mr);
+                       ib_free_fast_reg_page_list(fr_desc->data_frpl);
+                       goto err;
+               }
+               pr_debug("Create fr_desc %p page_list %p\n",
+                        fr_desc, fr_desc->data_frpl->page_list);
+
+               fr_desc->valid = true;
+               list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+               isert_conn->conn_frwr_pool_size++;
+       }
+
+       pr_debug("Creating conn %p frwr pool size=%d",
+                isert_conn, isert_conn->conn_frwr_pool_size);
+
+       return 0;
+
+err:
+       isert_conn_free_frwr_pool(isert_conn);
+       return ret;
+}
+
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
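
The pool holds ISCSI_DEF_XMIT_CMDS_MAX descriptors, each pairing a fast-reg MR with a page list sized for ISCSI_ISER_SG_TABLESIZE pages, so the I/O path checks descriptors out instead of allocating. Because the pool is sized to the maximum number of outstanding commands, the checkout in isert_reg_rdma_frwr() below can assume the list is never empty. A userspace analogue of the checkout/return discipline (illustrative names; the patch itself serializes on conn_lock, taken with spin_lock_irqsave() at submission and spin_lock_bh() on the completion path):

    /* Sketch: a fixed pool of registration descriptors, checked out per
     * I/O and returned on completion, so the data path never allocates.
     * Userspace analogue with a plain mutex; names are illustrative. */
    #include <pthread.h>
    #include <stdio.h>

    struct fr_desc {
            struct fr_desc *next;
            int valid;              /* false: needs invalidate before reuse */
    };

    struct frwr_pool {
            pthread_mutex_t lock;
            struct fr_desc *head;
    };

    static struct fr_desc *pool_get(struct frwr_pool *p)
    {
            struct fr_desc *d;

            pthread_mutex_lock(&p->lock);
            d = p->head;
            if (d)
                    p->head = d->next;
            pthread_mutex_unlock(&p->lock);
            return d;               /* NULL: pool exhausted */
    }

    static void pool_put(struct frwr_pool *p, struct fr_desc *d)
    {
            pthread_mutex_lock(&p->lock);
            d->next = p->head;
            p->head = d;
            pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
            struct frwr_pool pool = { PTHREAD_MUTEX_INITIALIZER, NULL };
            struct fr_desc descs[2] = { { 0 } };
            struct fr_desc *d;
            int i;

            for (i = 0; i < 2; i++)         /* create_frwr_pool analogue */
                    pool_put(&pool, &descs[i]);

            d = pool_get(&pool);            /* submission path */
            printf("checked out %p\n", (void *)d);
            pool_put(&pool, d);             /* completion path */
            return 0;
    }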
@@ -398,6 +489,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
+       spin_lock_init(&isert_conn->conn_lock);
 
        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
@@ -455,6 +547,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        isert_conn->conn_pd = device->dev_pd;
        isert_conn->conn_mr = device->dev_mr;
 
+       if (device->use_frwr) {
+               ret = isert_conn_create_frwr_pool(isert_conn);
+               if (ret) {
+                       pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
+                       goto out_frwr;
+               }
+       }
+
        ret = isert_conn_setup_qp(isert_conn, cma_id);
        if (ret)
                goto out_conn_dev;
@@ -468,6 +568,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        return 0;
 
 out_conn_dev:
+       if (device->use_frwr)
+               isert_conn_free_frwr_pool(isert_conn);
+out_frwr:
        isert_device_try_release(device);
 out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -491,6 +594,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
+       if (device->use_frwr)
+               isert_conn_free_frwr_pool(isert_conn);
+
        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
@@ -1266,6 +1372,36 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
+isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+{
+       struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       LIST_HEAD(unmap_list);
+
+       pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+
+       if (wr->fr_desc) {
+               pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+                        isert_cmd, wr->fr_desc);
+               spin_lock_bh(&isert_conn->conn_lock);
+               list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+               spin_unlock_bh(&isert_conn->conn_lock);
+               wr->fr_desc = NULL;
+       }
+
+       if (wr->sge) {
+               pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+               ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
+                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               wr->sge = NULL;
+       }
+
+       wr->ib_sge = NULL;
+       wr->send_wr = NULL;
+}
+
+static void
 isert_put_cmd(struct isert_cmd *isert_cmd)
 {
        struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
@@ -1941,11 +2077,216 @@ unmap_sg:
 }
 
 static int
+isert_map_fr_pagelist(struct ib_device *ib_dev,
+                     struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
+{
+       u64 start_addr, end_addr, page, chunk_start = 0;
+       struct scatterlist *tmp_sg;
+       int i = 0, new_chunk, last_ent, n_pages;
+
+       n_pages = 0;
+       new_chunk = 1;
+       last_ent = sg_nents - 1;
+       for_each_sg(sg_start, tmp_sg, sg_nents, i) {
+               start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
+               if (new_chunk)
+                       chunk_start = start_addr;
+               end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
+
+               pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
+                        i, (unsigned long long)tmp_sg->dma_address,
+                        tmp_sg->length);
+
+               if ((end_addr & ~PAGE_MASK) && i < last_ent) {
+                       new_chunk = 0;
+                       continue;
+               }
+               new_chunk = 1;
+
+               page = chunk_start & PAGE_MASK;
+               do {
+                       fr_pl[n_pages++] = page;
+                       pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
+                                n_pages - 1, page);
+                       page += PAGE_SIZE;
+               } while (page < end_addr);
+       }
+
+       return n_pages;
+}
+
+static int
+isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
+                 struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
+                 struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+{
+       struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct scatterlist *sg_start;
+       u32 sg_off, page_off;
+       struct ib_send_wr fr_wr, inv_wr;
+       struct ib_send_wr *bad_wr, *wr = NULL;
+       u8 key;
+       int ret, sg_nents, pagelist_len;
+
+       sg_off = offset / PAGE_SIZE;
+       sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+       sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
+                        ISCSI_ISER_SG_TABLESIZE);
+       page_off = offset % PAGE_SIZE;
+
+       pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
+                isert_cmd, fr_desc, sg_nents, sg_off, offset);
+
+       pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
+                                            &fr_desc->data_frpl->page_list[0]);
+
+       if (!fr_desc->valid) {
+               memset(&inv_wr, 0, sizeof(inv_wr));
+               inv_wr.opcode = IB_WR_LOCAL_INV;
+               inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+               wr = &inv_wr;
+               /* Bump the key */
+               key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
+               ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+       }
+
+       /* Prepare FASTREG WR */
+       memset(&fr_wr, 0, sizeof(fr_wr));
+       fr_wr.opcode = IB_WR_FAST_REG_MR;
+       fr_wr.wr.fast_reg.iova_start =
+               fr_desc->data_frpl->page_list[0] + page_off;
+       fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+       fr_wr.wr.fast_reg.page_list_len = pagelist_len;
+       fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+       fr_wr.wr.fast_reg.length = data_len;
+       fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+       fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
+
+       if (!wr)
+               wr = &fr_wr;
+       else
+               wr->next = &fr_wr;
+
+       ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+       if (ret) {
+               pr_err("fast registration failed, ret:%d\n", ret);
+               return ret;
+       }
+       fr_desc->valid = false;
+
+       ib_sge->lkey = fr_desc->data_mr->lkey;
+       ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
+       ib_sge->length = data_len;
+
+       pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+                ib_sge->addr, ib_sge->length, ib_sge->lkey);
+
+       return ret;
+}
+
+static int
+isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+                   struct isert_rdma_wr *wr)
+{
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct ib_send_wr *send_wr;
+       struct ib_sge *ib_sge;
+       struct scatterlist *sg_start;
+       struct fast_reg_descriptor *fr_desc;
+       u32 sg_off = 0, sg_nents;
+       u32 offset = 0, data_len, data_left, rdma_write_max;
+       int ret = 0, count;
+       unsigned long flags;
+
+       if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+               data_left = se_cmd->data_length;
+               iscsit_increment_maxcmdsn(cmd, conn->sess);
+               cmd->stat_sn = conn->stat_sn++;
+       } else {
+               sg_off = cmd->write_data_done / PAGE_SIZE;
+               data_left = se_cmd->data_length - cmd->write_data_done;
+               offset = cmd->write_data_done;
+               isert_cmd->tx_desc.isert_cmd = isert_cmd;
+       }
+
+       sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+       sg_nents = se_cmd->t_data_nents - sg_off;
+
+       count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
+                             (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+                             DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       if (unlikely(!count)) {
+               pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
+               return -EINVAL;
+       }
+       wr->sge = sg_start;
+       wr->num_sge = sg_nents;
+       pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+                isert_cmd, count, sg_start, sg_nents, data_left);
+
+       memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
+       ib_sge = &wr->s_ib_sge;
+       wr->ib_sge = ib_sge;
+
+       wr->send_wr_num = 1;
+       memset(&wr->s_send_wr, 0, sizeof(*send_wr));
+       wr->send_wr = &wr->s_send_wr;
+
+       wr->isert_cmd = isert_cmd;
+       rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
+
+       send_wr = &isert_cmd->rdma_wr.s_send_wr;
+       send_wr->sg_list = ib_sge;
+       send_wr->num_sge = 1;
+       send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+       if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+               send_wr->opcode = IB_WR_RDMA_WRITE;
+               send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
+               send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+               send_wr->send_flags = 0;
+               send_wr->next = &isert_cmd->tx_desc.send_wr;
+       } else {
+               send_wr->opcode = IB_WR_RDMA_READ;
+               send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
+               send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+               send_wr->send_flags = IB_SEND_SIGNALED;
+       }
+
+       data_len = min(data_left, rdma_write_max);
+       wr->cur_rdma_length = data_len;
+
+       spin_lock_irqsave(&isert_conn->conn_lock, flags);
+       fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+                                  struct fast_reg_descriptor, list);
+       list_del(&fr_desc->list);
+       spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+       wr->fr_desc = fr_desc;
+
+       ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+                               ib_sge, offset, data_len);
+       if (ret) {
+               list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+               goto unmap_sg;
+       }
+
+       return 0;
+
+unmap_sg:
+       ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
+                       (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       return ret;
+}
+
+static int
 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
        struct se_cmd *se_cmd = &cmd->se_cmd;
-       struct isert_cmd *isert_cmd = container_of(cmd,
-                       struct isert_cmd, iscsi_cmd);
+       struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_device *device = isert_conn->conn_device;
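
isert_map_fr_pagelist() flattens the DMA-mapped scatterlist into the page array consumed by IB_WR_FAST_REG_MR: a segment that ends mid-page is folded into the current chunk along with the entries that follow it, and each finished chunk emits one page-aligned address per PAGE_SIZE it covers. A standalone sketch of the same chunking logic over synthetic (addr, len) pairs, assuming 4 KiB pages (illustrative code, not the kernel function):

    /* Sketch of the page-list construction on synthetic segments. */
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    struct seg { unsigned long long addr; unsigned int len; };

    static int map_pagelist(const struct seg *sg, int nents,
                            unsigned long long *pl)
    {
            unsigned long long chunk_start = 0, end_addr, page;
            int i, n_pages = 0, new_chunk = 1;

            for (i = 0; i < nents; i++) {
                    if (new_chunk)
                            chunk_start = sg[i].addr;
                    end_addr = sg[i].addr + sg[i].len;

                    /* a non-final segment ending mid-page keeps the
                     * current chunk open and merges with the next one */
                    if ((end_addr & ~PAGE_MASK) && i < nents - 1) {
                            new_chunk = 0;
                            continue;
                    }
                    new_chunk = 1;

                    /* flush: one entry per page the chunk covers */
                    for (page = chunk_start & PAGE_MASK; page < end_addr;
                         page += PAGE_SIZE)
                            pl[n_pages++] = page;
            }
            return n_pages;
    }

    int main(void)
    {
            /* two page-sized segments plus a short tail */
            struct seg sg[] = {
                    { 0x10000, 4096 }, { 0x11000, 4096 }, { 0x20000, 512 }
            };
            unsigned long long pl[8];
            int i, n = map_pagelist(sg, 3, pl);

            for (i = 0; i < n; i++)     /* prints 0x10000, 0x11000, 0x20000 */
                    printf("page_list[%d] = 0x%llx\n", i, pl[i]);
            return 0;
    }

On descriptor reuse (fr_desc->valid == false), isert_fast_reg_mr() chains an IB_WR_LOCAL_INV ahead of the IB_WR_FAST_REG_MR and bumps the low byte of the rkey with ib_update_fast_reg_key(), so stale remote references to the previous registration cannot match the new mapping.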
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 76565030e4e7..631f2090f0b8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -5,6 +5,7 @@
 #include <rdma/rdma_cm.h>
 
 #define ISERT_RDMA_LISTEN_BACKLOG      10
+#define ISCSI_ISER_SG_TABLESIZE                256
 
 enum isert_desc_type {
        ISCSI_TX_CONTROL,
@@ -45,16 +46,26 @@ struct iser_tx_desc {
        struct ib_send_wr send_wr;
 } __packed;
 
+struct fast_reg_descriptor {
+       struct list_head        list;
+       struct ib_mr            *data_mr;
+       struct ib_fast_reg_page_list    *data_frpl;
+       bool                    valid;
+};
+
 struct isert_rdma_wr {
        struct list_head        wr_list;
        struct isert_cmd        *isert_cmd;
        enum iser_ib_op_code    iser_ib_op;
        struct ib_sge           *ib_sge;
+       struct ib_sge           s_ib_sge;
        int                     num_sge;
        struct scatterlist      *sge;
        int                     send_wr_num;
        struct ib_send_wr       *send_wr;
+       struct ib_send_wr       s_send_wr;
        u32                     cur_rdma_length;
+       struct fast_reg_descriptor *fr_desc;
 };
 
 struct isert_cmd {
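
The embedded s_ib_sge and s_send_wr fields give every command private storage for the single-SGE, single-work-request shape that FRWR produces; isert_reg_rdma_frwr() points ib_sge and send_wr at them, sparing the per-command allocations of the map_rdma path. A reduced illustration of the embed-and-point pattern (names illustrative):

    /* Sketch: embed single-entry storage in the descriptor and aim the
     * generic pointer fields at it, so the common one-registration case
     * allocates nothing. Reduced, illustrative form of isert_rdma_wr. */
    #include <stdio.h>
    #include <string.h>

    struct sge { unsigned long long addr; unsigned int length; };

    struct rdma_wr {
            struct sge *ib_sge;     /* may point at an external array... */
            struct sge s_ib_sge;    /* ...or at this embedded single entry */
    };

    static void setup_single_sge(struct rdma_wr *wr)
    {
            memset(&wr->s_ib_sge, 0, sizeof(wr->s_ib_sge));
            wr->ib_sge = &wr->s_ib_sge;  /* no allocation on the fast path */
    }

    int main(void)
    {
            struct rdma_wr wr;

            setup_single_sge(&wr);
            printf("ib_sge points at embedded storage: %d\n",
                   wr.ib_sge == &wr.s_ib_sge);
            return 0;
    }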
@@ -106,6 +117,10 @@ struct isert_conn {
        wait_queue_head_t       conn_wait;
        wait_queue_head_t       conn_wait_comp_err;
        struct kref             conn_kref;
+       struct list_head        conn_frwr_pool;
+       int                     conn_frwr_pool_size;
+       /* lock to protect frwr_pool */
+       spinlock_t              conn_lock;
 };
 
 #define ISERT_MAX_CQ 64
@@ -118,6 +133,7 @@ struct isert_cq_desc {
 };
 
 struct isert_device {
+       int                     use_frwr;
        int                     cqs_used;
        int                     refcount;
        int                     cq_active_qps[ISERT_MAX_CQ];
@@ -128,6 +144,7 @@ struct isert_device {
        struct ib_cq            *dev_tx_cq[ISERT_MAX_CQ];
        struct isert_cq_desc    *cq_desc;
        struct list_head        dev_node;
+       struct ib_device_attr   dev_attr;
        int                     (*reg_rdma_mem)(struct iscsi_conn *conn,
                                                struct iscsi_cmd *cmd,
                                                struct isert_rdma_wr *wr);