author:    Sagi Grimberg <sagig@mellanox.com>  2014-02-19 10:50:22 -0500
committer: Nicholas Bellinger <nab@linux-iscsi.org>  2014-04-07 04:48:45 -0400
commit:    e3d7e4c30c494431d492864448fbb16cdd7a6178
tree:      ff13db0fde856e95d66a631fd37bc61b1f7799b6
parent:    03abad9e8715563ab3eef9609ba4d16b32bfd22d
IB/isert: Introduce isert_map/unmap_data_buf
Export the map/unmap of a data buffer to a routine that may be
used in various places in the code, and keep the mapping data in
a designated descriptor. Also, let isert_fast_reg_mr decide
whether to use the global MR or perform a fast registration.

This commit does not change any functionality.
(Fix context change for v3.14-rc6 code - nab)
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
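
In short: callers now map a command's scatterlist once up front, the
mapping state lives in the isert_data_buf descriptor embedded in the
RDMA work request, and teardown goes through the same descriptor. A
condensed sketch of the calling convention (identifiers as in the
hunks below; error handling and work-request setup are elided):

	u32 offset;
	int ret;

	/* An RDMA_READ resumes after data already received; a WRITE starts at 0. */
	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	/* ... build and post work requests from wr->data.sg / wr->data.dma_nents ... */

	/* Completion/error path: one call undoes the mapping and clears the state. */
	isert_unmap_data_buf(isert_conn, &wr->data);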
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 238
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h |  14
2 files changed, 127 insertions(+), 125 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8ee228e9ab5a..d0ca3328aa4d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1392,19 +1392,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
 	}
 }
 
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+		   enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	data->len = length - offset;
+	data->offset = offset;
+	data->sg_off = data->offset / PAGE_SIZE;
+
+	data->sg = &sg[data->sg_off];
+	data->nents = min_t(unsigned int, nents - data->sg_off,
+			    ISCSI_ISER_SG_TABLESIZE);
+	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+					PAGE_SIZE);
+
+	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+					data->dma_dir);
+	if (unlikely(!data->dma_nents)) {
+		pr_err("Cmd: unable to dma map SGs %p\n", sg);
+		return -EINVAL;
+	}
+
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+	return 0;
+}
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+	memset(data, 0, sizeof(*data));
+}
+
+
+
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
 	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
-	if (wr->sge) {
+
+	if (wr->data.sg) {
 		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	if (wr->send_wr) {
@@ -1424,7 +1465,6 @@ static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
 	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1438,12 +1478,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 		wr->fr_desc = NULL;
 	}
 
-	if (wr->sge) {
+	if (wr->data.sg) {
 		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	wr->ib_sge = NULL;
@@ -1548,7 +1585,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	cmd->write_data_done = wr->cur_rdma_length;
+	cmd->write_data_done = wr->data.len;
 	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -2099,54 +2136,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_data_buf *data = &wr->data;
 	struct ib_send_wr *send_wr;
 	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
-	int ret = 0, count, i, ib_sge_cnt;
+	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+	int ret = 0, i, ib_sge_cnt;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		offset = cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
-	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	wr->cur_rdma_length = data_left;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
+	data_left = data->len;
+	offset = data->offset;
 
-	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
 	if (!ib_sge) {
 		pr_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 	wr->ib_sge = ib_sge;
 
-	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 			      GFP_KERNEL);
 	if (!wr->send_wr) {
 		pr_debug("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 
 	wr->isert_cmd = isert_cmd;
@@ -2185,10 +2207,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	return 0;
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+	isert_unmap_data_buf(isert_conn, data);
+
 	return ret;
 }
 
@@ -2232,10 +2253,10 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 }
 
 static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
-		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
-		  unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+		  struct fast_reg_descriptor *fr_desc,
+		  struct isert_data_buf *mem,
+		  struct ib_sge *sge)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct ib_send_wr fr_wr, inv_wr;
@@ -2244,13 +2265,19 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 	u32 page_off;
 	u8 key;
 
-	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
-	page_off = offset % PAGE_SIZE;
+	if (mem->dma_nents == 1) {
+		sge->lkey = isert_conn->conn_mr->lkey;
+		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+		return 0;
+	}
+
+	page_off = mem->offset % PAGE_SIZE;
 
 	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-		 fr_desc, sg_nents, offset);
+		 fr_desc, mem->nents, mem->offset);
 
-	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
+	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
 					     &fr_desc->data_frpl->page_list[0]);
 
 	if (!fr_desc->valid) {
@@ -2273,7 +2300,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
 	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
 	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fr_wr.wr.fast_reg.length = data_len;
+	fr_wr.wr.fast_reg.length = mem->len;
 	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
 	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
@@ -2289,12 +2316,12 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 	}
 	fr_desc->valid = false;
 
-	ib_sge->lkey = fr_desc->data_mr->lkey;
-	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
-	ib_sge->length = data_len;
+	sge->lkey = fr_desc->data_mr->lkey;
+	sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
+	sge->length = mem->len;
 
 	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
-		 ib_sge->addr, ib_sge->length, ib_sge->lkey);
+		 sge->addr, sge->length, sge->lkey);
 
 	return ret;
 }
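
With the single-entry case folded into isert_fast_reg_mr above, a
caller only decides whether a fast-registration descriptor is needed
at all; the helper itself picks between the global MR and a fast
registration work request. A condensed view of the new call site
(mirroring the isert_reg_rdma hunk that follows):

	struct fast_reg_descriptor *fr_desc = NULL;

	if (wr->data.dma_nents != 1) {
		/* Several DMA entries: take a descriptor from the pool so the
		 * helper can post a fast registration work request. */
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	/* For dma_nents == 1 this fills the sge from the global MR
	 * (isert_conn->conn_mr) and returns without using fr_desc. */
	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, &wr->s_ib_sge);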
@@ -2305,54 +2332,43 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_conn *isert_conn = conn->context;
 	struct ib_send_wr *send_wr;
-	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	struct fast_reg_descriptor *fr_desc;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int ret = 0, count;
+	struct fast_reg_descriptor *fr_desc = NULL;
+	u32 offset;
+	int ret = 0;
 	unsigned long flags;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		offset = cmd->write_data_done;
-		sg_off = offset / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
+	if (wr->data.dma_nents != 1) {
+		spin_lock_irqsave(&isert_conn->conn_lock, flags);
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+					   struct fast_reg_descriptor, list);
+		list_del(&fr_desc->list);
+		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+		wr->fr_desc = fr_desc;
 	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
 
-	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
-	ib_sge = &wr->s_ib_sge;
-	wr->ib_sge = ib_sge;
+	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, &wr->s_ib_sge);
+	if (ret)
+		goto unmap_cmd;
 
+	wr->ib_sge = &wr->s_ib_sge;
 	wr->send_wr_num = 1;
 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
 	wr->send_wr = &wr->s_send_wr;
-
 	wr->isert_cmd = isert_cmd;
-	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
 
 	send_wr = &isert_cmd->rdma_wr.s_send_wr;
-	send_wr->sg_list = ib_sge;
+	send_wr->sg_list = &wr->s_ib_sge;
 	send_wr->num_sge = 1;
 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
@@ -2368,37 +2384,15 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	data_len = min(data_left, rdma_write_max);
-	wr->cur_rdma_length = data_len;
-
-	/* if there is a single dma entry, dma mr is sufficient */
-	if (count == 1) {
-		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
-		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
-		ib_sge->lkey = isert_conn->conn_mr->lkey;
-		wr->fr_desc = NULL;
-	} else {
+	return 0;
+unmap_cmd:
+	if (fr_desc) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
-					   struct fast_reg_descriptor, list);
-		list_del(&fr_desc->list);
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-		wr->fr_desc = fr_desc;
-
-		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
-					ib_sge, sg_nents, offset, data_len);
-		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
-			goto unmap_sg;
-		}
 	}
+	isert_unmap_data_buf(isert_conn, &wr->data);
 
-	return 0;
-
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	return ret;
 }
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index f6ae7f5dd408..8a02c4ebe373 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -57,18 +57,26 @@ struct fast_reg_descriptor {
 	bool valid;
 };
 
+struct isert_data_buf {
+	struct scatterlist *sg;
+	int nents;
+	u32 sg_off;
+	u32 len; /* cur_rdma_length */
+	u32 offset;
+	unsigned int dma_nents;
+	enum dma_data_direction dma_dir;
+};
+
 struct isert_rdma_wr {
 	struct list_head wr_list;
 	struct isert_cmd *isert_cmd;
 	enum iser_ib_op_code iser_ib_op;
 	struct ib_sge *ib_sge;
 	struct ib_sge s_ib_sge;
-	int num_sge;
-	struct scatterlist *sge;
 	int send_wr_num;
 	struct ib_send_wr *send_wr;
 	struct ib_send_wr s_send_wr;
-	u32 cur_rdma_length;
+	struct isert_data_buf data;
 	struct fast_reg_descriptor *fr_desc;
 };
 