summaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/ulp
diff options
context:
space:
mode:
authorIsrael Rukshin <israelr@mellanox.com>2019-06-11 11:52:47 -0400
committerJason Gunthorpe <jgg@mellanox.com>2019-06-24 10:49:27 -0400
commitb76a439982f8483beb2ffcfe93be50026940030a (patch)
tree8da11022e23b94fc9c9d60b420c85887790552d6 /drivers/infiniband/ulp
parent38ca87c6f1e514686d4a385246d1afe1e1f2e482 (diff)
IB/iser: Use IB_WR_REG_MR_INTEGRITY for PI handover
Using this new API reduces iSER code complexity. It also reduces the maximum
number of work requests per task and the need of dealing with multiple MRs
(and their registrations and invalidations) per task. It is done by using a
single WR and a special MR type (IB_MR_TYPE_INTEGRITY) for PI operation.

The setup of the tested benchmark:
 - 2 servers with 24 cores (1 initiator and 1 target)
 - 24 target sessions with 1 LUN each
 - ramdisk backstore
 - PI active

Performance results running fio (24 jobs, 128 iodepth) using
write_generate=0 and read_verify=0 (w/w.o patch):

bs      IOPS(read)        IOPS(write)
----    ----------        ----------
512     1236.6K/1164.3K   1357.2K/1332.8K
1k      1196.5K/1163.8K   1348.4K/1262.7K
2k      1016.7K/921950    1003.7K/931230
4k      662728/600545     595423/501513
8k      385954/384345     333775/277090
16k     222864/222820     170317/170671
32k     116869/114896     82331/82244
64k     55205/54931       40264/40021

Using write_generate=1 and read_verify=1 (w/w.o patch):

bs      IOPS(read)        IOPS(write)
----    ----------        ----------
512     1090.1K/1030.9K   1303.9K/1101.4K
1k      1057.7K/904583    1318.4K/988085
2k      965226/638799     1008.6K/692514
4k      555479/410151     542414/414517
8k      298675/224964     264729/237508
16k     133485/122481     164625/138647
32k     74329/67615       80143/78743
64k     35716/35519       39294/37334

We get performance improvement at all block sizes. The most significant
improvement is when writing 4k bs (almost 30% more iops).

Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h38
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c12
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c98
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c140
4 files changed, 95 insertions, 193 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 36d525110fd2..6bf9eaa8ec96 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -225,13 +225,11 @@ enum iser_desc_type {
225 ISCSI_TX_DATAOUT 225 ISCSI_TX_DATAOUT
226}; 226};
227 227
228/* Maximum number of work requests per task: 228/*
229 * Data memory region local invalidate + fast registration 229 * Maximum number of work requests per task
230 * Protection memory region local invalidate + fast registration 230 * (invalidate, registration, send)
231 * Signature memory region local invalidate + fast registration
232 * PDU send
233 */ 231 */
234#define ISER_MAX_WRS 7 232#define ISER_MAX_WRS 3
235 233
236/** 234/**
237 * struct iser_tx_desc - iSER TX descriptor 235 * struct iser_tx_desc - iSER TX descriptor
@@ -247,9 +245,6 @@ enum iser_desc_type {
247 * @mapped: Is the task header mapped 245 * @mapped: Is the task header mapped
248 * @wr_idx: Current WR index 246 * @wr_idx: Current WR index
249 * @wrs: Array of WRs per task 247 * @wrs: Array of WRs per task
250 * @data_reg: Data buffer registration details
251 * @prot_reg: Protection buffer registration details
252 * @sig_attrs: Signature attributes
253 */ 248 */
254struct iser_tx_desc { 249struct iser_tx_desc {
255 struct iser_ctrl iser_header; 250 struct iser_ctrl iser_header;
@@ -264,11 +259,7 @@ struct iser_tx_desc {
264 union iser_wr { 259 union iser_wr {
265 struct ib_send_wr send; 260 struct ib_send_wr send;
266 struct ib_reg_wr fast_reg; 261 struct ib_reg_wr fast_reg;
267 struct ib_sig_handover_wr sig;
268 } wrs[ISER_MAX_WRS]; 262 } wrs[ISER_MAX_WRS];
269 struct iser_mem_reg data_reg;
270 struct iser_mem_reg prot_reg;
271 struct ib_sig_attrs sig_attrs;
272}; 263};
273 264
274#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ 265#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
@@ -388,6 +379,7 @@ struct iser_device {
388 * 379 *
389 * @mr: memory region 380 * @mr: memory region
390 * @fmr_pool: pool of fmrs 381 * @fmr_pool: pool of fmrs
382 * @sig_mr: signature memory region
391 * @page_vec: fast reg page list used by fmr pool 383 * @page_vec: fast reg page list used by fmr pool
392 * @mr_valid: is mr valid indicator 384 * @mr_valid: is mr valid indicator
393 */ 385 */
@@ -396,36 +388,22 @@ struct iser_reg_resources {
396 struct ib_mr *mr; 388 struct ib_mr *mr;
397 struct ib_fmr_pool *fmr_pool; 389 struct ib_fmr_pool *fmr_pool;
398 }; 390 };
391 struct ib_mr *sig_mr;
399 struct iser_page_vec *page_vec; 392 struct iser_page_vec *page_vec;
400 u8 mr_valid:1; 393 u8 mr_valid:1;
401}; 394};
402 395
403/** 396/**
404 * struct iser_pi_context - Protection information context
405 *
406 * @rsc: protection buffer registration resources
407 * @sig_mr: signature enable memory region
408 * @sig_mr_valid: is sig_mr valid indicator
409 * @sig_protected: is region protected indicator
410 */
411struct iser_pi_context {
412 struct iser_reg_resources rsc;
413 struct ib_mr *sig_mr;
414 u8 sig_mr_valid:1;
415 u8 sig_protected:1;
416};
417
418/**
419 * struct iser_fr_desc - Fast registration descriptor 397 * struct iser_fr_desc - Fast registration descriptor
420 * 398 *
421 * @list: entry in connection fastreg pool 399 * @list: entry in connection fastreg pool
422 * @rsc: data buffer registration resources 400 * @rsc: data buffer registration resources
423 * @pi_ctx: protection information context 401 * @sig_protected: is region protected indicator
424 */ 402 */
425struct iser_fr_desc { 403struct iser_fr_desc {
426 struct list_head list; 404 struct list_head list;
427 struct iser_reg_resources rsc; 405 struct iser_reg_resources rsc;
428 struct iser_pi_context *pi_ctx; 406 bool sig_protected;
429 struct list_head all_list; 407 struct list_head all_list;
430}; 408};
431 409
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 96af06cfe0af..5cbb4b3a0566 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -592,15 +592,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
592static inline int 592static inline int
593iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) 593iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
594{ 594{
595 if (likely(rkey == desc->rsc.mr->rkey)) { 595 if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
596 desc->rsc.mr_valid = 0; 596 (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
597 } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
598 desc->pi_ctx->sig_mr_valid = 0;
599 } else {
600 iser_err("Bogus remote invalidation for rkey %#x\n", rkey); 597 iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
601 return -EINVAL; 598 return -EINVAL;
602 } 599 }
603 600
601 desc->rsc.mr_valid = 0;
602
604 return 0; 603 return 0;
605} 604}
606 605
@@ -750,6 +749,9 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
750 iser_task->prot[ISER_DIR_IN].data_len = 0; 749 iser_task->prot[ISER_DIR_IN].data_len = 0;
751 iser_task->prot[ISER_DIR_OUT].data_len = 0; 750 iser_task->prot[ISER_DIR_OUT].data_len = 0;
752 751
752 iser_task->prot[ISER_DIR_IN].dma_nents = 0;
753 iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
754
753 memset(&iser_task->rdma_reg[ISER_DIR_IN], 0, 755 memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
754 sizeof(struct iser_mem_reg)); 756 sizeof(struct iser_mem_reg));
755 memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0, 757 memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index f431c9b4065c..d66e17c2a085 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -376,16 +376,16 @@ iser_inv_rkey(struct ib_send_wr *inv_wr,
376 376
377static int 377static int
378iser_reg_sig_mr(struct iscsi_iser_task *iser_task, 378iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
379 struct iser_pi_context *pi_ctx, 379 struct iser_data_buf *mem,
380 struct iser_mem_reg *data_reg, 380 struct iser_data_buf *sig_mem,
381 struct iser_mem_reg *prot_reg, 381 struct iser_reg_resources *rsc,
382 struct iser_mem_reg *sig_reg) 382 struct iser_mem_reg *sig_reg)
383{ 383{
384 struct iser_tx_desc *tx_desc = &iser_task->desc; 384 struct iser_tx_desc *tx_desc = &iser_task->desc;
385 struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
386 struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; 385 struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
387 struct ib_sig_handover_wr *wr; 386 struct ib_mr *mr = rsc->sig_mr;
388 struct ib_mr *mr = pi_ctx->sig_mr; 387 struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
388 struct ib_reg_wr *wr;
389 int ret; 389 int ret;
390 390
391 memset(sig_attrs, 0, sizeof(*sig_attrs)); 391 memset(sig_attrs, 0, sizeof(*sig_attrs));
@@ -395,33 +395,36 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
395 395
396 iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); 396 iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
397 397
398 if (pi_ctx->sig_mr_valid) 398 if (rsc->mr_valid)
399 iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe); 399 iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
400 400
401 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); 401 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
402 402
403 wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr, 403 ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
404 wr); 404 sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
405 wr->wr.opcode = IB_WR_REG_SIG_MR; 405 if (unlikely(ret)) {
406 iser_err("failed to map PI sg (%d)\n",
407 mem->dma_nents + sig_mem->dma_nents);
408 goto err;
409 }
410
411 wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
412 memset(wr, 0, sizeof(*wr));
413 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
406 wr->wr.wr_cqe = cqe; 414 wr->wr.wr_cqe = cqe;
407 wr->wr.sg_list = &data_reg->sge; 415 wr->wr.num_sge = 0;
408 wr->wr.num_sge = 1;
409 wr->wr.send_flags = 0; 416 wr->wr.send_flags = 0;
410 wr->sig_attrs = sig_attrs; 417 wr->mr = mr;
411 wr->sig_mr = mr; 418 wr->key = mr->rkey;
412 if (scsi_prot_sg_count(iser_task->sc)) 419 wr->access = IB_ACCESS_LOCAL_WRITE |
413 wr->prot = &prot_reg->sge; 420 IB_ACCESS_REMOTE_READ |
414 else 421 IB_ACCESS_REMOTE_WRITE;
415 wr->prot = NULL; 422 rsc->mr_valid = 1;
416 wr->access_flags = IB_ACCESS_LOCAL_WRITE |
417 IB_ACCESS_REMOTE_READ |
418 IB_ACCESS_REMOTE_WRITE;
419 pi_ctx->sig_mr_valid = 1;
420 423
421 sig_reg->sge.lkey = mr->lkey; 424 sig_reg->sge.lkey = mr->lkey;
422 sig_reg->rkey = mr->rkey; 425 sig_reg->rkey = mr->rkey;
423 sig_reg->sge.addr = 0; 426 sig_reg->sge.addr = mr->iova;
424 sig_reg->sge.length = scsi_transfer_length(iser_task->sc); 427 sig_reg->sge.length = mr->length;
425 428
426 iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n", 429 iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
427 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, 430 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
@@ -478,21 +481,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
478} 481}
479 482
480static int 483static int
481iser_reg_prot_sg(struct iscsi_iser_task *task,
482 struct iser_data_buf *mem,
483 struct iser_fr_desc *desc,
484 bool use_dma_key,
485 struct iser_mem_reg *reg)
486{
487 struct iser_device *device = task->iser_conn->ib_conn.device;
488
489 if (use_dma_key)
490 return iser_reg_dma(device, mem, reg);
491
492 return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
493}
494
495static int
496iser_reg_data_sg(struct iscsi_iser_task *task, 484iser_reg_data_sg(struct iscsi_iser_task *task,
497 struct iser_data_buf *mem, 485 struct iser_data_buf *mem,
498 struct iser_fr_desc *desc, 486 struct iser_fr_desc *desc,
@@ -515,7 +503,6 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
515 struct iser_device *device = ib_conn->device; 503 struct iser_device *device = ib_conn->device;
516 struct iser_data_buf *mem = &task->data[dir]; 504 struct iser_data_buf *mem = &task->data[dir];
517 struct iser_mem_reg *reg = &task->rdma_reg[dir]; 505 struct iser_mem_reg *reg = &task->rdma_reg[dir];
518 struct iser_mem_reg *data_reg;
519 struct iser_fr_desc *desc = NULL; 506 struct iser_fr_desc *desc = NULL;
520 bool use_dma_key; 507 bool use_dma_key;
521 int err; 508 int err;
@@ -528,32 +515,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
528 reg->mem_h = desc; 515 reg->mem_h = desc;
529 } 516 }
530 517
531 if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) 518 if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
532 data_reg = reg; 519 err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg);
533 else 520 if (unlikely(err))
534 data_reg = &task->desc.data_reg; 521 goto err_reg;
535 522 } else {
536 err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); 523 err = iser_reg_sig_mr(task, mem, &task->prot[dir],
537 if (unlikely(err)) 524 &desc->rsc, reg);
538 goto err_reg;
539
540 if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
541 struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
542
543 if (scsi_prot_sg_count(task->sc)) {
544 mem = &task->prot[dir];
545 err = iser_reg_prot_sg(task, mem, desc,
546 use_dma_key, prot_reg);
547 if (unlikely(err))
548 goto err_reg;
549 }
550
551 err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
552 prot_reg, reg);
553 if (unlikely(err)) 525 if (unlikely(err))
554 goto err_reg; 526 goto err_reg;
555 527
556 desc->pi_ctx->sig_protected = 1; 528 desc->sig_protected = 1;
557 } 529 }
558 530
559 return 0; 531 return 0;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4ff3d98fa6a4..ffd6bbc819f7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -233,116 +233,63 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
233 kfree(desc); 233 kfree(desc);
234} 234}
235 235
236static int 236static struct iser_fr_desc *
237iser_alloc_reg_res(struct iser_device *device, 237iser_create_fastreg_desc(struct iser_device *device,
238 struct ib_pd *pd, 238 struct ib_pd *pd,
239 struct iser_reg_resources *res, 239 bool pi_enable,
240 unsigned int size) 240 unsigned int size)
241{ 241{
242 struct iser_fr_desc *desc;
242 struct ib_device *ib_dev = device->ib_device; 243 struct ib_device *ib_dev = device->ib_device;
243 enum ib_mr_type mr_type; 244 enum ib_mr_type mr_type;
244 int ret; 245 int ret;
245 246
247 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
248 if (!desc)
249 return ERR_PTR(-ENOMEM);
250
246 if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) 251 if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
247 mr_type = IB_MR_TYPE_SG_GAPS; 252 mr_type = IB_MR_TYPE_SG_GAPS;
248 else 253 else
249 mr_type = IB_MR_TYPE_MEM_REG; 254 mr_type = IB_MR_TYPE_MEM_REG;
250 255
251 res->mr = ib_alloc_mr(pd, mr_type, size); 256 desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
252 if (IS_ERR(res->mr)) { 257 if (IS_ERR(desc->rsc.mr)) {
253 ret = PTR_ERR(res->mr); 258 ret = PTR_ERR(desc->rsc.mr);
254 iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); 259 iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
255 return ret; 260 goto err_alloc_mr;
256 }
257 res->mr_valid = 0;
258
259 return 0;
260}
261
262static void
263iser_free_reg_res(struct iser_reg_resources *rsc)
264{
265 ib_dereg_mr(rsc->mr);
266}
267
268static int
269iser_alloc_pi_ctx(struct iser_device *device,
270 struct ib_pd *pd,
271 struct iser_fr_desc *desc,
272 unsigned int size)
273{
274 struct iser_pi_context *pi_ctx = NULL;
275 int ret;
276
277 desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
278 if (!desc->pi_ctx)
279 return -ENOMEM;
280
281 pi_ctx = desc->pi_ctx;
282
283 ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
284 if (ret) {
285 iser_err("failed to allocate reg_resources\n");
286 goto alloc_reg_res_err;
287 } 261 }
288 262
289 pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); 263 if (pi_enable) {
290 if (IS_ERR(pi_ctx->sig_mr)) { 264 desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
291 ret = PTR_ERR(pi_ctx->sig_mr); 265 if (IS_ERR(desc->rsc.sig_mr)) {
292 goto sig_mr_failure; 266 ret = PTR_ERR(desc->rsc.sig_mr);
267 iser_err("Failed to allocate sig_mr err=%d\n", ret);
268 goto err_alloc_mr_integrity;
269 }
293 } 270 }
294 pi_ctx->sig_mr_valid = 0; 271 desc->rsc.mr_valid = 0;
295 desc->pi_ctx->sig_protected = 0;
296 272
297 return 0; 273 return desc;
298 274
299sig_mr_failure: 275err_alloc_mr_integrity:
300 iser_free_reg_res(&pi_ctx->rsc); 276 ib_dereg_mr(desc->rsc.mr);
301alloc_reg_res_err: 277err_alloc_mr:
302 kfree(desc->pi_ctx); 278 kfree(desc);
303 279
304 return ret; 280 return ERR_PTR(ret);
305} 281}
306 282
307static void 283static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
308iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
309{ 284{
310 iser_free_reg_res(&pi_ctx->rsc); 285 struct iser_reg_resources *res = &desc->rsc;
311 ib_dereg_mr(pi_ctx->sig_mr);
312 kfree(pi_ctx);
313}
314
315static struct iser_fr_desc *
316iser_create_fastreg_desc(struct iser_device *device,
317 struct ib_pd *pd,
318 bool pi_enable,
319 unsigned int size)
320{
321 struct iser_fr_desc *desc;
322 int ret;
323 286
324 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 287 ib_dereg_mr(res->mr);
325 if (!desc) 288 if (res->sig_mr) {
326 return ERR_PTR(-ENOMEM); 289 ib_dereg_mr(res->sig_mr);
327 290 res->sig_mr = NULL;
328 ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
329 if (ret)
330 goto reg_res_alloc_failure;
331
332 if (pi_enable) {
333 ret = iser_alloc_pi_ctx(device, pd, desc, size);
334 if (ret)
335 goto pi_ctx_alloc_failure;
336 } 291 }
337
338 return desc;
339
340pi_ctx_alloc_failure:
341 iser_free_reg_res(&desc->rsc);
342reg_res_alloc_failure:
343 kfree(desc); 292 kfree(desc);
344
345 return ERR_PTR(ret);
346} 293}
347 294
348/** 295/**
@@ -399,10 +346,7 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
399 346
400 list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) { 347 list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
401 list_del(&desc->all_list); 348 list_del(&desc->all_list);
402 iser_free_reg_res(&desc->rsc); 349 iser_destroy_fastreg_desc(desc);
403 if (desc->pi_ctx)
404 iser_free_pi_ctx(desc->pi_ctx);
405 kfree(desc);
406 ++i; 350 ++i;
407 } 351 }
408 352
@@ -707,6 +651,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
707 struct ib_device_attr *attr = &device->ib_device->attrs; 651 struct ib_device_attr *attr = &device->ib_device->attrs;
708 unsigned short sg_tablesize, sup_sg_tablesize; 652 unsigned short sg_tablesize, sup_sg_tablesize;
709 unsigned short reserved_mr_pages; 653 unsigned short reserved_mr_pages;
654 u32 max_num_sg;
710 655
711 /* 656 /*
712 * FRs without SG_GAPS or FMRs can only map up to a (device) page per 657 * FRs without SG_GAPS or FMRs can only map up to a (device) page per
@@ -720,12 +665,17 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
720 else 665 else
721 reserved_mr_pages = 1; 666 reserved_mr_pages = 1;
722 667
668 if (iser_conn->ib_conn.pi_support)
669 max_num_sg = attr->max_pi_fast_reg_page_list_len;
670 else
671 max_num_sg = attr->max_fast_reg_page_list_len;
672
723 sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); 673 sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
724 if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) 674 if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
725 sup_sg_tablesize = 675 sup_sg_tablesize =
726 min_t( 676 min_t(
727 uint, ISCSI_ISER_MAX_SG_TABLESIZE, 677 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
728 attr->max_fast_reg_page_list_len - reserved_mr_pages); 678 max_num_sg - reserved_mr_pages);
729 else 679 else
730 sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE; 680 sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
731 681
@@ -1118,9 +1068,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1118 struct ib_mr_status mr_status; 1068 struct ib_mr_status mr_status;
1119 int ret; 1069 int ret;
1120 1070
1121 if (desc && desc->pi_ctx->sig_protected) { 1071 if (desc && desc->sig_protected) {
1122 desc->pi_ctx->sig_protected = 0; 1072 desc->sig_protected = 0;
1123 ret = ib_check_mr_status(desc->pi_ctx->sig_mr, 1073 ret = ib_check_mr_status(desc->rsc.sig_mr,
1124 IB_MR_CHECK_SIG_STATUS, &mr_status); 1074 IB_MR_CHECK_SIG_STATUS, &mr_status);
1125 if (ret) { 1075 if (ret) {
1126 pr_err("ib_check_mr_status failed, ret %d\n", ret); 1076 pr_err("ib_check_mr_status failed, ret %d\n", ret);