author     Sagi Grimberg <sagig@mellanox.com>    2015-04-14 11:08:15 -0400
committer  Doug Ledford <dledford@redhat.com>    2015-04-15 16:07:12 -0400
commit     e3784bd1d9f1039f28dff2c0c0d17daabb3d6761 (patch)
tree       3fcdf1f97cf69334da4ea6920634ff2e35427a77 /drivers/infiniband/ulp
parent     ecc3993a2ad2a4ce8f1a58a08e9177f21015492d (diff)
IB/iser: Remove a redundant struct iser_data_buf
There is no need to keep two iser_data_buf structures just in case we use a
memory copy. We can avoid that just by adding a pointer to the original sg,
so keep only two iser_data_buf structures per command (data and protection)
and pass the relevant data_buf to the bounce buffer routine. This patch does
not change any functionality.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Adir Lev <adirl@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
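For quick orientation before reading the diff: the heart of the change is in
iser_start_rdma_unaligned_sg(), which now reuses the task's single
iser_data_buf when it falls back to a bounce buffer instead of filling a
second descriptor. The sketch below restates that bookkeeping; the helper
name and the mem/bounce_len parameters are illustrative only and do not
exist in the driver.

    /* Minimal sketch of the new bounce-buffer bookkeeping, assuming the
     * iser_data_buf layout introduced by this patch (sg, orig_sg, sg_single,
     * copy_buf).  The helper name and its parameters are hypothetical. */
    static void iser_bounce_swap_sg(struct iser_data_buf *data, char *mem,
                                    unsigned long bounce_len)
    {
            sg_init_one(&data->sg_single, mem, bounce_len);
            data->orig_sg = data->sg;     /* remember the caller's original sg list */
            data->sg = &data->sg_single;  /* consumers now see the single bounce entry */
            data->copy_buf = mem;         /* released by iser_finalize_rdma_unaligned_sg() */
    }

Because every later consumer (DMA mapping, page-vec construction, memory
registration) already dereferences data->sg, no second iser_data_buf has to
be carried around; the original scatterlist stays reachable through orig_sg
and the bounce allocation is freed once copy_buf is released at finalize time.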
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h      | 12
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  | 16
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c     | 58
3 files changed, 34 insertions(+), 52 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index b47aea1094b2..5c7036c90766 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -218,20 +218,23 @@ enum iser_data_dir {
 /**
  * struct iser_data_buf - iSER data buffer
  *
- * @buf:       pointer to the sg list
+ * @sg:        pointer to the sg list
  * @size:      num entries of this sg
  * @data_len:  total beffer byte len
  * @dma_nents: returned by dma_map_sg
  * @copy_buf:  allocated copy buf for SGs unaligned
  *             for rdma which are copied
+ * @orig_sg:   pointer to the original sg list (in case
+ *             we used a copy)
  * @sg_single: SG-ified clone of a non SG SC or
  *             unaligned SG
  */
 struct iser_data_buf {
-        void               *buf;
+        struct scatterlist *sg;
         unsigned int       size;
         unsigned long      data_len;
         unsigned int       dma_nents;
+        struct scatterlist *orig_sg;
         char               *copy_buf;
         struct scatterlist sg_single;
   };
@@ -536,9 +539,7 @@ struct iser_conn {
  * @dir:        iser data direction
  * @rdma_regd:  task rdma registration desc
  * @data:       iser data buffer desc
- * @data_copy:  iser data copy buffer desc (bounce buffer)
  * @prot:       iser protection buffer desc
- * @prot_copy:  iser protection copy buffer desc (bounce buffer)
  */
 struct iscsi_iser_task {
         struct iser_tx_desc  desc;
@@ -549,9 +550,7 @@ struct iscsi_iser_task {
         int                  dir[ISER_DIRS_NUM];
         struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];
         struct iser_data_buf data[ISER_DIRS_NUM];
-        struct iser_data_buf data_copy[ISER_DIRS_NUM];
         struct iser_data_buf prot[ISER_DIRS_NUM];
-        struct iser_data_buf prot_copy[ISER_DIRS_NUM];
 };
 
 struct iser_page_vec {
@@ -621,7 +620,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn);
 
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                      struct iser_data_buf *mem,
-                                     struct iser_data_buf *mem_copy,
                                      enum iser_data_dir cmd_dir);
 
 int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 76eb57b31a59..0e414dbaa523 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -401,13 +401,13 @@ int iser_send_command(struct iscsi_conn *conn,
         }
 
         if (scsi_sg_count(sc)) { /* using a scatter list */
-                data_buf->buf = scsi_sglist(sc);
+                data_buf->sg = scsi_sglist(sc);
                 data_buf->size = scsi_sg_count(sc);
         }
         data_buf->data_len = scsi_bufflen(sc);
 
         if (scsi_prot_sg_count(sc)) {
-                prot_buf->buf = scsi_prot_sglist(sc);
+                prot_buf->sg = scsi_prot_sglist(sc);
                 prot_buf->size = scsi_prot_sg_count(sc);
                 prot_buf->data_len = (data_buf->data_len >>
                                       ilog2(sc->device->sector_size)) * 8;
@@ -674,35 +674,31 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
         /* if we were reading, copy back to unaligned sglist,
          * anyway dma_unmap and free the copy
          */
-        if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+        if (iser_task->data[ISER_DIR_IN].copy_buf) {
                 is_rdma_data_aligned = 0;
                 iser_finalize_rdma_unaligned_sg(iser_task,
                                                 &iser_task->data[ISER_DIR_IN],
-                                                &iser_task->data_copy[ISER_DIR_IN],
                                                 ISER_DIR_IN);
         }
 
-        if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+        if (iser_task->data[ISER_DIR_OUT].copy_buf) {
                 is_rdma_data_aligned = 0;
                 iser_finalize_rdma_unaligned_sg(iser_task,
                                                 &iser_task->data[ISER_DIR_OUT],
-                                                &iser_task->data_copy[ISER_DIR_OUT],
                                                 ISER_DIR_OUT);
         }
 
-        if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
+        if (iser_task->prot[ISER_DIR_IN].copy_buf) {
                 is_rdma_prot_aligned = 0;
                 iser_finalize_rdma_unaligned_sg(iser_task,
                                                 &iser_task->prot[ISER_DIR_IN],
-                                                &iser_task->prot_copy[ISER_DIR_IN],
                                                 ISER_DIR_IN);
         }
 
-        if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
+        if (iser_task->prot[ISER_DIR_OUT].copy_buf) {
                 is_rdma_prot_aligned = 0;
                 iser_finalize_rdma_unaligned_sg(iser_task,
                                                 &iser_task->prot[ISER_DIR_OUT],
-                                                &iser_task->prot_copy[ISER_DIR_OUT],
                                                 ISER_DIR_OUT);
         }
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 32ccd5cea675..beeabd0e05c1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -46,11 +46,10 @@
  */
 static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                         struct iser_data_buf *data,
-                                        struct iser_data_buf *data_copy,
                                         enum iser_data_dir cmd_dir)
 {
         struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
-        struct scatterlist *sgl = (struct scatterlist *)data->buf;
+        struct scatterlist *sgl = data->sg;
         struct scatterlist *sg;
         char *mem = NULL;
         unsigned long cmd_data_len = data->data_len;
@@ -72,7 +71,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                 /* copy the unaligned sg the buffer which is used for RDMA */
                 char *p, *from;
 
-                sgl = (struct scatterlist *)data->buf;
+                sgl = data->sg;
                 p = mem;
                 for_each_sg(sgl, sg, data->size, i) {
                         from = kmap_atomic(sg_page(sg));
@@ -84,18 +83,16 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                 }
         }
 
-        sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
-        data_copy->buf = &data_copy->sg_single;
-        data_copy->size = 1;
-        data_copy->copy_buf = mem;
-
-        dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
+        sg_init_one(&data->sg_single, mem, cmd_data_len);
+        data->orig_sg = data->sg;
+        data->sg = &data->sg_single;
+        data->copy_buf = mem;
+        dma_nents = ib_dma_map_sg(dev, data->sg, 1,
                                   (cmd_dir == ISER_DIR_OUT) ?
                                   DMA_TO_DEVICE : DMA_FROM_DEVICE);
         BUG_ON(dma_nents == 0);
 
-        data_copy->dma_nents = dma_nents;
-        data_copy->data_len = cmd_data_len;
+        data->dma_nents = dma_nents;
 
         return 0;
 }
@@ -106,7 +103,6 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                      struct iser_data_buf *data,
-                                     struct iser_data_buf *data_copy,
                                      enum iser_data_dir cmd_dir)
 {
         struct ib_device *dev;
@@ -114,7 +110,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
         dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
-        ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
+        ib_dma_unmap_sg(dev, data->sg, 1,
                         (cmd_dir == ISER_DIR_OUT) ?
                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
@@ -126,9 +122,9 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                 int i;
 
                 /* copy back read RDMA to unaligned sg */
-                mem = data_copy->copy_buf;
+                mem = data->copy_buf;
 
-                sgl = (struct scatterlist *)data->buf;
+                sgl = data->sg;
                 sg_size = data->size;
 
                 p = mem;
@@ -145,12 +141,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
         cmd_data_len = data->data_len;
 
         if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
-                free_pages((unsigned long)data_copy->copy_buf,
+                free_pages((unsigned long)data->copy_buf,
                            ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
         else
-                kfree(data_copy->copy_buf);
+                kfree(data->copy_buf);
 
-        data_copy->copy_buf = NULL;
+        data->copy_buf = NULL;
 }
 
 #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
@@ -172,7 +168,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
                                struct ib_device *ibdev, u64 *pages,
                                int *offset, int *data_size)
 {
-        struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+        struct scatterlist *sg, *sgl = data->sg;
         u64 start_addr, end_addr, page, chunk_start = 0;
         unsigned long total_sz = 0;
         unsigned int dma_len;
@@ -224,14 +220,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static int iser_data_buf_aligned_len(struct iser_data_buf *data,
                                      struct ib_device *ibdev)
 {
-        struct scatterlist *sgl, *sg, *next_sg = NULL;
+        struct scatterlist *sg, *sgl, *next_sg = NULL;
         u64 start_addr, end_addr;
         int i, ret_len, start_check = 0;
 
         if (data->dma_nents == 1)
                 return 1;
 
-        sgl = (struct scatterlist *)data->buf;
+        sgl = data->sg;
         start_addr = ib_sg_dma_address(ibdev, sgl);
 
         for_each_sg(sgl, sg, data->dma_nents, i) {
@@ -263,11 +259,10 @@ static int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
                                struct ib_device *ibdev)
 {
-        struct scatterlist *sgl = (struct scatterlist *)data->buf;
         struct scatterlist *sg;
         int i;
 
-        for_each_sg(sgl, sg, data->dma_nents, i)
+        for_each_sg(data->sg, sg, data->dma_nents, i)
                 iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
                          "off:0x%x sz:0x%x dma_len:0x%x\n",
                          i, (unsigned long)ib_sg_dma_address(ibdev, sg),
@@ -320,7 +315,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
         iser_task->dir[iser_dir] = 1;
         dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
-        data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+        data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
         if (data->dma_nents == 0) {
                 iser_err("dma_map_sg failed!!!\n");
                 return -EINVAL;
@@ -335,13 +330,12 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
         struct ib_device *dev;
 
         dev = iser_task->iser_conn->ib_conn.device->ib_device;
-        ib_dma_unmap_sg(dev, data->buf, data->size, dir);
+        ib_dma_unmap_sg(dev, data->sg, data->size, dir);
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
                               struct ib_device *ibdev,
                               struct iser_data_buf *mem,
-                              struct iser_data_buf *mem_copy,
                               enum iser_data_dir cmd_dir,
                               int aligned_len)
 {
@@ -361,7 +355,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 
         /* allocate copy buf, if we are writing, copy the */
         /* unaligned scatterlist, dma map the copy */
-        if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
+        if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
                 return -ENOMEM;
 
         return 0;
@@ -391,18 +385,16 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
         aligned_len = iser_data_buf_aligned_len(mem, ibdev);
         if (aligned_len != mem->dma_nents) {
                 err = fall_to_bounce_buf(iser_task, ibdev, mem,
-                                         &iser_task->data_copy[cmd_dir],
                                          cmd_dir, aligned_len);
                 if (err) {
                         iser_err("failed to allocate bounce buffer\n");
                         return err;
                 }
-                mem = &iser_task->data_copy[cmd_dir];
         }
 
         /* if there a single dma entry, FMR is not needed */
         if (mem->dma_nents == 1) {
-                sg = (struct scatterlist *)mem->buf;
+                sg = mem->sg;
 
                 regd_buf->reg.lkey = device->mr->lkey;
                 regd_buf->reg.rkey = device->mr->rkey;
@@ -592,7 +584,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 
         /* if there a single dma entry, dma mr suffices */
         if (mem->dma_nents == 1) {
-                struct scatterlist *sg = (struct scatterlist *)mem->buf;
+                struct scatterlist *sg = mem->sg;
 
                 sge->lkey = device->mr->lkey;
                 sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
@@ -678,13 +670,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
         aligned_len = iser_data_buf_aligned_len(mem, ibdev);
         if (aligned_len != mem->dma_nents) {
                 err = fall_to_bounce_buf(iser_task, ibdev, mem,
-                                         &iser_task->data_copy[cmd_dir],
                                          cmd_dir, aligned_len);
                 if (err) {
                         iser_err("failed to allocate bounce buffer\n");
                         return err;
                 }
-                mem = &iser_task->data_copy[cmd_dir];
         }
 
         if (mem->dma_nents != 1 ||
@@ -711,13 +701,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
         aligned_len = iser_data_buf_aligned_len(mem, ibdev);
         if (aligned_len != mem->dma_nents) {
                 err = fall_to_bounce_buf(iser_task, ibdev, mem,
-                                         &iser_task->prot_copy[cmd_dir],
                                          cmd_dir, aligned_len);
                 if (err) {
                         iser_err("failed to allocate bounce buffer\n");
                         return err;
                 }
-                mem = &iser_task->prot_copy[cmd_dir];
         }
 
         err = iser_fast_reg_mr(iser_task, regd_buf, mem,