 net/rds/iw.h      |   5
 net/rds/iw_rdma.c | 128
 net/rds/iw_send.c |  57
 3 files changed, 75 insertions, 115 deletions
diff --git a/net/rds/iw.h b/net/rds/iw.h
index fe858e5dd8d1..5af01d1758b3 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -74,13 +74,12 @@ struct rds_iw_send_work {
 	struct rm_rdma_op	*s_op;
 	struct rds_iw_mapping	*s_mapping;
 	struct ib_mr		*s_mr;
-	struct ib_fast_reg_page_list *s_page_list;
 	unsigned char		s_remap_count;
 
 	union {
 		struct ib_send_wr	s_send_wr;
 		struct ib_rdma_wr	s_rdma_wr;
-		struct ib_fast_reg_wr	s_fast_reg_wr;
+		struct ib_reg_wr	s_reg_wr;
 	};
 	struct ib_sge		s_sge[RDS_IW_MAX_SGE];
 	unsigned long		s_queued;
@@ -199,7 +198,7 @@ struct rds_iw_device {
 
 /* Magic WR_ID for ACKs */
 #define RDS_IW_ACK_WR_ID	((u64)0xffffffffffffffffULL)
-#define RDS_IW_FAST_REG_WR_ID	((u64)0xefefefefefefefefULL)
+#define RDS_IW_REG_WR_ID	((u64)0xefefefefefefefefULL)
 #define RDS_IW_LOCAL_INV_WR_ID	((u64)0xdfdfdfdfdfdfdfdfULL)
 
 struct rds_iw_statistics {
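The header change above collapses the per-WR fast-registration state into the generic registration work request: the page list pointer disappears from rds_iw_send_work and the union gains struct ib_reg_wr. For reference, that work request is roughly the following (shape as found in include/rdma/ib_verbs.h of this era, quoted from memory, so treat it as a sketch rather than the authoritative definition):

/*
 * New-style memory registration WR.  All of the page-list, page-shift and
 * iova bookkeeping that ib_fast_reg_wr carried is gone; the MR itself
 * remembers the layout programmed by ib_map_mr_sg().
 */
struct ib_reg_wr {
	struct ib_send_wr	wr;	/* opcode must be IB_WR_REG_MR */
	struct ib_mr		*mr;	/* MR previously mapped with ib_map_mr_sg() */
	u32			key;	/* rkey/lkey to assign to the MR */
	int			access;	/* IB_ACCESS_* rights for the registration */
};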
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index f8a612cc69e6..47bd68451ff7 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -47,7 +47,6 @@ struct rds_iw_mr {
 	struct rdma_cm_id	*cm_id;
 
 	struct ib_mr	*mr;
-	struct ib_fast_reg_page_list *page_list;
 
 	struct rds_iw_mapping	mapping;
 	unsigned char		remap_count;
@@ -77,8 +76,8 @@ struct rds_iw_mr_pool {
 
 static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
 static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
-static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
-static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
+static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
+static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
 			struct rds_iw_mr *ibmr,
 			struct scatterlist *sg, unsigned int nents);
 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
@@ -258,19 +257,18 @@ static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
 	sg->bytes = 0;
 }
 
-static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
+static int rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
 			struct rds_iw_scatterlist *sg)
 {
 	struct ib_device *dev = rds_iwdev->dev;
-	u64 *dma_pages = NULL;
-	int i, j, ret;
+	int i, ret;
 
 	WARN_ON(sg->dma_len);
 
 	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
 	if (unlikely(!sg->dma_len)) {
 		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
-		return ERR_PTR(-EBUSY);
+		return -EBUSY;
 	}
 
 	sg->bytes = 0;
@@ -303,31 +301,14 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
 	if (sg->dma_npages > fastreg_message_size)
 		goto out_unmap;
 
-	dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
-	if (!dma_pages) {
-		ret = -ENOMEM;
-		goto out_unmap;
-	}
 
-	for (i = j = 0; i < sg->dma_len; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
-		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
-		u64 end_addr;
 
-		end_addr = dma_addr + dma_len;
-		dma_addr &= ~PAGE_MASK;
-		for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
-			dma_pages[j++] = dma_addr;
-		BUG_ON(j > sg->dma_npages);
-	}
-
-	return dma_pages;
+	return 0;
 
 out_unmap:
 	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
 	sg->dma_len = 0;
-	kfree(dma_pages);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 
@@ -440,7 +421,7 @@ static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
 	INIT_LIST_HEAD(&ibmr->mapping.m_list);
 	ibmr->mapping.m_mr = ibmr;
 
-	err = rds_iw_init_fastreg(pool, ibmr);
+	err = rds_iw_init_reg(pool, ibmr);
 	if (err)
 		goto out_no_cigar;
 
@@ -622,7 +603,7 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
 	ibmr->cm_id = cm_id;
 	ibmr->device = rds_iwdev;
 
-	ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
+	ret = rds_iw_map_reg(rds_iwdev->mr_pool, ibmr, sg, nents);
 	if (ret == 0)
 		*key_ret = ibmr->mr->rkey;
 	else
@@ -638,7 +619,7 @@ out:
 }
 
 /*
- * iWARP fastreg handling
+ * iWARP reg handling
  *
  * The life cycle of a fastreg registration is a bit different from
  * FMRs.
@@ -650,7 +631,7 @@ out:
  * This creates a bit of a problem for us, as we do not have the destination
  * IP in GET_MR, so the connection must be setup prior to the GET_MR call for
  * RDMA to be correctly setup. If a fastreg request is present, rds_iw_xmit
- * will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR work request
+ * will try to queue a LOCAL_INV (if needed) and a REG_MR work request
  * before queuing the SEND. When completions for these arrive, they are
  * dispatched to the MR has a bit set showing that RDMa can be performed.
  *
@@ -659,11 +640,10 @@ out:
  * The expectation there is that this invalidation step includes ALL
  * PREVIOUSLY FREED MRs.
  */
-static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
+static int rds_iw_init_reg(struct rds_iw_mr_pool *pool,
 			struct rds_iw_mr *ibmr)
 {
 	struct rds_iw_device *rds_iwdev = pool->device;
-	struct ib_fast_reg_page_list *page_list = NULL;
 	struct ib_mr *mr;
 	int err;
 
@@ -676,56 +656,44 @@ static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
 		return err;
 	}
 
-	/* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
-	 * is not filled in.
-	 */
-	page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
-	if (IS_ERR(page_list)) {
-		err = PTR_ERR(page_list);
-
-		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
-		ib_dereg_mr(mr);
-		return err;
-	}
-
-	ibmr->page_list = page_list;
 	ibmr->mr = mr;
 	return 0;
 }
 
-static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
+static int rds_iw_rdma_reg_mr(struct rds_iw_mapping *mapping)
 {
 	struct rds_iw_mr *ibmr = mapping->m_mr;
-	struct ib_fast_reg_wr f_wr;
+	struct rds_iw_scatterlist *m_sg = &mapping->m_sg;
+	struct ib_reg_wr reg_wr;
 	struct ib_send_wr *failed_wr;
-	int ret;
+	int ret, n;
+
+	n = ib_map_mr_sg_zbva(ibmr->mr, m_sg->list, m_sg->len, PAGE_SIZE);
+	if (unlikely(n != m_sg->len))
+		return n < 0 ? n : -EINVAL;
+
+	reg_wr.wr.next = NULL;
+	reg_wr.wr.opcode = IB_WR_REG_MR;
+	reg_wr.wr.wr_id = RDS_IW_REG_WR_ID;
+	reg_wr.wr.num_sge = 0;
+	reg_wr.mr = ibmr->mr;
+	reg_wr.key = mapping->m_rkey;
+	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
+			IB_ACCESS_REMOTE_READ |
+			IB_ACCESS_REMOTE_WRITE;
 
 	/*
-	 * Perform a WR for the fast_reg_mr. Each individual page
+	 * Perform a WR for the reg_mr. Each individual page
 	 * in the sg list is added to the fast reg page list and placed
-	 * inside the fast_reg_mr WR. The key used is a rolling 8bit
+	 * inside the reg_mr WR. The key used is a rolling 8bit
 	 * counter, which should guarantee uniqueness.
 	 */
 	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
 	mapping->m_rkey = ibmr->mr->rkey;
 
-	memset(&f_wr, 0, sizeof(f_wr));
-	f_wr.wr.wr_id = RDS_IW_FAST_REG_WR_ID;
-	f_wr.wr.opcode = IB_WR_FAST_REG_MR;
-	f_wr.length = mapping->m_sg.bytes;
-	f_wr.rkey = mapping->m_rkey;
-	f_wr.page_list = ibmr->page_list;
-	f_wr.page_list_len = mapping->m_sg.dma_len;
-	f_wr.page_shift = PAGE_SHIFT;
-	f_wr.access_flags = IB_ACCESS_LOCAL_WRITE |
-			IB_ACCESS_REMOTE_READ |
-			IB_ACCESS_REMOTE_WRITE;
-	f_wr.iova_start = 0;
-	f_wr.wr.send_flags = IB_SEND_SIGNALED;
-
-	failed_wr = &f_wr.wr;
-	ret = ib_post_send(ibmr->cm_id->qp, &f_wr.wr, &failed_wr);
-	BUG_ON(failed_wr != &f_wr.wr);
+	failed_wr = &reg_wr.wr;
+	ret = ib_post_send(ibmr->cm_id->qp, &reg_wr.wr, &failed_wr);
+	BUG_ON(failed_wr != &reg_wr.wr);
 	if (ret)
 		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
 			__func__, __LINE__, ret);
@@ -757,21 +725,20 @@ out:
 	return ret;
 }
 
-static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
+static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
 			struct rds_iw_mr *ibmr,
 			struct scatterlist *sg,
 			unsigned int sg_len)
 {
 	struct rds_iw_device *rds_iwdev = pool->device;
 	struct rds_iw_mapping *mapping = &ibmr->mapping;
 	u64 *dma_pages;
-	int i, ret = 0;
+	int ret = 0;
 
 	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
 
-	dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
-	if (IS_ERR(dma_pages)) {
-		ret = PTR_ERR(dma_pages);
+	ret = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
+	if (ret) {
 		dma_pages = NULL;
 		goto out;
 	}
@@ -781,10 +748,7 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
 		goto out;
 	}
 
-	for (i = 0; i < mapping->m_sg.dma_npages; ++i)
-		ibmr->page_list->page_list[i] = dma_pages[i];
-
-	ret = rds_iw_rdma_build_fastreg(mapping);
+	ret = rds_iw_rdma_reg_mr(mapping);
 	if (ret)
 		goto out;
 
@@ -870,8 +834,6 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
 		struct rds_iw_mr *ibmr)
 {
-	if (ibmr->page_list)
-		ib_free_fast_reg_page_list(ibmr->page_list);
 	if (ibmr->mr)
 		ib_dereg_mr(ibmr->mr);
 }
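The converted rds_iw_rdma_reg_mr() above follows the generic two-step pattern of the new API: program the MR's page table from the scatterlist, then post an IB_WR_REG_MR work request that carries only the MR, the key and the access rights. A minimal standalone sketch of that flow, with illustrative qp/mr/sg names and the 4-argument ib_map_mr_sg_zbva() signature used by this patch (later kernels changed the mapping helpers' signatures):

/* Sketch only: register 'sg' through 'mr' on 'qp' using the new reg API. */
static int example_reg_mr(struct ib_qp *qp, struct ib_mr *mr,
			  struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = { };
	struct ib_send_wr *bad_wr;
	int n;

	/* 1. Let the core/driver build the MR's page table from the sg list
	 *    (zero-based VA variant, matching the old iova_start = 0 usage). */
	n = ib_map_mr_sg_zbva(mr, sg, sg_nents, PAGE_SIZE);
	if (n != sg_nents)
		return n < 0 ? n : -EINVAL;

	/* 2. Post the registration WR; no page list travels in the WR itself. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}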
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index f6e23c515b44..e20bd503f4bd 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -159,13 +159,6 @@ void rds_iw_send_init_ring(struct rds_iw_connection *ic)
 			printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed\n");
 			break;
 		}
-
-		send->s_page_list = ib_alloc_fast_reg_page_list(
-			ic->i_cm_id->device, fastreg_message_size);
-		if (IS_ERR(send->s_page_list)) {
-			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed\n");
-			break;
-		}
 	}
 }
 
@@ -177,8 +170,6 @@ void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
 	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
 		BUG_ON(!send->s_mr);
 		ib_dereg_mr(send->s_mr);
-		BUG_ON(!send->s_page_list);
-		ib_free_fast_reg_page_list(send->s_page_list);
 		if (send->s_send_wr.opcode == 0xdead)
 			continue;
 		if (send->s_rm)
@@ -227,7 +218,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
 			continue;
 		}
 
-		if (wc.opcode == IB_WC_FAST_REG_MR && wc.wr_id == RDS_IW_FAST_REG_WR_ID) {
+		if (wc.opcode == IB_WC_REG_MR && wc.wr_id == RDS_IW_REG_WR_ID) {
 			ic->i_fastreg_posted = 1;
 			continue;
 		}
@@ -252,7 +243,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
 			if (send->s_rm)
 				rds_iw_send_unmap_rm(ic, send, wc.status);
 			break;
-		case IB_WR_FAST_REG_MR:
+		case IB_WR_REG_MR:
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_READ_WITH_INV:
@@ -770,24 +761,26 @@ out:
 	return ret;
 }
 
-static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rds_iw_connection *ic, struct rds_iw_send_work *send, int nent, int len, u64 sg_addr)
+static int rds_iw_build_send_reg(struct rds_iw_send_work *send,
+				 struct scatterlist *sg,
+				 int sg_nents)
 {
-	BUG_ON(nent > send->s_page_list->max_page_list_len);
-	/*
-	 * Perform a WR for the fast_reg_mr. Each individual page
-	 * in the sg list is added to the fast reg page list and placed
-	 * inside the fast_reg_mr WR.
-	 */
-	send->s_fast_reg_wr.wr.opcode = IB_WR_FAST_REG_MR;
-	send->s_fast_reg_wr.length = len;
-	send->s_fast_reg_wr.rkey = send->s_mr->rkey;
-	send->s_fast_reg_wr.page_list = send->s_page_list;
-	send->s_fast_reg_wr.page_list_len = nent;
-	send->s_fast_reg_wr.page_shift = PAGE_SHIFT;
-	send->s_fast_reg_wr.access_flags = IB_ACCESS_REMOTE_WRITE;
-	send->s_fast_reg_wr.iova_start = sg_addr;
+	int n;
+
+	n = ib_map_mr_sg(send->s_mr, sg, sg_nents, PAGE_SIZE);
+	if (unlikely(n != sg_nents))
+		return n < 0 ? n : -EINVAL;
+
+	send->s_reg_wr.wr.opcode = IB_WR_REG_MR;
+	send->s_reg_wr.wr.wr_id = 0;
+	send->s_reg_wr.wr.num_sge = 0;
+	send->s_reg_wr.mr = send->s_mr;
+	send->s_reg_wr.key = send->s_mr->rkey;
+	send->s_reg_wr.access = IB_ACCESS_REMOTE_WRITE;
 
 	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
+
+	return 0;
 }
 
 int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
@@ -808,6 +801,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 	int sent;
 	int ret;
 	int num_sge;
+	int sg_nents;
 
 	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
 
@@ -861,6 +855,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 	scat = &op->op_sg[0];
 	sent = 0;
 	num_sge = op->op_count;
+	sg_nents = 0;
 
 	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_rdma_wr.wr.send_flags = 0;
@@ -904,7 +899,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 		len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 
 		if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV)
-			send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat);
+			sg_nents++;
 		else {
 			send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
 			send->s_sge[j].length = len;
@@ -951,8 +946,12 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 	 * fastreg_mr (or possibly a dma_mr)
 	 */
 	if (!op->op_write) {
-		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
-				op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+		ret = rds_iw_build_send_reg(&ic->i_sends[fr_pos],
+					    &op->op_sg[0], sg_nents);
+		if (ret) {
+			printk(KERN_WARNING "RDS/IW: failed to reg send mem\n");
+			goto out;
+		}
 		work_alloc++;
 	}
 
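On the completion side only the opcode names change: a registration posted as IB_WR_REG_MR completes as IB_WC_REG_MR, and the magic wr_id check keeps working as before. A condensed, illustrative sketch of that check (names of the cq and the posted flag are hypothetical, not the RDS/IW ones):

/* Sketch: poll a send CQ and note when a registration WR has completed. */
static void example_poll_send_cq(struct ib_cq *cq, int *reg_posted)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		/* IB_WR_REG_MR posted earlier shows up here as IB_WC_REG_MR. */
		if (wc.opcode == IB_WC_REG_MR && wc.wr_id == RDS_IW_REG_WR_ID) {
			*reg_posted = 1;	/* the MR is now usable for RDMA */
			continue;
		}
		/* ... handle ordinary SEND/RDMA completions here ... */
	}
}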