about summary refs log tree commit diff stats
path: root/net/rds
diff options
context:
space:
mode:
author	Doug Ledford <dledford@redhat.com>	2015-10-28 22:23:34 -0400
committer	Doug Ledford <dledford@redhat.com>	2015-10-28 22:23:34 -0400
commit	63e8790d39a2d7c9a0ebeab987a6033d184bc6ba (patch)
tree	9436939401b222d344f66e2bda59b445d5b9189f /net/rds
parent	95893dde99d9d14f8a6ac99ea3103792a8da5f25 (diff)
parent	eb14ab3ba14081e403be93dc6396627567fadf60 (diff)
Merge branch 'wr-cleanup' into k.o/for-4.4
Diffstat (limited to 'net/rds')
-rw-r--r--	net/rds/ib.h	6
-rw-r--r--	net/rds/ib_send.c	71
-rw-r--r--	net/rds/iw.h	6
-rw-r--r--	net/rds/iw_rdma.c	29
-rw-r--r--	net/rds/iw_send.c	113
5 files changed, 120 insertions, 105 deletions
diff --git a/net/rds/ib.h b/net/rds/ib.h
index aae60fda77f6..301c48385166 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -69,7 +69,11 @@ struct rds_ib_connect_private {
69 69
70struct rds_ib_send_work { 70struct rds_ib_send_work {
71 void *s_op; 71 void *s_op;
72 struct ib_send_wr s_wr; 72 union {
73 struct ib_send_wr s_wr;
74 struct ib_rdma_wr s_rdma_wr;
75 struct ib_atomic_wr s_atomic_wr;
76 };
73 struct ib_sge s_sge[RDS_IB_MAX_SGE]; 77 struct ib_sge s_sge[RDS_IB_MAX_SGE];
74 unsigned long s_queued; 78 unsigned long s_queued;
75}; 79};
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 4e88047086b6..987386e9931b 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -785,23 +785,23 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
785 send->s_queued = jiffies; 785 send->s_queued = jiffies;
786 786
787 if (op->op_type == RDS_ATOMIC_TYPE_CSWP) { 787 if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
788 send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; 788 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
789 send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare; 789 send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
790 send->s_wr.wr.atomic.swap = op->op_m_cswp.swap; 790 send->s_atomic_wr.swap = op->op_m_cswp.swap;
791 send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask; 791 send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
792 send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask; 792 send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
793 } else { /* FADD */ 793 } else { /* FADD */
794 send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; 794 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
795 send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add; 795 send->s_atomic_wr.compare_add = op->op_m_fadd.add;
796 send->s_wr.wr.atomic.swap = 0; 796 send->s_atomic_wr.swap = 0;
797 send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask; 797 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
798 send->s_wr.wr.atomic.swap_mask = 0; 798 send->s_atomic_wr.swap_mask = 0;
799 } 799 }
800 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); 800 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
801 send->s_wr.num_sge = 1; 801 send->s_atomic_wr.wr.num_sge = 1;
802 send->s_wr.next = NULL; 802 send->s_atomic_wr.wr.next = NULL;
803 send->s_wr.wr.atomic.remote_addr = op->op_remote_addr; 803 send->s_atomic_wr.remote_addr = op->op_remote_addr;
804 send->s_wr.wr.atomic.rkey = op->op_rkey; 804 send->s_atomic_wr.rkey = op->op_rkey;
805 send->s_op = op; 805 send->s_op = op;
806 rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); 806 rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
807 807
@@ -826,11 +826,11 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
826 if (nr_sig) 826 if (nr_sig)
827 atomic_add(nr_sig, &ic->i_signaled_sends); 827 atomic_add(nr_sig, &ic->i_signaled_sends);
828 828
829 failed_wr = &send->s_wr; 829 failed_wr = &send->s_atomic_wr.wr;
830 ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr); 830 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
831 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, 831 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
832 send, &send->s_wr, ret, failed_wr); 832 send, &send->s_atomic_wr, ret, failed_wr);
833 BUG_ON(failed_wr != &send->s_wr); 833 BUG_ON(failed_wr != &send->s_atomic_wr.wr);
834 if (ret) { 834 if (ret) {
835 printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 " 835 printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
836 "returned %d\n", &conn->c_faddr, ret); 836 "returned %d\n", &conn->c_faddr, ret);
@@ -839,9 +839,9 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
839 goto out; 839 goto out;
840 } 840 }
841 841
842 if (unlikely(failed_wr != &send->s_wr)) { 842 if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
843 printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret); 843 printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
844 BUG_ON(failed_wr != &send->s_wr); 844 BUG_ON(failed_wr != &send->s_atomic_wr.wr);
845 } 845 }
846 846
847out: 847out:
@@ -912,22 +912,23 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
912 nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify); 912 nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
913 913
914 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; 914 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
915 send->s_wr.wr.rdma.remote_addr = remote_addr; 915 send->s_rdma_wr.remote_addr = remote_addr;
916 send->s_wr.wr.rdma.rkey = op->op_rkey; 916 send->s_rdma_wr.rkey = op->op_rkey;
917 917
918 if (num_sge > max_sge) { 918 if (num_sge > max_sge) {
919 send->s_wr.num_sge = max_sge; 919 send->s_rdma_wr.wr.num_sge = max_sge;
920 num_sge -= max_sge; 920 num_sge -= max_sge;
921 } else { 921 } else {
922 send->s_wr.num_sge = num_sge; 922 send->s_rdma_wr.wr.num_sge = num_sge;
923 } 923 }
924 924
925 send->s_wr.next = NULL; 925 send->s_rdma_wr.wr.next = NULL;
926 926
927 if (prev) 927 if (prev)
928 prev->s_wr.next = &send->s_wr; 928 prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;
929 929
930 for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { 930 for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
931 scat != &op->op_sg[op->op_count]; j++) {
931 len = ib_sg_dma_len(ic->i_cm_id->device, scat); 932 len = ib_sg_dma_len(ic->i_cm_id->device, scat);
932 send->s_sge[j].addr = 933 send->s_sge[j].addr =
933 ib_sg_dma_address(ic->i_cm_id->device, scat); 934 ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -942,7 +943,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
942 } 943 }
943 944
944 rdsdebug("send %p wr %p num_sge %u next %p\n", send, 945 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
945 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); 946 &send->s_rdma_wr.wr,
947 send->s_rdma_wr.wr.num_sge,
948 send->s_rdma_wr.wr.next);
946 949
947 prev = send; 950 prev = send;
948 if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) 951 if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
@@ -963,11 +966,11 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
963 if (nr_sig) 966 if (nr_sig)
964 atomic_add(nr_sig, &ic->i_signaled_sends); 967 atomic_add(nr_sig, &ic->i_signaled_sends);
965 968
966 failed_wr = &first->s_wr; 969 failed_wr = &first->s_rdma_wr.wr;
967 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 970 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
968 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 971 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
969 first, &first->s_wr, ret, failed_wr); 972 first, &first->s_rdma_wr.wr, ret, failed_wr);
970 BUG_ON(failed_wr != &first->s_wr); 973 BUG_ON(failed_wr != &first->s_rdma_wr.wr);
971 if (ret) { 974 if (ret) {
972 printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " 975 printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
973 "returned %d\n", &conn->c_faddr, ret); 976 "returned %d\n", &conn->c_faddr, ret);
@@ -976,9 +979,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
976 goto out; 979 goto out;
977 } 980 }
978 981
979 if (unlikely(failed_wr != &first->s_wr)) { 982 if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
980 printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret); 983 printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
981 BUG_ON(failed_wr != &first->s_wr); 984 BUG_ON(failed_wr != &first->s_rdma_wr.wr);
982 } 985 }
983 986
984 987
diff --git a/net/rds/iw.h b/net/rds/iw.h
index cbe6674e31ee..fe858e5dd8d1 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -77,7 +77,11 @@ struct rds_iw_send_work {
77 struct ib_fast_reg_page_list *s_page_list; 77 struct ib_fast_reg_page_list *s_page_list;
78 unsigned char s_remap_count; 78 unsigned char s_remap_count;
79 79
80 struct ib_send_wr s_wr; 80 union {
81 struct ib_send_wr s_send_wr;
82 struct ib_rdma_wr s_rdma_wr;
83 struct ib_fast_reg_wr s_fast_reg_wr;
84 };
81 struct ib_sge s_sge[RDS_IW_MAX_SGE]; 85 struct ib_sge s_sge[RDS_IW_MAX_SGE];
82 unsigned long s_queued; 86 unsigned long s_queued;
83}; 87};
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 6a8fbd6e69e7..f8a612cc69e6 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -696,7 +696,8 @@ static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
696static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping) 696static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
697{ 697{
698 struct rds_iw_mr *ibmr = mapping->m_mr; 698 struct rds_iw_mr *ibmr = mapping->m_mr;
699 struct ib_send_wr f_wr, *failed_wr; 699 struct ib_fast_reg_wr f_wr;
700 struct ib_send_wr *failed_wr;
700 int ret; 701 int ret;
701 702
702 /* 703 /*
@@ -709,22 +710,22 @@ static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
709 mapping->m_rkey = ibmr->mr->rkey; 710 mapping->m_rkey = ibmr->mr->rkey;
710 711
711 memset(&f_wr, 0, sizeof(f_wr)); 712 memset(&f_wr, 0, sizeof(f_wr));
712 f_wr.wr_id = RDS_IW_FAST_REG_WR_ID; 713 f_wr.wr.wr_id = RDS_IW_FAST_REG_WR_ID;
713 f_wr.opcode = IB_WR_FAST_REG_MR; 714 f_wr.wr.opcode = IB_WR_FAST_REG_MR;
714 f_wr.wr.fast_reg.length = mapping->m_sg.bytes; 715 f_wr.length = mapping->m_sg.bytes;
715 f_wr.wr.fast_reg.rkey = mapping->m_rkey; 716 f_wr.rkey = mapping->m_rkey;
716 f_wr.wr.fast_reg.page_list = ibmr->page_list; 717 f_wr.page_list = ibmr->page_list;
717 f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len; 718 f_wr.page_list_len = mapping->m_sg.dma_len;
718 f_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 719 f_wr.page_shift = PAGE_SHIFT;
719 f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE | 720 f_wr.access_flags = IB_ACCESS_LOCAL_WRITE |
720 IB_ACCESS_REMOTE_READ | 721 IB_ACCESS_REMOTE_READ |
721 IB_ACCESS_REMOTE_WRITE; 722 IB_ACCESS_REMOTE_WRITE;
722 f_wr.wr.fast_reg.iova_start = 0; 723 f_wr.iova_start = 0;
723 f_wr.send_flags = IB_SEND_SIGNALED; 724 f_wr.wr.send_flags = IB_SEND_SIGNALED;
724 725
725 failed_wr = &f_wr; 726 failed_wr = &f_wr.wr;
726 ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr); 727 ret = ib_post_send(ibmr->cm_id->qp, &f_wr.wr, &failed_wr);
727 BUG_ON(failed_wr != &f_wr); 728 BUG_ON(failed_wr != &f_wr.wr);
728 if (ret) 729 if (ret)
729 printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n", 730 printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
730 __func__, __LINE__, ret); 731 __func__, __LINE__, ret);
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 86152ec3b887..f6e23c515b44 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -137,13 +137,13 @@ void rds_iw_send_init_ring(struct rds_iw_connection *ic)
137 send->s_op = NULL; 137 send->s_op = NULL;
138 send->s_mapping = NULL; 138 send->s_mapping = NULL;
139 139
140 send->s_wr.next = NULL; 140 send->s_send_wr.next = NULL;
141 send->s_wr.wr_id = i; 141 send->s_send_wr.wr_id = i;
142 send->s_wr.sg_list = send->s_sge; 142 send->s_send_wr.sg_list = send->s_sge;
143 send->s_wr.num_sge = 1; 143 send->s_send_wr.num_sge = 1;
144 send->s_wr.opcode = IB_WR_SEND; 144 send->s_send_wr.opcode = IB_WR_SEND;
145 send->s_wr.send_flags = 0; 145 send->s_send_wr.send_flags = 0;
146 send->s_wr.ex.imm_data = 0; 146 send->s_send_wr.ex.imm_data = 0;
147 147
148 sge = rds_iw_data_sge(ic, send->s_sge); 148 sge = rds_iw_data_sge(ic, send->s_sge);
149 sge->lkey = 0; 149 sge->lkey = 0;
@@ -179,7 +179,7 @@ void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
179 ib_dereg_mr(send->s_mr); 179 ib_dereg_mr(send->s_mr);
180 BUG_ON(!send->s_page_list); 180 BUG_ON(!send->s_page_list);
181 ib_free_fast_reg_page_list(send->s_page_list); 181 ib_free_fast_reg_page_list(send->s_page_list);
182 if (send->s_wr.opcode == 0xdead) 182 if (send->s_send_wr.opcode == 0xdead)
183 continue; 183 continue;
184 if (send->s_rm) 184 if (send->s_rm)
185 rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR); 185 rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
@@ -247,7 +247,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
247 send = &ic->i_sends[oldest]; 247 send = &ic->i_sends[oldest];
248 248
249 /* In the error case, wc.opcode sometimes contains garbage */ 249 /* In the error case, wc.opcode sometimes contains garbage */
250 switch (send->s_wr.opcode) { 250 switch (send->s_send_wr.opcode) {
251 case IB_WR_SEND: 251 case IB_WR_SEND:
252 if (send->s_rm) 252 if (send->s_rm)
253 rds_iw_send_unmap_rm(ic, send, wc.status); 253 rds_iw_send_unmap_rm(ic, send, wc.status);
@@ -262,12 +262,12 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
262 default: 262 default:
263 printk_ratelimited(KERN_NOTICE 263 printk_ratelimited(KERN_NOTICE
264 "RDS/IW: %s: unexpected opcode 0x%x in WR!\n", 264 "RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
265 __func__, send->s_wr.opcode); 265 __func__, send->s_send_wr.opcode);
266 break; 266 break;
267 } 267 }
268 268
269 send->s_wr.opcode = 0xdead; 269 send->s_send_wr.opcode = 0xdead;
270 send->s_wr.num_sge = 1; 270 send->s_send_wr.num_sge = 1;
271 if (time_after(jiffies, send->s_queued + HZ/2)) 271 if (time_after(jiffies, send->s_queued + HZ/2))
272 rds_iw_stats_inc(s_iw_tx_stalled); 272 rds_iw_stats_inc(s_iw_tx_stalled);
273 273
@@ -455,10 +455,10 @@ rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
455 455
456 WARN_ON(pos != send - ic->i_sends); 456 WARN_ON(pos != send - ic->i_sends);
457 457
458 send->s_wr.send_flags = send_flags; 458 send->s_send_wr.send_flags = send_flags;
459 send->s_wr.opcode = IB_WR_SEND; 459 send->s_send_wr.opcode = IB_WR_SEND;
460 send->s_wr.num_sge = 2; 460 send->s_send_wr.num_sge = 2;
461 send->s_wr.next = NULL; 461 send->s_send_wr.next = NULL;
462 send->s_queued = jiffies; 462 send->s_queued = jiffies;
463 send->s_op = NULL; 463 send->s_op = NULL;
464 464
@@ -472,7 +472,7 @@ rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
472 } else { 472 } else {
473 /* We're sending a packet with no payload. There is only 473 /* We're sending a packet with no payload. There is only
474 * one SGE */ 474 * one SGE */
475 send->s_wr.num_sge = 1; 475 send->s_send_wr.num_sge = 1;
476 sge = &send->s_sge[0]; 476 sge = &send->s_sge[0];
477 } 477 }
478 478
@@ -672,23 +672,23 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
672 */ 672 */
673 if (ic->i_unsignaled_wrs-- == 0) { 673 if (ic->i_unsignaled_wrs-- == 0) {
674 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; 674 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
675 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 675 send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
676 } 676 }
677 677
678 ic->i_unsignaled_bytes -= len; 678 ic->i_unsignaled_bytes -= len;
679 if (ic->i_unsignaled_bytes <= 0) { 679 if (ic->i_unsignaled_bytes <= 0) {
680 ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes; 680 ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
681 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 681 send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
682 } 682 }
683 683
684 /* 684 /*
685 * Always signal the last one if we're stopping due to flow control. 685 * Always signal the last one if we're stopping due to flow control.
686 */ 686 */
687 if (flow_controlled && i == (work_alloc-1)) 687 if (flow_controlled && i == (work_alloc-1))
688 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 688 send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
689 689
690 rdsdebug("send %p wr %p num_sge %u next %p\n", send, 690 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
691 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); 691 &send->s_send_wr, send->s_send_wr.num_sge, send->s_send_wr.next);
692 692
693 sent += len; 693 sent += len;
694 rm->data.op_dmaoff += len; 694 rm->data.op_dmaoff += len;
@@ -722,7 +722,7 @@ add_header:
722 } 722 }
723 723
724 if (prev) 724 if (prev)
725 prev->s_wr.next = &send->s_wr; 725 prev->s_send_wr.next = &send->s_send_wr;
726 prev = send; 726 prev = send;
727 727
728 pos = (pos + 1) % ic->i_send_ring.w_nr; 728 pos = (pos + 1) % ic->i_send_ring.w_nr;
@@ -736,7 +736,7 @@ add_header:
736 /* if we finished the message then send completion owns it */ 736 /* if we finished the message then send completion owns it */
737 if (scat == &rm->data.op_sg[rm->data.op_count]) { 737 if (scat == &rm->data.op_sg[rm->data.op_count]) {
738 prev->s_rm = ic->i_rm; 738 prev->s_rm = ic->i_rm;
739 prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 739 prev->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
740 ic->i_rm = NULL; 740 ic->i_rm = NULL;
741 } 741 }
742 742
@@ -748,11 +748,11 @@ add_header:
748 rds_iw_send_add_credits(conn, credit_alloc - i); 748 rds_iw_send_add_credits(conn, credit_alloc - i);
749 749
750 /* XXX need to worry about failed_wr and partial sends. */ 750 /* XXX need to worry about failed_wr and partial sends. */
751 failed_wr = &first->s_wr; 751 failed_wr = &first->s_send_wr;
752 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 752 ret = ib_post_send(ic->i_cm_id->qp, &first->s_send_wr, &failed_wr);
753 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 753 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
754 first, &first->s_wr, ret, failed_wr); 754 first, &first->s_send_wr, ret, failed_wr);
755 BUG_ON(failed_wr != &first->s_wr); 755 BUG_ON(failed_wr != &first->s_send_wr);
756 if (ret) { 756 if (ret) {
757 printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 " 757 printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 "
758 "returned %d\n", &conn->c_faddr, ret); 758 "returned %d\n", &conn->c_faddr, ret);
@@ -778,14 +778,14 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
778 * in the sg list is added to the fast reg page list and placed 778 * in the sg list is added to the fast reg page list and placed
779 * inside the fast_reg_mr WR. 779 * inside the fast_reg_mr WR.
780 */ 780 */
781 send->s_wr.opcode = IB_WR_FAST_REG_MR; 781 send->s_fast_reg_wr.wr.opcode = IB_WR_FAST_REG_MR;
782 send->s_wr.wr.fast_reg.length = len; 782 send->s_fast_reg_wr.length = len;
783 send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey; 783 send->s_fast_reg_wr.rkey = send->s_mr->rkey;
784 send->s_wr.wr.fast_reg.page_list = send->s_page_list; 784 send->s_fast_reg_wr.page_list = send->s_page_list;
785 send->s_wr.wr.fast_reg.page_list_len = nent; 785 send->s_fast_reg_wr.page_list_len = nent;
786 send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 786 send->s_fast_reg_wr.page_shift = PAGE_SHIFT;
787 send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE; 787 send->s_fast_reg_wr.access_flags = IB_ACCESS_REMOTE_WRITE;
788 send->s_wr.wr.fast_reg.iova_start = sg_addr; 788 send->s_fast_reg_wr.iova_start = sg_addr;
789 789
790 ib_update_fast_reg_key(send->s_mr, send->s_remap_count++); 790 ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
791} 791}
@@ -863,7 +863,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
863 num_sge = op->op_count; 863 num_sge = op->op_count;
864 864
865 for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { 865 for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
866 send->s_wr.send_flags = 0; 866 send->s_rdma_wr.wr.send_flags = 0;
867 send->s_queued = jiffies; 867 send->s_queued = jiffies;
868 868
869 /* 869 /*
@@ -872,7 +872,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
872 */ 872 */
873 if (ic->i_unsignaled_wrs-- == 0) { 873 if (ic->i_unsignaled_wrs-- == 0) {
874 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; 874 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
875 send->s_wr.send_flags = IB_SEND_SIGNALED; 875 send->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
876 } 876 }
877 877
878 /* To avoid the need to have the plumbing to invalidate the fastreg_mr used 878 /* To avoid the need to have the plumbing to invalidate the fastreg_mr used
@@ -880,29 +880,30 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
880 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed. 880 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
881 */ 881 */
882 if (op->op_write) 882 if (op->op_write)
883 send->s_wr.opcode = IB_WR_RDMA_WRITE; 883 send->s_rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
884 else 884 else
885 send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV; 885 send->s_rdma_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
886 886
887 send->s_wr.wr.rdma.remote_addr = remote_addr; 887 send->s_rdma_wr.remote_addr = remote_addr;
888 send->s_wr.wr.rdma.rkey = op->op_rkey; 888 send->s_rdma_wr.rkey = op->op_rkey;
889 send->s_op = op; 889 send->s_op = op;
890 890
891 if (num_sge > rds_iwdev->max_sge) { 891 if (num_sge > rds_iwdev->max_sge) {
892 send->s_wr.num_sge = rds_iwdev->max_sge; 892 send->s_rdma_wr.wr.num_sge = rds_iwdev->max_sge;
893 num_sge -= rds_iwdev->max_sge; 893 num_sge -= rds_iwdev->max_sge;
894 } else 894 } else
895 send->s_wr.num_sge = num_sge; 895 send->s_rdma_wr.wr.num_sge = num_sge;
896 896
897 send->s_wr.next = NULL; 897 send->s_rdma_wr.wr.next = NULL;
898 898
899 if (prev) 899 if (prev)
900 prev->s_wr.next = &send->s_wr; 900 prev->s_send_wr.next = &send->s_rdma_wr.wr;
901 901
902 for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { 902 for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
903 scat != &op->op_sg[op->op_count]; j++) {
903 len = ib_sg_dma_len(ic->i_cm_id->device, scat); 904 len = ib_sg_dma_len(ic->i_cm_id->device, scat);
904 905
905 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) 906 if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV)
906 send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat); 907 send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat);
907 else { 908 else {
908 send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); 909 send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -917,15 +918,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
917 scat++; 918 scat++;
918 } 919 }
919 920
920 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) { 921 if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
921 send->s_wr.num_sge = 1; 922 send->s_rdma_wr.wr.num_sge = 1;
922 send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr; 923 send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
923 send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes; 924 send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
924 send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey; 925 send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
925 } 926 }
926 927
927 rdsdebug("send %p wr %p num_sge %u next %p\n", send, 928 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
928 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); 929 &send->s_rdma_wr,
930 send->s_rdma_wr.wr.num_sge,
931 send->s_rdma_wr.wr.next);
929 932
930 prev = send; 933 prev = send;
931 if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) 934 if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
@@ -934,7 +937,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
934 937
935 /* if we finished the message then send completion owns it */ 938 /* if we finished the message then send completion owns it */
936 if (scat == &op->op_sg[op->op_count]) 939 if (scat == &op->op_sg[op->op_count])
937 first->s_wr.send_flags = IB_SEND_SIGNALED; 940 first->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
938 941
939 if (i < work_alloc) { 942 if (i < work_alloc) {
940 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i); 943 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
@@ -953,11 +956,11 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
953 work_alloc++; 956 work_alloc++;
954 } 957 }
955 958
956 failed_wr = &first->s_wr; 959 failed_wr = &first->s_rdma_wr.wr;
957 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 960 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
958 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 961 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
959 first, &first->s_wr, ret, failed_wr); 962 first, &first->s_rdma_wr, ret, failed_wr);
960 BUG_ON(failed_wr != &first->s_wr); 963 BUG_ON(failed_wr != &first->s_rdma_wr.wr);
961 if (ret) { 964 if (ret) {
962 printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 " 965 printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 "
963 "returned %d\n", &conn->c_faddr, ret); 966 "returned %d\n", &conn->c_faddr, ret);