author	Andy Grover <andy.grover@oracle.com>	2010-03-01 17:11:53 -0500
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:11:58 -0400
commit	f8b3aaf2ba8ca9e27b47f8bfdff07c8b968f2c05 (patch)
tree	42de574167e590ca2e413314b6486df916cd1f31 /net/rds
parent	d0ab25a83c4a08cd98b73a37d3f4c069f7b4f50b (diff)
RDS: Remove struct rds_rdma_op
A big changeset, but it's all pretty dumb.

struct rds_rdma_op was already embedded in struct rm_rdma_op. Remove
rds_rdma_op and put its members in rm_rdma_op. Rename members with
"op_" prefix instead of "r_", for consistency.

Of course this breaks a lot, so fixup the code accordingly.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
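The heart of the patch is the rds.h change: the standalone struct and its
one-member wrapper collapse into a single embedded struct. A condensed
before/after sketch, with fields copied from the rds.h hunk below (the two
versions are shown together for illustration only and would not compile as
one unit; struct rds_mr, struct rds_notifier and struct scatterlist are the
usual kernel types from the surrounding headers):

```c
/* Before: every access went through the wrapper,
 * e.g. rm->rdma.m_rdma_op.r_active. */
struct rds_rdma_op {
	u32			r_key;
	u64			r_remote_addr;
	unsigned int		r_write:1;
	unsigned int		r_fence:1;
	unsigned int		r_notify:1;
	unsigned int		r_recverr:1;
	unsigned int		r_mapped:1;
	unsigned int		r_active:1;
	struct rds_notifier	*r_notifier;
	unsigned int		r_bytes;
	unsigned int		r_nents;
	unsigned int		r_count;
	struct scatterlist	*r_sg;
};

struct rm_rdma_op {
	struct rds_rdma_op	m_rdma_op;	/* the only real payload */
	struct rds_mr		*m_rdma_mr;
};

/* After: one flat struct, members renamed "r_" -> "op_", so the same
 * access becomes rm->rdma.op_active, and "&rm->rdma.m_rdma_op" is
 * simply "&rm->rdma". */
struct rm_rdma_op {
	u32			op_rkey;	/* was r_key */
	u64			op_remote_addr;
	unsigned int		op_write:1;
	unsigned int		op_fence:1;
	unsigned int		op_notify:1;
	unsigned int		op_recverr:1;
	unsigned int		op_mapped:1;
	unsigned int		op_active:1;
	unsigned int		op_bytes;
	unsigned int		op_nents;
	unsigned int		op_count;
	struct scatterlist	*op_sg;
	struct rds_notifier	*op_notifier;
	struct rds_mr		*op_rdma_mr;	/* was m_rdma_mr */
};
```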
Diffstat (limited to 'net/rds')
-rw-r--r--	net/rds/ib.h	4
-rw-r--r--	net/rds/ib_send.c	60
-rw-r--r--	net/rds/iw.h	4
-rw-r--r--	net/rds/iw_send.c	68
-rw-r--r--	net/rds/message.c	8
-rw-r--r--	net/rds/rdma.c	58
-rw-r--r--	net/rds/rds.h	41
-rw-r--r--	net/rds/send.c	50
8 files changed, 145 insertions, 148 deletions
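The rest of the diff is the mechanical fallout of that rename. As a reading
aid (a summary of the hunks below, not itself part of the patch):

```c
/*
 * Field accesses change uniformly across all eight files:
 *
 *   rm->rdma.m_rdma_op.r_active   ->  rm->rdma.op_active
 *   rm->rdma.m_rdma_op.r_key      ->  rm->rdma.op_rkey
 *   rm->rdma.m_rdma_mr            ->  rm->rdma.op_rdma_mr
 *   &rm->rdma.m_rdma_op           ->  &rm->rdma
 *
 * and every prototype that took a struct rds_rdma_op * now takes a
 * struct rm_rdma_op *, e.g.:
 */
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_rdma_free_op(struct rm_rdma_op *ro);
```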
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 96769b86a536..d64b5087eefe 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -54,7 +54,7 @@ struct rds_ib_connect_private {
 
 struct rds_ib_send_work {
 	struct rds_message	*s_rm;
-	struct rds_rdma_op	*s_op;
+	struct rm_rdma_op	*s_op;
 	struct ib_send_wr	s_wr;
 	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
 	unsigned long		s_queued;
@@ -331,7 +331,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_ib_send_init_ring(struct rds_ib_connection *ic);
 void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
 void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
 void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
 int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index e6745d827c3a..63981cd1827a 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -79,14 +79,14 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		     rm->data.m_sg, rm->data.m_nents,
 		     DMA_TO_DEVICE);
 
-	if (rm->rdma.m_rdma_op.r_active) {
-		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;
+	if (rm->rdma.op_active) {
+		struct rm_rdma_op *op = &rm->rdma;
 
-		if (op->r_mapped) {
+		if (op->op_mapped) {
 			ib_dma_unmap_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents,
-					op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-			op->r_mapped = 0;
+					op->op_sg, op->op_nents,
+					op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			op->op_mapped = 0;
 		}
 
 		/* If the user asked for a completion notification on this
@@ -111,10 +111,10 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		 */
 		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);
 
-		if (rm->rdma.m_rdma_op.r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 
 	if (rm->atomic.op_active) {
@@ -540,10 +540,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->rdma.m_rdma_op.r_active) {
+	if (rm->rdma.op_active) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -576,7 +576,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/* Each frag gets a header. Msgs may be 0 bytes */
@@ -746,7 +746,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rds_message *rm)
 	 * we must fill in s_rm ourselves, so we properly clean up
 	 * on completion.
 	 */
-	if (!rm->rdma.m_rdma_op.r_active && !rm->data.op_active)
+	if (!rm->rdma.op_active && !rm->data.op_active)
 		send->s_rm = rm;
 
 	/* map 8 byte retval buffer to the device */
@@ -788,7 +788,7 @@ out:
 	return ret;
 }
 
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_send_work *send = NULL;
@@ -798,7 +798,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_ib_device *rds_ibdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos;
 	u32 work_alloc;
 	u32 i;
@@ -810,25 +810,25 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
 
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents, (op->r_write) ?
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+					op->op_sg, op->op_nents, (op->op_write) ?
 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 
 	/*
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_ibdev->max_sge);
+	i = ceil(op->op_count, rds_ibdev->max_sge);
 
 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -841,19 +841,19 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;
 
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
 
-		rds_ib_set_wr_signal_state(ic, send, op->r_notify);
+		rds_ib_set_wr_signal_state(ic, send, op->op_notify);
 
-		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
+		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 
 		if (num_sge > rds_ibdev->max_sge) {
@@ -868,7 +868,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
 
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 			send->s_sge[j].addr =
 				 ib_sg_dma_address(ic->i_cm_id->device, scat);
diff --git a/net/rds/iw.h b/net/rds/iw.h
index 6f08300851ad..f112105faced 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -70,7 +70,7 @@ struct rds_iw_send_work {
 	struct rds_message	*s_rm;
 
 	/* We should really put these into a union: */
-	struct rds_rdma_op	*s_op;
+	struct rm_rdma_op	*s_op;
 	struct rds_iw_mapping	*s_mapping;
 	struct ib_mr		*s_mr;
 	struct ib_fast_reg_page_list *s_page_list;
@@ -357,7 +357,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_iw_send_init_ring(struct rds_iw_connection *ic);
 void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
 void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
 void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
 int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 9b79a1b10445..05ebf16ecad7 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -63,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
 }
 
 static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
-				   struct rds_rdma_op *op)
+				   struct rm_rdma_op *op)
 {
-	if (op->r_mapped) {
+	if (op->op_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
-			op->r_sg, op->r_nents,
-			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		op->r_mapped = 0;
+			op->op_sg, op->op_nents,
+			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->op_mapped = 0;
 	}
 }
 
@@ -85,8 +85,8 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		     rm->data.m_sg, rm->data.m_nents,
 		     DMA_TO_DEVICE);
 
-	if (rm->rdma.m_rdma_op.r_active) {
-		rds_iw_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);
+	if (rm->rdma.op_active) {
+		rds_iw_send_unmap_rdma(ic, &rm->rdma);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -110,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		 */
 		rds_iw_send_rdma_complete(rm, wc_status);
 
-		if (rm->rdma.m_rdma_op.r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -591,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->rdma.m_rdma_op.r_active) {
+	if (rm->rdma.op_active) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -632,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
@@ -785,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
 	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
 }
 
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_iw_connection *ic = conn->c_transport_data;
 	struct rds_iw_send_work *send = NULL;
@@ -795,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_iw_device *rds_iwdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos, fr_pos;
 	u32 work_alloc;
 	u32 i;
@@ -807,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
 
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-				op->r_sg, op->r_nents, (op->r_write) ?
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+				op->op_sg, op->op_nents, (op->op_write) ?
 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 
-	if (!op->r_write) {
+	if (!op->op_write) {
 		/* Alloc space on the send queue for the fastreg */
 		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
 		if (work_alloc != 1) {
@@ -836,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_iwdev->max_sge);
+	i = ceil(op->op_count, rds_iwdev->max_sge);
 
 	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -847,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	send = &ic->i_sends[pos];
-	if (!op->r_write) {
+	if (!op->op_write) {
 		first = prev = &ic->i_sends[fr_pos];
 	} else {
 		first = send;
 		prev = NULL;
 	}
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;
 
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
 
@@ -874,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		 * for local access after RDS is finished with it, using
 		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
 		 */
-		if (op->r_write)
+		if (op->op_write)
 			send->s_wr.opcode = IB_WR_RDMA_WRITE;
 		else
 			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
 
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 
 		if (num_sge > rds_iwdev->max_sge) {
@@ -894,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
 
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 
 			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -928,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count])
+	if (scat == &op->op_sg[op->op_count])
 		first->s_wr.send_flags = IB_SEND_SIGNALED;
 
 	if (i < work_alloc) {
@@ -942,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * adapters do not allow using the lkey for this at all.  To bypass this use a
 	 * fastreg_mr (or possibly a dma_mr)
	 */
-	if (!op->r_write) {
+	if (!op->op_write) {
 		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
-			op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+			op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
 		work_alloc++;
 	}
 
diff --git a/net/rds/message.c b/net/rds/message.c
index b53306c3e656..bca7eda6dde9 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -69,10 +69,10 @@ static void rds_message_purge(struct rds_message *rm)
 	}
 	rm->data.m_nents = 0;
 
-	if (rm->rdma.m_rdma_op.r_active)
-		rds_rdma_free_op(&rm->rdma.m_rdma_op);
-	if (rm->rdma.m_rdma_mr)
-		rds_mr_put(rm->rdma.m_rdma_mr);
+	if (rm->rdma.op_active)
+		rds_rdma_free_op(&rm->rdma);
+	if (rm->rdma.op_rdma_mr)
+		rds_mr_put(rm->rdma.op_rdma_mr);
 
 	if (rm->atomic.op_active)
 		rds_atomic_free_op(&rm->atomic);
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 0df86a382e2e..8d22999b0471 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -440,26 +440,26 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
 	rds_mr_put(mr);
 }
 
-void rds_rdma_free_op(struct rds_rdma_op *ro)
+void rds_rdma_free_op(struct rm_rdma_op *ro)
 {
 	unsigned int i;
 
-	for (i = 0; i < ro->r_nents; i++) {
-		struct page *page = sg_page(&ro->r_sg[i]);
+	for (i = 0; i < ro->op_nents; i++) {
+		struct page *page = sg_page(&ro->op_sg[i]);
 
 		/* Mark page dirty if it was possibly modified, which
 		 * is the case for a RDMA_READ which copies from remote
 		 * to local memory */
-		if (!ro->r_write) {
+		if (!ro->op_write) {
 			BUG_ON(irqs_disabled());
 			set_page_dirty(page);
 		}
 		put_page(page);
 	}
 
-	kfree(ro->r_notifier);
-	ro->r_notifier = NULL;
-	ro->r_active = 0;
+	kfree(ro->op_notifier);
+	ro->op_notifier = NULL;
+	ro->op_active = 0;
 }
 
 void rds_atomic_free_op(struct rm_atomic_op *ao)
@@ -521,7 +521,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 {
 	struct rds_rdma_args *args;
 	struct rds_iovec vec;
-	struct rds_rdma_op *op = &rm->rdma.m_rdma_op;
+	struct rm_rdma_op *op = &rm->rdma;
 	unsigned int nr_pages;
 	unsigned int nr_bytes;
 	struct page **pages = NULL;
@@ -531,7 +531,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	int ret = 0;
 
 	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
-	    || rm->rdma.m_rdma_op.r_active)
+	    || rm->rdma.op_active)
 		return -EINVAL;
 
 	args = CMSG_DATA(cmsg);
@@ -556,27 +556,27 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		goto out;
 	}
 
-	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
-	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
-	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
-	op->r_active = 1;
-	op->r_recverr = rs->rs_recverr;
+	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
+	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
+	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+	op->op_active = 1;
+	op->op_recverr = rs->rs_recverr;
 	WARN_ON(!nr_pages);
-	op->r_sg = rds_message_alloc_sgs(rm, nr_pages);
+	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
 
-	if (op->r_notify || op->r_recverr) {
+	if (op->op_notify || op->op_recverr) {
 		/* We allocate an uninitialized notifier here, because
 		 * we don't want to do that in the completion handler. We
 		 * would have to use GFP_ATOMIC there, and don't want to deal
 		 * with failed allocations.
 		 */
-		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
-		if (!op->r_notifier) {
+		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
+		if (!op->op_notifier) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		op->r_notifier->n_user_token = args->user_token;
-		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
+		op->op_notifier->n_user_token = args->user_token;
+		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
 	}
 
 	/* The cookie contains the R_Key of the remote memory region, and
@@ -586,15 +586,15 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	 * destination address (which is really an offset into the MR)
 	 * FIXME: We may want to move this into ib_rdma.c
 	 */
-	op->r_key = rds_rdma_cookie_key(args->cookie);
-	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
+	op->op_rkey = rds_rdma_cookie_key(args->cookie);
+	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
 
 	nr_bytes = 0;
 
 	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
 	       (unsigned long long)args->nr_local,
 	       (unsigned long long)args->remote_vec.addr,
-	       op->r_key);
+	       op->op_rkey);
 
 	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
 
@@ -617,7 +617,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		/* If it's a WRITE operation, we want to pin the pages for reading.
 		 * If it's a READ operation, we need to pin the pages for writing.
 		 */
-		ret = rds_pin_pages(vec.addr, nr, pages, !op->r_write);
+		ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
 		if (ret < 0)
 			goto out;
 
@@ -630,7 +630,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			unsigned int offset = vec.addr & ~PAGE_MASK;
 			struct scatterlist *sg;
 
-			sg = &op->r_sg[op->r_nents + j];
+			sg = &op->op_sg[op->op_nents + j];
 			sg_set_page(sg, pages[j],
 					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
 					offset);
@@ -642,7 +642,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			vec.bytes -= sg->length;
 		}
 
-		op->r_nents += nr;
+		op->op_nents += nr;
 	}
 
 	if (nr_bytes > args->remote_vec.bytes) {
@@ -652,7 +652,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		ret = -EINVAL;
 		goto out;
 	}
-	op->r_bytes = nr_bytes;
+	op->op_bytes = nr_bytes;
 
 	ret = 0;
 out:
@@ -700,7 +700,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 
 	if (mr) {
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
-		rm->rdma.m_rdma_mr = mr;
+		rm->rdma.op_rdma_mr = mr;
 	}
 	return err;
 }
@@ -718,7 +718,7 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
 
-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.m_rdma_mr);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
 }
 
 /*
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 32b3d46aea36..76eeb5988b5f 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -230,22 +230,6 @@ struct rds_mr {
 /* Flags for mr->r_state */
 #define RDS_MR_DEAD		0
 
-struct rds_rdma_op {
-	u32			r_key;
-	u64			r_remote_addr;
-	unsigned int		r_write:1;
-	unsigned int		r_fence:1;
-	unsigned int		r_notify:1;
-	unsigned int		r_recverr:1;
-	unsigned int		r_mapped:1;
-	unsigned int		r_active:1;
-	struct rds_notifier	*r_notifier;
-	unsigned int		r_bytes;
-	unsigned int		r_nents;
-	unsigned int		r_count;
-	struct scatterlist	*r_sg;
-};
-
 static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
 {
 	return r_key | (((u64) offset) << 32);
@@ -331,14 +315,27 @@ struct rds_message {
 			unsigned int		op_recverr:1;
 			unsigned int		op_mapped:1;
 			unsigned int		op_active:1;
-			struct rds_notifier	*op_notifier;
 			struct scatterlist	*op_sg;
+			struct rds_notifier	*op_notifier;
 
 			struct rds_mr		*op_rdma_mr;
 		} atomic;
 		struct rm_rdma_op {
-			struct rds_rdma_op	m_rdma_op;
-			struct rds_mr		*m_rdma_mr;
+			u32			op_rkey;
+			u64			op_remote_addr;
+			unsigned int		op_write:1;
+			unsigned int		op_fence:1;
+			unsigned int		op_notify:1;
+			unsigned int		op_recverr:1;
+			unsigned int		op_mapped:1;
+			unsigned int		op_active:1;
+			unsigned int		op_bytes;
+			unsigned int		op_nents;
+			unsigned int		op_count;
+			struct scatterlist	*op_sg;
+			struct rds_notifier	*op_notifier;
+
+			struct rds_mr		*op_rdma_mr;
 		} rdma;
 		struct rm_data_op {
 			unsigned int		op_active:1;
@@ -418,7 +415,7 @@ struct rds_transport {
 		    unsigned int hdr_off, unsigned int sg, unsigned int off);
 	int (*xmit_cong_map)(struct rds_connection *conn,
 			     struct rds_cong_map *map, unsigned long offset);
-	int (*xmit_rdma)(struct rds_connection *conn, struct rds_rdma_op *op);
+	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
 	int (*xmit_atomic)(struct rds_connection *conn, struct rds_message *rm);
 	int (*recv)(struct rds_connection *conn);
 	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
@@ -727,7 +724,7 @@ int rds_send_acked_before(struct rds_connection *conn, u64 seq);
 void rds_send_remove_from_sock(struct list_head *messages, int status);
 int rds_send_pong(struct rds_connection *conn, __be16 dport);
 struct rds_message *rds_send_get_message(struct rds_connection *,
-					 struct rds_rdma_op *);
+					 struct rm_rdma_op *);
 
 /* rdma.c */
 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
@@ -744,7 +741,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			  struct cmsghdr *cmsg);
 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 		      struct cmsghdr *cmsg);
-void rds_rdma_free_op(struct rds_rdma_op *ro);
+void rds_rdma_free_op(struct rm_rdma_op *ro);
 void rds_atomic_free_op(struct rm_atomic_op *ao);
 void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
 void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
diff --git a/net/rds/send.c b/net/rds/send.c
index 42fb934293be..08df279ced2a 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -237,7 +237,7 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * connection.
 		 * Therefore, we never retransmit messages with RDMA ops.
 		 */
-		if (rm->rdma.m_rdma_op.r_active &&
+		if (rm->rdma.op_active &&
 		    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 			spin_lock_irqsave(&conn->c_lock, flags);
 			if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
@@ -280,8 +280,8 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * keep this simple and require that the transport either
 		 * send the whole rdma or none of it.
 		 */
-		if (rm->rdma.m_rdma_op.r_active && !conn->c_xmit_rdma_sent) {
-			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma.m_rdma_op);
+		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
+			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 			if (ret)
 				break;
 			conn->c_xmit_rdma_sent = 1;
@@ -430,16 +430,16 @@ int rds_send_acked_before(struct rds_connection *conn, u64 seq)
 void rds_rdma_send_complete(struct rds_message *rm, int status)
 {
 	struct rds_sock *rs = NULL;
-	struct rds_rdma_op *ro;
+	struct rm_rdma_op *ro;
 	struct rds_notifier *notifier;
 	unsigned long flags;
 
 	spin_lock_irqsave(&rm->m_rs_lock, flags);
 
-	ro = &rm->rdma.m_rdma_op;
+	ro = &rm->rdma;
 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
-	    ro->r_active && ro->r_notify && ro->r_notifier) {
-		notifier = ro->r_notifier;
+	    ro->op_active && ro->op_notify && ro->op_notifier) {
+		notifier = ro->op_notifier;
 		rs = rm->m_rs;
 		sock_hold(rds_rs_to_sk(rs));
 
@@ -448,7 +448,7 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 		spin_unlock(&rs->rs_lock);
 
-		ro->r_notifier = NULL;
+		ro->op_notifier = NULL;
 	}
 
 	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
@@ -503,13 +503,13 @@ EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 static inline void
 __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 {
-	struct rds_rdma_op *ro;
+	struct rm_rdma_op *ro;
 
-	ro = &rm->rdma.m_rdma_op;
-	if (ro->r_active && ro->r_notify && ro->r_notifier) {
-		ro->r_notifier->n_status = status;
-		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
-		ro->r_notifier = NULL;
+	ro = &rm->rdma;
+	if (ro->op_active && ro->op_notify && ro->op_notifier) {
+		ro->op_notifier->n_status = status;
+		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
+		ro->op_notifier = NULL;
 	}
 
 	/* No need to wake the app - caller does this */
@@ -521,7 +521,7 @@ __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status
  * So speed is not an issue here.
 */
 struct rds_message *rds_send_get_message(struct rds_connection *conn,
-					 struct rds_rdma_op *op)
+					 struct rm_rdma_op *op)
 {
 	struct rds_message *rm, *tmp, *found = NULL;
 	unsigned long flags;
@@ -529,7 +529,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	spin_lock_irqsave(&conn->c_lock, flags);
 
 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
-		if (&rm->rdma.m_rdma_op == op) {
+		if (&rm->rdma == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			goto out;
@@ -537,7 +537,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	}
 
 	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
-		if (&rm->rdma.m_rdma_op == op) {
+		if (&rm->rdma == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			break;
@@ -597,20 +597,20 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 		spin_lock(&rs->rs_lock);
 
 		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
-			struct rds_rdma_op *ro = &rm->rdma.m_rdma_op;
+			struct rm_rdma_op *ro = &rm->rdma;
 			struct rds_notifier *notifier;
 
 			list_del_init(&rm->m_sock_item);
 			rds_send_sndbuf_remove(rs, rm);
 
-			if (ro->r_active && ro->r_notifier &&
-			    (ro->r_notify || (ro->r_recverr && status))) {
-				notifier = ro->r_notifier;
+			if (ro->op_active && ro->op_notifier &&
+			    (ro->op_notify || (ro->op_recverr && status))) {
+				notifier = ro->op_notifier;
 				list_add_tail(&notifier->n_list,
 						&rs->rs_notify_queue);
 				if (!notifier->n_status)
 					notifier->n_status = status;
-				rm->rdma.m_rdma_op.r_notifier = NULL;
+				rm->rdma.op_notifier = NULL;
 			}
 			was_on_sock = 1;
 			rm->m_rs = NULL;
@@ -987,11 +987,11 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	if (ret)
 		goto out;
 
-	if ((rm->m_rdma_cookie || rm->rdma.m_rdma_op.r_active) &&
+	if ((rm->m_rdma_cookie || rm->rdma.op_active) &&
 	    !conn->c_trans->xmit_rdma) {
 		if (printk_ratelimit())
 			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
-			       &rm->rdma.m_rdma_op, conn->c_trans->xmit_rdma);
+			       &rm->rdma, conn->c_trans->xmit_rdma);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}