author	Andy Grover <andy.grover@oracle.com>	2010-03-01 17:11:53 -0500
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:11:58 -0400
commit	f8b3aaf2ba8ca9e27b47f8bfdff07c8b968f2c05 (patch)
tree	42de574167e590ca2e413314b6486df916cd1f31 /net/rds/iw_send.c
parent	d0ab25a83c4a08cd98b73a37d3f4c069f7b4f50b (diff)
RDS: Remove struct rds_rdma_op
A big changeset, but it's all pretty dumb.

struct rds_rdma_op was already embedded in struct rm_rdma_op. Remove
rds_rdma_op and put its members in rm_rdma_op. Rename members with
"op_" prefix instead of "r_", for consistency.

Of course this breaks a lot, so fixup the code accordingly.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
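For orientation, a minimal sketch of what the rename amounts to. The field list is abridged to the members this file touches, and the types are inferred from their usage here rather than copied from the authoritative definitions in net/rds/rds.h; note that r_key becomes op_rkey, not op_key:

/* Abridged sketch -- before: a separate struct, embedded in struct
 * rds_message as rm->rdma.m_rdma_op, members prefixed "r_". */
struct rds_rdma_op {
	u32			r_key;		/* remote rkey */
	u64			r_remote_addr;	/* remote address */
	unsigned int		r_write:1;	/* RDMA WRITE vs READ */
	unsigned int		r_fence:1;	/* IB_SEND_FENCE requested */
	unsigned int		r_active:1;	/* op is in use */
	unsigned int		r_mapped:1;	/* sg list is DMA-mapped */
	unsigned int		r_count;	/* mapped sg entries */
	unsigned int		r_nents;	/* sg entries */
	unsigned int		r_bytes;	/* total RDMA bytes */
	struct scatterlist	*r_sg;
};

/* After: the same members live directly in struct rm_rdma_op with an
 * "op_" prefix, so callers say rm->rdma.op_* instead of
 * rm->rdma.m_rdma_op.r_*. */
struct rm_rdma_op {
	u32			op_rkey;
	u64			op_remote_addr;
	unsigned int		op_write:1;
	unsigned int		op_fence:1;
	unsigned int		op_active:1;
	unsigned int		op_mapped:1;
	unsigned int		op_count;
	unsigned int		op_nents;
	unsigned int		op_bytes;
	struct scatterlist	*op_sg;
};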
Diffstat (limited to 'net/rds/iw_send.c')
-rw-r--r--	net/rds/iw_send.c	68
1 file changed, 34 insertions(+), 34 deletions(-)
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 9b79a1b10445..05ebf16ecad7 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -63,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
 }
 
 static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
-				   struct rds_rdma_op *op)
+				   struct rm_rdma_op *op)
 {
-	if (op->r_mapped) {
+	if (op->op_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
-			op->r_sg, op->r_nents,
-			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		op->r_mapped = 0;
+			op->op_sg, op->op_nents,
+			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->op_mapped = 0;
 	}
 }
 
@@ -85,8 +85,8 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 				rm->data.m_sg, rm->data.m_nents,
 				DMA_TO_DEVICE);
 
-	if (rm->rdma.m_rdma_op.r_active) {
-		rds_iw_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);
+	if (rm->rdma.op_active) {
+		rds_iw_send_unmap_rdma(ic, &rm->rdma);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -110,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		 */
 		rds_iw_send_rdma_complete(rm, wc_status);
 
-		if (rm->rdma.m_rdma_op.r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -591,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->rdma.m_rdma_op.r_active) {
+	if (rm->rdma.op_active) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -632,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
@@ -785,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
 	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
 }
 
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_iw_connection *ic = conn->c_transport_data;
 	struct rds_iw_send_work *send = NULL;
@@ -795,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_iw_device *rds_iwdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos, fr_pos;
 	u32 work_alloc;
 	u32 i;
@@ -807,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
 
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-				op->r_sg, op->r_nents, (op->r_write) ?
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+				op->op_sg, op->op_nents, (op->op_write) ?
 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 
-	if (!op->r_write) {
+	if (!op->op_write) {
 		/* Alloc space on the send queue for the fastreg */
 		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
 		if (work_alloc != 1) {
@@ -836,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_iwdev->max_sge);
+	i = ceil(op->op_count, rds_iwdev->max_sge);
 
 	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -847,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	send = &ic->i_sends[pos];
-	if (!op->r_write) {
+	if (!op->op_write) {
 		first = prev = &ic->i_sends[fr_pos];
 	} else {
 		first = send;
 		prev = NULL;
 	}
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;
 
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
 
@@ -874,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		 * for local access after RDS is finished with it, using
 		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
 		 */
-		if (op->r_write)
+		if (op->op_write)
 			send->s_wr.opcode = IB_WR_RDMA_WRITE;
 		else
 			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
 
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 
 		if (num_sge > rds_iwdev->max_sge) {
@@ -894,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
 
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 
 			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -928,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count])
+	if (scat == &op->op_sg[op->op_count])
 		first->s_wr.send_flags = IB_SEND_SIGNALED;
 
 	if (i < work_alloc) {
@@ -942,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * adapters do not allow using the lkey for this at all. To bypass this use a
 	 * fastreg_mr (or possibly a dma_mr)
 	 */
-	if (!op->r_write) {
+	if (!op->op_write) {
 		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
-				op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+				op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
 		work_alloc++;
 	}
 
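A note on the ring-allocation math in the -836,7 hunk: rds_iw_xmit_rdma() refuses partial sends, so it asks the ring for ceil(op->op_count / max_sge) work requests up front. ceil() itself is an RDS helper defined elsewhere in the tree; the standalone C sketch below only illustrates the round-up division it performs:

/* Standalone illustration (not RDS code): how many work requests a
 * mapped scatterlist needs when each WR carries at most max_sge SGEs. */
#include <stdio.h>

static unsigned int ceil_div(unsigned int x, unsigned int y)
{
	/* round-up integer division; y must be nonzero */
	return (x + y - 1) / y;
}

int main(void)
{
	unsigned int op_count = 70;	/* e.g. entries returned by ib_dma_map_sg() */
	unsigned int max_sge = 32;	/* e.g. device SGE limit per work request */

	/* 70 SGEs at up to 32 per WR -> 3 work requests */
	printf("work requests: %u\n", ceil_div(op_count, max_sge));
	return 0;
}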