path: root/net/rds/ib_send.c
author	Andy Grover <andy.grover@oracle.com>	2010-03-01 17:11:53 -0500
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:11:58 -0400
commit	f8b3aaf2ba8ca9e27b47f8bfdff07c8b968f2c05 (patch)
tree	42de574167e590ca2e413314b6486df916cd1f31 /net/rds/ib_send.c
parent	d0ab25a83c4a08cd98b73a37d3f4c069f7b4f50b (diff)
RDS: Remove struct rds_rdma_op
A big changeset, but it's all pretty dumb.

struct rds_rdma_op was already embedded in struct rm_rdma_op. Remove rds_rdma_op and put its members in rm_rdma_op. Rename members with "op_" prefix instead of "r_", for consistency.

Of course this breaks a lot, so fix up the code accordingly.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
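To make the rename concrete, here is a minimal sketch of the before/after shape. Only members that appear in this diff are shown, their types are inferred from usage rather than copied from net/rds/rds.h, and the _before/_after suffixes are purely illustrative (in the tree both versions are simply struct rm_rdma_op):

#include <stdint.h>

typedef uint32_t u32;	/* kernel-style fixed-width aliases for this sketch */
typedef uint64_t u64;

struct scatterlist;	/* opaque here; the real one is in <linux/scatterlist.h> */

/* Before: a separate op struct, reached through an extra member,
 * so accesses look like rm->rdma.m_rdma_op.r_active.
 */
struct rds_rdma_op {
	u32			r_key;
	u64			r_remote_addr;
	unsigned int		r_write:1, r_fence:1, r_notify:1;
	unsigned int		r_active:1, r_mapped:1;
	unsigned int		r_bytes, r_nents, r_count;
	struct scatterlist	*r_sg;
};

struct rm_rdma_op_before {
	struct rds_rdma_op	m_rdma_op;
};

/* After: the same members live directly in rm_rdma_op with an op_ prefix
 * (note r_key becomes op_rkey), so accesses flatten to rm->rdma.op_active
 * and &rm->rdma is itself the op.
 */
struct rm_rdma_op_after {
	u32			op_rkey;
	u64			op_remote_addr;
	unsigned int		op_write:1, op_fence:1, op_notify:1;
	unsigned int		op_active:1, op_mapped:1;
	unsigned int		op_bytes, op_nents, op_count;
	struct scatterlist	*op_sg;
};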
Diffstat (limited to 'net/rds/ib_send.c')
-rw-r--r--	net/rds/ib_send.c	60
1 file changed, 30 insertions, 30 deletions
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index e6745d827c3a..63981cd1827a 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -79,14 +79,14 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 			    rm->data.m_sg, rm->data.m_nents,
 			    DMA_TO_DEVICE);
 
-	if (rm->rdma.m_rdma_op.r_active) {
-		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;
+	if (rm->rdma.op_active) {
+		struct rm_rdma_op *op = &rm->rdma;
 
-		if (op->r_mapped) {
+		if (op->op_mapped) {
 			ib_dma_unmap_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents,
-					op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-			op->r_mapped = 0;
+					op->op_sg, op->op_nents,
+					op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			op->op_mapped = 0;
 		}
 
 		/* If the user asked for a completion notification on this
@@ -111,10 +111,10 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		 */
 		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);
 
-		if (rm->rdma.m_rdma_op.r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 
 	if (rm->atomic.op_active) {
@@ -540,10 +540,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->rdma.m_rdma_op.r_active) {
+	if (rm->rdma.op_active) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -576,7 +576,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/* Each frag gets a header. Msgs may be 0 bytes */
@@ -746,7 +746,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rds_message *rm)
 	 * we must fill in s_rm ourselves, so we properly clean up
 	 * on completion.
 	 */
-	if (!rm->rdma.m_rdma_op.r_active && !rm->data.op_active)
+	if (!rm->rdma.op_active && !rm->data.op_active)
 		send->s_rm = rm;
 
 	/* map 8 byte retval buffer to the device */
@@ -788,7 +788,7 @@ out:
 	return ret;
 }
 
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_send_work *send = NULL;
@@ -798,7 +798,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_ib_device *rds_ibdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos;
 	u32 work_alloc;
 	u32 i;
@@ -810,25 +810,25 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
 
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents, (op->r_write) ?
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+					op->op_sg, op->op_nents, (op->op_write) ?
 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 
 	/*
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_ibdev->max_sge);
+	i = ceil(op->op_count, rds_ibdev->max_sge);
 
 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -841,19 +841,19 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;
 
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
 
-		rds_ib_set_wr_signal_state(ic, send, op->r_notify);
+		rds_ib_set_wr_signal_state(ic, send, op->op_notify);
 
-		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
+		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 
 		if (num_sge > rds_ibdev->max_sge) {
@@ -868,7 +868,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
 
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 			send->s_sge[j].addr =
 				ib_sg_dma_address(ic->i_cm_id->device, scat);
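An aside on the hunk at old line 831: rather than handle a partially sent RDMA op, the code pre-sizes the op into exactly enough work requests, each carrying at most max_sge scatter/gather entries. A standalone sketch of that sizing, assuming ceil() is RDS's round-up integer-division helper (its actual definition lives in net/rds/rds.h, not in this diff, and the values below are hypothetical):

#include <stdio.h>

/* Assumed RDS-style round-up division; see net/rds/rds.h for the real macro. */
#define ceil(x, y)	(((x) + (y) - 1) / (y))

int main(void)
{
	unsigned int op_count = 19;	/* hypothetical mapped sg entries (op->op_count) */
	unsigned int max_sge = 8;	/* hypothetical device SGE limit (rds_ibdev->max_sge) */

	/* 19 entries at 8 SGEs per work request need 3 WRs (8 + 8 + 3),
	 * mirroring i = ceil(op->op_count, rds_ibdev->max_sge) above.
	 */
	printf("work requests needed: %u\n", ceil(op_count, max_sge));
	return 0;
}

If rds_ib_ring_alloc() cannot hand back that many send-ring slots, the function bails out instead of posting a truncated chain, which is exactly the "no partial rdma read/write" policy the comment describes.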