author    Zach Brown <zach.brown@oracle.com>    2010-05-18 18:44:50 -0400
committer Andy Grover <andy.grover@oracle.com>  2010-09-08 21:15:16 -0400
commit    89bf9d4158b5a1b6bd00960eb2e47601ec8cc138 (patch)
tree      e11c5ea0b69fb1bc53a03f83570e160dbe3b005f /net/rds/ib_send.c
parent    a46ca94e7fb2c93a59e08b42fd77d8c478fda5fc (diff)
RDS/IB: get the xmit max_sge from the RDS IB device on the connection
rds_ib_xmit_rdma() was calling ib_get_client_data() to get at the rds_ib_device just to get the max_sge for the transmit. This patch instead has it get it directly off the rds_ibdev which is stored on the connection.

The current code won't free the rds_ibdev until all the IB connections that use it are freed, so it's safe to reference the rds_ibdev this way. In the future it also makes it easier to support proper reference counting of the rds_ibdev struct.

As an additional bonus, this gets rid of the performance hit of calling into the IB stack to look up the rds_ibdev: the current implementation in the IB stack acquires an interrupt-blocking spinlock to protect the registration of client callback data.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
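In miniature, the change is (a hedged sketch with the surrounding function elided; ic is the connection's rds_ib_connection, as in the hunks below):

/* Before: a round trip through the IB core on every transmit;
 * ib_get_client_data() takes a spinlock internally. */
rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
max_sge = rds_ibdev->max_sge;

/* After: the connection already caches its rds_ib_device, which is
 * guaranteed to outlive every IB connection that uses it, so a plain
 * dereference is safe and lock-free. */
max_sge = ic->rds_ibdev->max_sge;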
Diffstat (limited to 'net/rds/ib_send.c')
-rw-r--r--  net/rds/ib_send.c | 12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 209dbc6d159d..3f91e794eae9 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -806,10 +806,10 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 	struct rds_ib_send_work *first;
 	struct rds_ib_send_work *prev;
 	struct ib_send_wr *failed_wr;
-	struct rds_ib_device *rds_ibdev;
 	struct scatterlist *scat;
 	unsigned long len;
 	u64 remote_addr = op->op_remote_addr;
+	u32 max_sge = ic->rds_ibdev->max_sge;
 	u32 pos;
 	u32 work_alloc;
 	u32 i;
@@ -818,8 +818,6 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 	int ret;
 	int num_sge;
 
-	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-
 	/* map the op the first time we see it */
 	if (!op->op_mapped) {
 		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
@@ -839,7 +837,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->op_count, rds_ibdev->max_sge);
+	i = ceil(op->op_count, max_sge);
 
 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -867,9 +865,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
 		send->s_wr.wr.rdma.rkey = op->op_rkey;
 
-		if (num_sge > rds_ibdev->max_sge) {
-			send->s_wr.num_sge = rds_ibdev->max_sge;
-			num_sge -= rds_ibdev->max_sge;
+		if (num_sge > max_sge) {
+			send->s_wr.num_sge = max_sge;
+			num_sge -= max_sge;
 		} else {
 			send->s_wr.num_sge = num_sge;
 		}
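For reference, the ceil(op->op_count, max_sge) above is plain integer ceiling division (RDS defines ceil() as a macro to that effect): one work request is needed per max_sge scatterlist entries, and the send loop then caps each work request at max_sge entries, as in the last hunk. A standalone sketch of both steps (the numbers are illustrative, not from any real HCA):

#include <stdio.h>

/* Integer ceiling division, matching the ceil() helper used above:
 * how many work requests cover `count` sg entries at `max_sge` each. */
static unsigned int ceil_div(unsigned int count, unsigned int max_sge)
{
	return (count + max_sge - 1) / max_sge;
}

int main(void)
{
	unsigned int max_sge = 30;	/* illustrative HCA limit */
	unsigned int num_sge = 73;	/* stands in for op->op_count */
	unsigned int i = ceil_div(num_sge, max_sge);

	printf("%u sg entries -> %u work requests:", num_sge, i);

	/* The same capping logic as the last hunk: each work request
	 * takes at most max_sge entries until none remain. */
	while (num_sge) {
		unsigned int take = num_sge > max_sge ? max_sge : num_sge;
		printf(" %u", take);
		num_sge -= take;
	}
	printf("\n");	/* 73 sg entries -> 3 work requests: 30 30 13 */
	return 0;
}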