Diffstat (limited to 'net/rds/iw_send.c')
 -rw-r--r--  net/rds/iw_send.c | 93
 1 file changed, 47 insertions(+), 46 deletions(-)
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 52182ff7519..6280ea020d4 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -36,7 +36,6 @@
 #include <linux/dmapool.h>
 
 #include "rds.h"
-#include "rdma.h"
 #include "iw.h"
 
 static void rds_iw_send_rdma_complete(struct rds_message *rm,
@@ -64,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
 }
 
 static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
-				   struct rds_rdma_op *op)
+				   struct rm_rdma_op *op)
 {
-	if (op->r_mapped) {
+	if (op->op_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
-			op->r_sg, op->r_nents,
-			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		op->r_mapped = 0;
+			op->op_sg, op->op_nents,
+			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->op_mapped = 0;
 	}
 }
 
@@ -83,11 +82,11 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
 
 	ib_dma_unmap_sg(ic->i_cm_id->device,
-		rm->m_sg, rm->m_nents,
+		rm->data.op_sg, rm->data.op_nents,
 		DMA_TO_DEVICE);
 
-	if (rm->m_rdma_op != NULL) {
-		rds_iw_send_unmap_rdma(ic, rm->m_rdma_op);
+	if (rm->rdma.op_active) {
+		rds_iw_send_unmap_rdma(ic, &rm->rdma);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -111,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		 */
 		rds_iw_send_rdma_complete(rm, wc_status);
 
-		if (rm->m_rdma_op->r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -556,25 +555,27 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 
 	/* map the message the first time we see it */
-	if (ic->i_rm == NULL) {
+	if (!ic->i_rm) {
 		/*
 		printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
				rm->m_inc.i_hdr.h_flags,
				be32_to_cpu(rm->m_inc.i_hdr.h_len));
 		*/
-		if (rm->m_nents) {
-			rm->m_count = ib_dma_map_sg(dev,
-					rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
-			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
-			if (rm->m_count == 0) {
+		if (rm->data.op_nents) {
+			rm->data.op_count = ib_dma_map_sg(dev,
+							  rm->data.op_sg,
+							  rm->data.op_nents,
+							  DMA_TO_DEVICE);
+			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+			if (rm->data.op_count == 0) {
 				rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 				rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
 				ret = -ENOMEM; /* XXX ? */
 				goto out;
 			}
 		} else {
-			rm->m_count = 0;
+			rm->data.op_count = 0;
 		}
 
 		ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
@@ -590,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->m_rdma_op) {
+	if (rm->rdma.op_active) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 			RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -621,7 +622,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &rm->m_sg[sg];
+	scat = &rm->data.op_sg[sg];
 	sent = 0;
 	i = 0;
 
@@ -631,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
@@ -650,7 +651,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 
 	/* if there's data reference it with a chain of work reqs */
-	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+	for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
 		unsigned int len;
 
 		send = &ic->i_sends[pos];
@@ -728,7 +729,7 @@ add_header:
 	sent += sizeof(struct rds_header);
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->m_sg[rm->m_count]) {
+	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 		prev->s_rm = ic->i_rm;
 		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 		ic->i_rm = NULL;
@@ -784,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
 	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
 }
 
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_iw_connection *ic = conn->c_transport_data;
 	struct rds_iw_send_work *send = NULL;
@@ -794,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_iw_device *rds_iwdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos, fr_pos;
 	u32 work_alloc;
 	u32 i;
@@ -806,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
 
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-			op->r_sg, op->r_nents, (op->r_write) ?
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+			op->op_sg, op->op_nents, (op->op_write) ?
 			DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 
-	if (!op->r_write) {
+	if (!op->op_write) {
 		/* Alloc space on the send queue for the fastreg */
 		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
 		if (work_alloc != 1) {
@@ -835,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_iwdev->max_sge);
+	i = ceil(op->op_count, rds_iwdev->max_sge);
 
 	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -846,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	send = &ic->i_sends[pos];
-	if (!op->r_write) {
+	if (!op->op_write) {
 		first = prev = &ic->i_sends[fr_pos];
 	} else {
 		first = send;
 		prev = NULL;
 	}
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;
 
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
 
@@ -873,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		 * for local access after RDS is finished with it, using
 		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
 		 */
-		if (op->r_write)
+		if (op->op_write)
 			send->s_wr.opcode = IB_WR_RDMA_WRITE;
 		else
 			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
 
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 
 		if (num_sge > rds_iwdev->max_sge) {
@@ -893,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
 
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 
 			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -927,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count])
+	if (scat == &op->op_sg[op->op_count])
 		first->s_wr.send_flags = IB_SEND_SIGNALED;
 
 	if (i < work_alloc) {
@@ -941,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * adapters do not allow using the lkey for this at all. To bypass this use a
 	 * fastreg_mr (or possibly a dma_mr)
 	 */
-	if (!op->r_write) {
+	if (!op->op_write) {
 		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
-			op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+			op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
 		work_alloc++;
 	}
 
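For reference, a minimal sketch of the renamed op layout this diff converts iw_send.c to. It is reconstructed only from the member accesses visible above (op->op_*, rm->rdma.op_*, rm->data.op_*); the authoritative definitions live elsewhere in net/rds and carry additional fields, so treat the declarations below as an illustrative assumption, not the kernel's actual structs.

/* Hypothetical sketch, inferred from the accesses in this diff. The
 * real definitions include further members not exercised here. */
struct rm_rdma_op {
	u32			op_rkey;	/* was r_key */
	u64			op_remote_addr;	/* was r_remote_addr */
	unsigned int		op_write:1;	/* was r_write */
	unsigned int		op_fence:1;	/* was r_fence */
	unsigned int		op_mapped:1;	/* was r_mapped */
	unsigned int		op_active:1;	/* replaces the old rm->m_rdma_op != NULL test */
	unsigned int		op_bytes;	/* was r_bytes */
	unsigned int		op_nents;	/* was r_nents */
	unsigned int		op_count;	/* was r_count: sg entries after DMA mapping */
	struct scatterlist	*op_sg;		/* was r_sg */
};

/* The data payload moves behind the same op_ naming convention,
 * replacing the old rm->m_sg/m_nents/m_count trio with rm->data.op_*. */
struct rm_data_op {
	unsigned int		op_nents;	/* was m_nents */
	unsigned int		op_count;	/* was m_count */
	struct scatterlist	*op_sg;		/* was m_sg */
};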