diff options
Diffstat (limited to 'net/rds/ib_recv.c')
-rw-r--r-- | net/rds/ib_recv.c | 48 |
1 files changed, 33 insertions, 15 deletions
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cfcab03..c7dd11b835f0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -31,6 +31,7 @@
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
35 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
36 | #include <rdma/rdma_cm.h> | 37 | #include <rdma/rdma_cm.h> |
@@ -143,15 +144,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
143 | int ret = -ENOMEM; | 144 | int ret = -ENOMEM; |
144 | 145 | ||
145 | if (recv->r_ibinc == NULL) { | 146 | if (recv->r_ibinc == NULL) { |
146 | if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) { | 147 | if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) { |
147 | rds_ib_stats_inc(s_ib_rx_alloc_limit); | 148 | rds_ib_stats_inc(s_ib_rx_alloc_limit); |
148 | goto out; | 149 | goto out; |
149 | } | 150 | } |
150 | recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, | 151 | recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, |
151 | kptr_gfp); | 152 | kptr_gfp); |
152 | if (recv->r_ibinc == NULL) | 153 | if (recv->r_ibinc == NULL) { |
154 | atomic_dec(&rds_ib_allocation); | ||
153 | goto out; | 155 | goto out; |
154 | atomic_inc(&rds_ib_allocation); | 156 | } |
155 | INIT_LIST_HEAD(&recv->r_ibinc->ii_frags); | 157 | INIT_LIST_HEAD(&recv->r_ibinc->ii_frags); |
156 | rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr); | 158 | rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr); |
157 | } | 159 | } |
@@ -229,8 +231,8 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
229 | int ret = 0; | 231 | int ret = 0; |
230 | u32 pos; | 232 | u32 pos; |
231 | 233 | ||
232 | while ((prefill || rds_conn_up(conn)) | 234 | while ((prefill || rds_conn_up(conn)) && |
233 | && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { | 235 | rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { |
234 | if (pos >= ic->i_recv_ring.w_nr) { | 236 | if (pos >= ic->i_recv_ring.w_nr) { |
235 | printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", | 237 | printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", |
236 | pos); | 238 | pos); |
@@ -770,10 +772,10 @@ static void rds_ib_process_recv(struct rds_connection *conn,
770 | hdr = &ibinc->ii_inc.i_hdr; | 772 | hdr = &ibinc->ii_inc.i_hdr; |
771 | /* We can't just use memcmp here; fragments of a | 773 | /* We can't just use memcmp here; fragments of a |
772 | * single message may carry different ACKs */ | 774 | * single message may carry different ACKs */ |
773 | if (hdr->h_sequence != ihdr->h_sequence | 775 | if (hdr->h_sequence != ihdr->h_sequence || |
774 | || hdr->h_len != ihdr->h_len | 776 | hdr->h_len != ihdr->h_len || |
775 | || hdr->h_sport != ihdr->h_sport | 777 | hdr->h_sport != ihdr->h_sport || |
776 | || hdr->h_dport != ihdr->h_dport) { | 778 | hdr->h_dport != ihdr->h_dport) { |
777 | rds_ib_conn_error(conn, | 779 | rds_ib_conn_error(conn, |
778 | "fragment header mismatch; forcing reconnect\n"); | 780 | "fragment header mismatch; forcing reconnect\n"); |
779 | return; | 781 | return; |
@@ -824,17 +826,22 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
824 | { | 826 | { |
825 | struct rds_connection *conn = context; | 827 | struct rds_connection *conn = context; |
826 | struct rds_ib_connection *ic = conn->c_transport_data; | 828 | struct rds_ib_connection *ic = conn->c_transport_data; |
827 | struct ib_wc wc; | ||
828 | struct rds_ib_ack_state state = { 0, }; | ||
829 | struct rds_ib_recv_work *recv; | ||
830 | 829 | ||
831 | rdsdebug("conn %p cq %p\n", conn, cq); | 830 | rdsdebug("conn %p cq %p\n", conn, cq); |
832 | 831 | ||
833 | rds_ib_stats_inc(s_ib_rx_cq_call); | 832 | rds_ib_stats_inc(s_ib_rx_cq_call); |
834 | 833 | ||
835 | ib_req_notify_cq(cq, IB_CQ_SOLICITED); | 834 | tasklet_schedule(&ic->i_recv_tasklet); |
835 | } | ||
836 | |||
837 | static inline void rds_poll_cq(struct rds_ib_connection *ic, | ||
838 | struct rds_ib_ack_state *state) | ||
839 | { | ||
840 | struct rds_connection *conn = ic->conn; | ||
841 | struct ib_wc wc; | ||
842 | struct rds_ib_recv_work *recv; | ||
836 | 843 | ||
837 | while (ib_poll_cq(cq, 1, &wc) > 0) { | 844 | while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) { |
838 | rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", | 845 | rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", |
839 | (unsigned long long)wc.wr_id, wc.status, wc.byte_len, | 846 | (unsigned long long)wc.wr_id, wc.status, wc.byte_len, |
840 | be32_to_cpu(wc.ex.imm_data)); | 847 | be32_to_cpu(wc.ex.imm_data)); |
@@ -852,7 +859,7 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
852 | if (rds_conn_up(conn) || rds_conn_connecting(conn)) { | 859 | if (rds_conn_up(conn) || rds_conn_connecting(conn)) { |
853 | /* We expect errors as the qp is drained during shutdown */ | 860 | /* We expect errors as the qp is drained during shutdown */ |
854 | if (wc.status == IB_WC_SUCCESS) { | 861 | if (wc.status == IB_WC_SUCCESS) { |
855 | rds_ib_process_recv(conn, recv, wc.byte_len, &state); | 862 | rds_ib_process_recv(conn, recv, wc.byte_len, state); |
856 | } else { | 863 | } else { |
857 | rds_ib_conn_error(conn, "recv completion on " | 864 | rds_ib_conn_error(conn, "recv completion on " |
858 | "%pI4 had status %u, disconnecting and " | 865 | "%pI4 had status %u, disconnecting and " |
@@ -863,6 +870,17 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
863 | 870 | ||
864 | rds_ib_ring_free(&ic->i_recv_ring, 1); | 871 | rds_ib_ring_free(&ic->i_recv_ring, 1); |
865 | } | 872 | } |
873 | } | ||
874 | |||
875 | void rds_ib_recv_tasklet_fn(unsigned long data) | ||
876 | { | ||
877 | struct rds_ib_connection *ic = (struct rds_ib_connection *) data; | ||
878 | struct rds_connection *conn = ic->conn; | ||
879 | struct rds_ib_ack_state state = { 0, }; | ||
880 | |||
881 | rds_poll_cq(ic, &state); | ||
882 | ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); | ||
883 | rds_poll_cq(ic, &state); | ||
866 | 884 | ||
867 | if (state.ack_next_valid) | 885 | if (state.ack_next_valid) |
868 | rds_ib_set_ack(ic, state.ack_next, state.ack_required); | 886 | rds_ib_set_ack(ic, state.ack_next, state.ack_required); |