Diffstat (limited to 'net/rds/ib_recv.c')
-rw-r--r--	net/rds/ib_recv.c	47
1 file changed, 32 insertions(+), 15 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cfcab03..04dc0d3f3c95 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	int ret = -ENOMEM;
 
 	if (recv->r_ibinc == NULL) {
-		if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
 			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 			goto out;
 		}
 		recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
 						 kptr_gfp);
-		if (recv->r_ibinc == NULL)
+		if (recv->r_ibinc == NULL) {
+			atomic_dec(&rds_ib_allocation);
 			goto out;
-		atomic_inc(&rds_ib_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
 		rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
 	}
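
Note on the hunk above: checking rds_ib_allocation with atomic_read() and incrementing it only after a successful kmem_cache_alloc() left a window in which two concurrent refills could both pass the limit check and push the counter past rds_ib_sysctl_max_recv_allocation. atomic_add_unless() makes the check and the increment a single atomic step (it adds 1 unless the counter already equals the cap, and returns 0 when it refuses), and the widened error path drops the reservation again with atomic_dec() when the slab allocation fails. A minimal sketch of the same reserve-then-allocate pattern; the names alloc_count, ALLOC_MAX and try_alloc_limited() are illustrative, not part of the patch:

#include <linux/atomic.h>
#include <linux/slab.h>

static atomic_t alloc_count = ATOMIC_INIT(0);
#define ALLOC_MAX 1024			/* illustrative cap */

static void *try_alloc_limited(struct kmem_cache *cache, gfp_t gfp)
{
	void *obj;

	/* Add 1 unless the counter already equals ALLOC_MAX;
	 * returns 0 when the increment is refused. */
	if (!atomic_add_unless(&alloc_count, 1, ALLOC_MAX))
		return NULL;

	obj = kmem_cache_alloc(cache, gfp);
	if (!obj)
		atomic_dec(&alloc_count);	/* roll back the reservation */
	return obj;
}
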
@@ -229,8 +230,8 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
 	int ret = 0;
 	u32 pos;
 
-	while ((prefill || rds_conn_up(conn))
-	       && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+	while ((prefill || rds_conn_up(conn)) &&
+	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 		if (pos >= ic->i_recv_ring.w_nr) {
 			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 				pos);
@@ -770,10 +771,10 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 	hdr = &ibinc->ii_inc.i_hdr;
 	/* We can't just use memcmp here; fragments of a
 	 * single message may carry different ACKs */
-	if (hdr->h_sequence != ihdr->h_sequence
-	    || hdr->h_len != ihdr->h_len
-	    || hdr->h_sport != ihdr->h_sport
-	    || hdr->h_dport != ihdr->h_dport) {
+	if (hdr->h_sequence != ihdr->h_sequence ||
+	    hdr->h_len != ihdr->h_len ||
+	    hdr->h_sport != ihdr->h_sport ||
+	    hdr->h_dport != ihdr->h_dport) {
 		rds_ib_conn_error(conn,
 			"fragment header mismatch; forcing reconnect\n");
 		return;
@@ -824,17 +825,22 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 {
 	struct rds_connection *conn = context;
 	struct rds_ib_connection *ic = conn->c_transport_data;
-	struct ib_wc wc;
-	struct rds_ib_ack_state state = { 0, };
-	struct rds_ib_recv_work *recv;
 
 	rdsdebug("conn %p cq %p\n", conn, cq);
 
 	rds_ib_stats_inc(s_ib_rx_cq_call);
 
-	ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+	tasklet_schedule(&ic->i_recv_tasklet);
+}
+
+static inline void rds_poll_cq(struct rds_ib_connection *ic,
+			       struct rds_ib_ack_state *state)
+{
+	struct rds_connection *conn = ic->conn;
+	struct ib_wc wc;
+	struct rds_ib_recv_work *recv;
 
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
+	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
 		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
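
Note on the hunk above: the completion handler stops draining the CQ in interrupt context; it now only bumps the rx-CQ statistic and schedules a tasklet, while the actual poll loop moves into the new rds_poll_cq() helper so the softirq side can invoke it repeatedly. The i_recv_tasklet and i_recv_cq fields are presumably added to struct rds_ib_connection elsewhere in the series (the diffstat above is limited to ib_recv.c). A sketch of how the tasklet might be wired up at connection setup; the helper name rds_ib_recv_tasklet_setup() is illustrative, and in this series the tasklet_init() call would live outside ib_recv.c:

#include <linux/interrupt.h>

static void rds_ib_recv_tasklet_setup(struct rds_ib_connection *ic)
{
	/* Bind the softirq handler to this connection; the connection
	 * pointer travels in the tasklet's unsigned long data word. */
	tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
		     (unsigned long) ic);
}
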
@@ -852,7 +858,7 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
 			/* We expect errors as the qp is drained during shutdown */
 			if (wc.status == IB_WC_SUCCESS) {
-				rds_ib_process_recv(conn, recv, wc.byte_len, &state);
+				rds_ib_process_recv(conn, recv, wc.byte_len, state);
 			} else {
 				rds_ib_conn_error(conn, "recv completion on "
 					"%pI4 had status %u, disconnecting and "
@@ -863,6 +869,17 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 
 		rds_ib_ring_free(&ic->i_recv_ring, 1);
 	}
+}
+
+void rds_ib_recv_tasklet_fn(unsigned long data)
+{
+	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_ib_ack_state state = { 0, };
+
+	rds_poll_cq(ic, &state);
+	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+	rds_poll_cq(ic, &state);
 
 	if (state.ack_next_valid)
 		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
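
Note on rds_ib_recv_tasklet_fn() above: the poll / re-arm / poll sequence is the standard idiom for event-driven CQ handling. A completion that lands after the first rds_poll_cq() drain but before ib_req_notify_cq() re-arms the CQ raises no new event, so the function drains a second time after re-arming to pick up anything that slipped into that window.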