Diffstat (limited to 'net/rds/iw_recv.c')
 net/rds/iw_recv.c | 48 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 15 deletions(-)
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 8683f5f66c4b..da43ee840ca3 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -31,6 +31,7 @@
  *
  */
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <rdma/rdma_cm.h>
@@ -143,15 +144,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
 	int ret = -ENOMEM;
 
 	if (recv->r_iwinc == NULL) {
-		if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
 			rds_iw_stats_inc(s_iw_rx_alloc_limit);
 			goto out;
 		}
 		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
 						 kptr_gfp);
-		if (recv->r_iwinc == NULL)
+		if (recv->r_iwinc == NULL) {
+			atomic_dec(&rds_iw_allocation);
 			goto out;
-		atomic_inc(&rds_iw_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
 		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
 	}
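
The functional change in this hunk closes a small accounting race. The old code tested rds_iw_allocation with atomic_read() and incremented it only after a successful allocation, so concurrent callers could all pass the limit check and drive the counter past rds_iw_sysctl_max_recv_allocation. atomic_add_unless() folds the check and the increment into one atomic step, reserving a slot up front, and the new atomic_dec() returns the slot when kmem_cache_alloc() fails. A minimal sketch of the same reserve-then-release pattern, with illustrative names (nr_bufs, MAX_BUFS, and buf_alloc are not from the patch):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	static atomic_t nr_bufs = ATOMIC_INIT(0);	/* illustrative counter */
	#define MAX_BUFS 1024				/* illustrative cap */

	static void *buf_alloc(gfp_t gfp)
	{
		void *p;

		/* Check-and-increment as one atomic step: this takes a slot
		 * only while nr_bufs is below MAX_BUFS, so racing callers can
		 * never push the counter past the cap. */
		if (!atomic_add_unless(&nr_bufs, 1, MAX_BUFS))
			return NULL;

		p = kmalloc(PAGE_SIZE, gfp);
		if (!p)
			atomic_dec(&nr_bufs);	/* alloc failed: give the slot back */
		return p;
	}
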
@@ -229,8 +231,8 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
 	int ret = 0;
 	u32 pos;
 
-	while ((prefill || rds_conn_up(conn))
-			&& rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+	while ((prefill || rds_conn_up(conn)) &&
+	       rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 		if (pos >= ic->i_recv_ring.w_nr) {
 			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
 				pos);
@@ -729,10 +731,10 @@ static void rds_iw_process_recv(struct rds_connection *conn,
 		hdr = &iwinc->ii_inc.i_hdr;
 		/* We can't just use memcmp here; fragments of a
 		 * single message may carry different ACKs */
-		if (hdr->h_sequence != ihdr->h_sequence
-		 || hdr->h_len != ihdr->h_len
-		 || hdr->h_sport != ihdr->h_sport
-		 || hdr->h_dport != ihdr->h_dport) {
+		if (hdr->h_sequence != ihdr->h_sequence ||
+		    hdr->h_len != ihdr->h_len ||
+		    hdr->h_sport != ihdr->h_sport ||
+		    hdr->h_dport != ihdr->h_dport) {
 			rds_iw_conn_error(conn,
 				"fragment header mismatch; forcing reconnect\n");
 			return;
@@ -783,17 +785,22 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 {
 	struct rds_connection *conn = context;
 	struct rds_iw_connection *ic = conn->c_transport_data;
-	struct ib_wc wc;
-	struct rds_iw_ack_state state = { 0, };
-	struct rds_iw_recv_work *recv;
 
 	rdsdebug("conn %p cq %p\n", conn, cq);
 
 	rds_iw_stats_inc(s_iw_rx_cq_call);
 
-	ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+	tasklet_schedule(&ic->i_recv_tasklet);
+}
+
+static inline void rds_poll_cq(struct rds_iw_connection *ic,
+			       struct rds_iw_ack_state *state)
+{
+	struct rds_connection *conn = ic->conn;
+	struct ib_wc wc;
+	struct rds_iw_recv_work *recv;
 
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
+	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
 		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
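
The larger change in this hunk restructures interrupt handling. rds_iw_recv_cq_comp_handler() can be invoked by the HCA driver from hard-irq context, so it is reduced to bumping a statistic and scheduling a tasklet; the actual CQ draining moves into rds_poll_cq(), which runs later in softirq context via rds_iw_recv_tasklet_fn() below. The i_recv_tasklet field and its initialization live elsewhere in the patch series; the wiring would look roughly like this (a sketch, assuming the setup sits in the connection-allocation path):

	#include <linux/interrupt.h>

	/* Connection setup: bind the tasklet to this connection.
	 * tasklet_init() hands the handler an unsigned long cookie, which
	 * is why rds_iw_recv_tasklet_fn() casts its argument back to the
	 * ic pointer. */
	tasklet_init(&ic->i_recv_tasklet,
		     rds_iw_recv_tasklet_fn, (unsigned long) ic);

	/* CQ event handler (possibly hard-irq context): defer all polling.
	 * Repeated schedules before the tasklet runs collapse into one
	 * run, so a burst of completion events costs a single drain. */
	tasklet_schedule(&ic->i_recv_tasklet);
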
@@ -811,7 +818,7 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
 			/* We expect errors as the qp is drained during shutdown */
 			if (wc.status == IB_WC_SUCCESS) {
-				rds_iw_process_recv(conn, recv, wc.byte_len, &state);
+				rds_iw_process_recv(conn, recv, wc.byte_len, state);
 			} else {
 				rds_iw_conn_error(conn, "recv completion on "
 				       "%pI4 had status %u, disconnecting and "
@@ -822,6 +829,17 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 
 		rds_iw_ring_free(&ic->i_recv_ring, 1);
 	}
+}
+
+void rds_iw_recv_tasklet_fn(unsigned long data)
+{
+	struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_iw_ack_state state = { 0, };
+
+	rds_poll_cq(ic, &state);
+	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+	rds_poll_cq(ic, &state);
 
 	if (state.ack_next_valid)
 		rds_iw_set_ack(ic, state.ack_next, state.ack_required);
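
The poll / re-arm / poll sequence in rds_iw_recv_tasklet_fn() is the standard defense against lost completion events: ib_req_notify_cq() only arms notification for completions that arrive after the call, so anything landing between the first drain and the re-arm would otherwise sit in the CQ with no event to announce it. As a timeline (comments only; the code is the three lines added above):

	/*
	 * rds_poll_cq(ic, &state);     (1) drain everything currently queued
	 *    ...completion arrives...      no event fires: CQ not yet re-armed
	 * ib_req_notify_cq(...);       (2) arm events for *future* completions
	 * rds_poll_cq(ic, &state);     (3) reap anything that slipped in
	 *                                  between (1) and (2)
	 *
	 * Skipping (3) would strand such a completion until some unrelated
	 * event scheduled the tasklet again.
	 */
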