author     Andy Grover <andy.grover@oracle.com>  2010-03-18 20:19:52 -0400
committer  Andy Grover <andy.grover@oracle.com>  2010-09-08 21:12:11 -0400
commit     f17a1a55fb672d7f64be7f2e940ef5669e5efa0a
tree       ed1651f64ade2676101766af764d11ef98e30f41 /net/rds
parent     77dd550e5547846604ff6f90c4dc6bba4414e485
RDS: Refill recv ring directly from tasklet
Performance is better if we use allocations that don't block to refill
the receive ring. Since the whole reason we were kicking out to the
worker thread was so we could do blocking allocs, we no longer need to
do this.

Remove gfp params from rds_ib_recv_refill(); we always use GFP_NOWAIT.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
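The pattern at work: tasklets run in softirq context, where sleeping is
forbidden, and GFP_KERNEL allocations may sleep, which is why the old code
had to defer refills to a worker thread. GFP_NOWAIT never sleeps, so the
refill can run in the tasklet itself. A minimal sketch of the before/after
shape follows; the demo_* names are hypothetical and not part of RDS, while
kmem_cache_alloc(), queue_delayed_work(), and the GFP flags are real kernel
APIs:

/*
 * Sketch only: demo_* names are hypothetical illustrations.
 * Real kernel APIs used: kmem_cache_alloc(), queue_delayed_work(),
 * GFP_KERNEL (may sleep), GFP_NOWAIT (never sleeps).
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_conn {				/* hypothetical per-connection state */
	struct kmem_cache	*slab;
	struct delayed_work	refill_work;
};

extern struct workqueue_struct *demo_wq;	/* hypothetical workqueue */
extern void demo_post_recv(struct demo_conn *conn, void *buf);

/* Before: a tasklet must not sleep, so a GFP_KERNEL refill had to be
 * punted to process context via a worker thread. */
static void demo_ring_low_old(struct demo_conn *conn)
{
	queue_delayed_work(demo_wq, &conn->refill_work, 0);
}

/* After: GFP_NOWAIT never sleeps, so the tasklet refills directly;
 * under memory pressure the alloc just fails and we try again later. */
static void demo_ring_low_new(struct demo_conn *conn)
{
	void *buf = kmem_cache_alloc(conn->slab, GFP_NOWAIT);

	if (buf)
		demo_post_recv(conn, buf);
}

The trade-off is that a GFP_NOWAIT allocation can fail under memory
pressure; the patch tolerates this because the refill is retried the next
time the tasklet runs or via the rds_ib_recv() path.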
Diffstat (limited to 'net/rds')
-rw-r--r--  net/rds/ib.h       3
-rw-r--r--  net/rds/ib_cm.c    2
-rw-r--r--  net/rds/ib_recv.c  22
3 files changed, 10 insertions, 17 deletions
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 202140a84f0c..4c2ee2401979 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -297,8 +297,7 @@ void rds_ib_flush_mrs(void);
 int __init rds_ib_recv_init(void);
 void rds_ib_recv_exit(void);
 int rds_ib_recv(struct rds_connection *conn);
-int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
-		       gfp_t page_gfp, int prefill);
+int rds_ib_recv_refill(struct rds_connection *conn, int prefill);
 void rds_ib_inc_free(struct rds_incoming *inc);
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
 			    size_t size);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 1a91af75f4c7..75eda9c82135 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -135,7 +135,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 	rds_ib_recv_init_ring(ic);
 	/* Post receive buffers - as a side effect, this will update
 	 * the posted credit count. */
-	rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
+	rds_ib_recv_refill(conn, 1);
 
 	/* Tune RNR behavior */
 	rds_ib_tune_rnr(ic, &qp_attr);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index e9fe08a86c40..8f041f7954a2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -135,8 +135,7 @@ void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
 }
 
 static int rds_ib_recv_refill_one(struct rds_connection *conn,
-				  struct rds_ib_recv_work *recv,
-				  gfp_t kptr_gfp, gfp_t page_gfp)
+				  struct rds_ib_recv_work *recv)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	dma_addr_t dma_addr;
@@ -148,8 +147,7 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 		rds_ib_stats_inc(s_ib_rx_alloc_limit);
 		goto out;
 	}
-	recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
-					 kptr_gfp);
+	recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, GFP_NOWAIT);
 	if (!recv->r_ibinc) {
 		atomic_dec(&rds_ib_allocation);
 		goto out;
@@ -159,7 +157,7 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	}
 
 	if (!recv->r_frag) {
-		recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp);
+		recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, GFP_NOWAIT);
 		if (!recv->r_frag)
 			goto out;
 		INIT_LIST_HEAD(&recv->r_frag->f_item);
@@ -167,7 +165,7 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	}
 
 	if (!ic->i_frag.f_page) {
-		ic->i_frag.f_page = alloc_page(page_gfp);
+		ic->i_frag.f_page = alloc_page(GFP_NOWAIT);
 		if (!ic->i_frag.f_page)
 			goto out;
 		ic->i_frag.f_offset = 0;
@@ -221,8 +219,7 @@ out:
  *
  * -1 is returned if posting fails due to temporary resource exhaustion.
  */
-int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
-		       gfp_t page_gfp, int prefill)
+int rds_ib_recv_refill(struct rds_connection *conn, int prefill)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_recv_work *recv;
@@ -241,7 +238,7 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
 	}
 
 	recv = &ic->i_recvs[pos];
-	ret = rds_ib_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
+	ret = rds_ib_recv_refill_one(conn, recv);
 	if (ret) {
 		ret = -1;
 		break;
@@ -856,11 +853,8 @@ void rds_ib_recv_tasklet_fn(unsigned long data)
 	if (rds_ib_ring_empty(&ic->i_recv_ring))
 		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-	/*
-	 * If the ring is running low, then schedule the thread to refill.
-	 */
 	if (rds_ib_ring_low(&ic->i_recv_ring))
-		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+		rds_ib_recv_refill(conn, 0);
 }
 
 int rds_ib_recv(struct rds_connection *conn)
@@ -875,7 +869,7 @@ int rds_ib_recv(struct rds_connection *conn)
 	 * we're really low and we want the caller to back off for a bit.
 	 */
 	mutex_lock(&ic->i_recv_mutex);
-	if (rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
+	if (rds_ib_recv_refill(conn, 0))
 		ret = -ENOMEM;
 	else
 		rds_ib_stats_inc(s_ib_rx_refill_from_thread);