about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
author    Chris Mason <chris.mason@oracle.com>  2010-05-27 00:45:06 -0400
committer Andy Grover <andy.grover@oracle.com>  2010-09-08 21:15:24 -0400
commit 037f18a3074753991656189a091a5fa371999107 (patch)
tree   5452afd53bc7d05d712a5cdd9f937aa3591c75d3
parent 33244125871734ebc0d8d147680a0d7e99385e0b (diff)
RDS: use friendly gfp masks for prefill
When prefilling the rds frags, we end up doing a lot of allocations. We're not in atomic context here, and so there's no reason to dip into atomic reserves. This changes the prefills to use masks that allow waiting.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r-- net/rds/ib_recv.c | 27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 5b429b7fd81c..1add097fe198 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -238,7 +238,8 @@ void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
 		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
 }
 
-static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic)
+static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
+						     gfp_t slab_mask)
 {
 	struct rds_ib_incoming *ibinc;
 	struct list_head *cache_item;
@@ -254,7 +255,7 @@ static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *i
 		rds_ib_stats_inc(s_ib_rx_alloc_limit);
 		return NULL;
 	}
-	ibinc = kmem_cache_alloc(rds_ib_incoming_slab, GFP_NOWAIT);
+	ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
 	if (!ibinc) {
 		atomic_dec(&rds_ib_allocation);
 		return NULL;
@@ -266,7 +267,8 @@ static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *i
 	return ibinc;
 }
 
-static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic)
+static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
+						    gfp_t slab_mask, gfp_t page_mask)
 {
 	struct rds_page_frag *frag;
 	struct list_head *cache_item;
@@ -276,12 +278,12 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
 	if (cache_item) {
 		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
 	} else {
-		frag = kmem_cache_alloc(rds_ib_frag_slab, GFP_NOWAIT);
+		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
 		if (!frag)
 			return NULL;
 
 		ret = rds_page_remainder_alloc(&frag->f_sg,
-					       RDS_FRAG_SIZE, GFP_NOWAIT);
+					       RDS_FRAG_SIZE, page_mask);
 		if (ret) {
 			kmem_cache_free(rds_ib_frag_slab, frag);
 			return NULL;
@@ -294,11 +296,18 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
 }
 
 static int rds_ib_recv_refill_one(struct rds_connection *conn,
-				  struct rds_ib_recv_work *recv)
+				  struct rds_ib_recv_work *recv, int prefill)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct ib_sge *sge;
 	int ret = -ENOMEM;
+	gfp_t slab_mask = GFP_NOWAIT;
+	gfp_t page_mask = GFP_NOWAIT;
+
+	if (prefill) {
+		slab_mask = GFP_KERNEL;
+		page_mask = GFP_HIGHUSER;
+	}
 
 	if (!ic->i_cache_incs.ready)
 		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
@@ -310,13 +319,13 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	 * recvs that were continuations will still have this allocated.
 	 */
 	if (!recv->r_ibinc) {
-		recv->r_ibinc = rds_ib_refill_one_inc(ic);
+		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
 		if (!recv->r_ibinc)
 			goto out;
 	}
 
 	WARN_ON(recv->r_frag); /* leak! */
-	recv->r_frag = rds_ib_refill_one_frag(ic);
+	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
 	if (!recv->r_frag)
 		goto out;
 
@@ -363,7 +372,7 @@ int rds_ib_recv_refill(struct rds_connection *conn, int prefill)
 	}
 
 	recv = &ic->i_recvs[pos];
-	ret = rds_ib_recv_refill_one(conn, recv);
+	ret = rds_ib_recv_refill_one(conn, recv, prefill);
 	if (ret) {
 		ret = -1;
 		break;