aboutsummaryrefslogtreecommitdiffstats
path: root/net/rds/ib.h
diff options
context:
space:
mode:
authorAndy Grover <andy.grover@oracle.com>2010-03-18 20:19:52 -0400
committerAndy Grover <andy.grover@oracle.com>2010-09-08 21:12:11 -0400
commitf17a1a55fb672d7f64be7f2e940ef5669e5efa0a (patch)
treeed1651f64ade2676101766af764d11ef98e30f41 /net/rds/ib.h
parent77dd550e5547846604ff6f90c4dc6bba4414e485 (diff)
RDS: Refill recv ring directly from tasklet
Performance is better if we use allocations that don't block to refill the receive ring. Since the whole reason we were kicking out to the worker thread was so we could do blocking allocs, we no longer need to do this. Remove gfp params from rds_ib_recv_refill(); we always use GFP_NOWAIT. Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net/rds/ib.h')
-rw-r--r--net/rds/ib.h3
1 files changed, 1 insertions, 2 deletions
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 202140a84f0c..4c2ee2401979 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -297,8 +297,7 @@ void rds_ib_flush_mrs(void);
 int __init rds_ib_recv_init(void);
 void rds_ib_recv_exit(void);
 int rds_ib_recv(struct rds_connection *conn);
-int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
-		       gfp_t page_gfp, int prefill);
+int rds_ib_recv_refill(struct rds_connection *conn, int prefill);
 void rds_ib_inc_free(struct rds_incoming *inc);
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
 			    size_t size);