author		Chuck Lever <chuck.lever@oracle.com>	2015-05-26 11:53:13 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2015-06-12 13:10:37 -0400
commit		58d1dcf5a8ebb0ce8a521286a99efdd636012bf0 (patch)
tree		8fcc6ca4d9a5f1234f9f73acdd0fa0bd20a5e06c /net
parent		7e53df111beea8db2543424d07bdee2a630698c3 (diff)
xprtrdma: Split rb_lock
/proc/lock_stat showed contention between rpcrdma_buffer_get/put
and the MR allocation functions during I/O intensive workloads.
Now that MRs are no longer allocated in rpcrdma_buffer_get(),
there's no reason the rb_mws list has to be managed using the
same lock as the send/receive buffers. Split that lock. The
new lock does not need to disable interrupts because buffer
get/put is never called in an interrupt context.
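In short, the MW hot path moves from an irq-safe lock to a plain one. A condensed
sketch of the before/after pattern (taken from the verbs.c hunks below; buf and
the field names are from the patch itself):

	/* before: irq-safe, pays for saving/restoring interrupt state */
	unsigned long flags;
	spin_lock_irqsave(&buf->rb_lock, flags);
	/* ... add or remove an entry on buf->rb_mws ... */
	spin_unlock_irqrestore(&buf->rb_lock, flags);

	/* after: dedicated lock, taken only in process context */
	spin_lock(&buf->rb_mwlock);
	/* ... add or remove an entry on buf->rb_mws ... */
	spin_unlock(&buf->rb_mwlock);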
struct rpcrdma_buffer is re-arranged to ensure rb_mwlock and rb_mws
are always in a different cacheline than rb_lock and the buffer
pointers.
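For comparison, an explicit way to force that separation would be a
____cacheline_aligned_in_smp annotation on the second group of fields. This
patch does not do that; it relies on field ordering alone, as the xprt_rdma.h
hunk below shows. The struct and field names here are illustrative only:

	struct example_buffer {
		spinlock_t		mw_lock;	/* MR allocation path */
		struct list_head	mw_list;

		/* start the buffer-array fields on their own cacheline */
		spinlock_t		buf_lock ____cacheline_aligned_in_smp;
		u32			max_requests;
	};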
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/xprtrdma/fmr_ops.c	 1
-rw-r--r--	net/sunrpc/xprtrdma/frwr_ops.c	 1
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c	10
-rw-r--r--	net/sunrpc/xprtrdma/xprt_rdma.h	16
4 files changed, 15 insertions, 13 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 5dd77dac094c..52f9ad5fe19b 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -65,6 +65,7 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_mw *r;
 	int i, rc;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);
 
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 862279267fb8..18b7305d249f 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -266,6 +266,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
 	int i;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);
 
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index cc1a52609974..234083560d0e 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1173,15 +1173,14 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_mw *mw = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	if (!list_empty(&buf->rb_mws)) {
 		mw = list_first_entry(&buf->rb_mws,
 				      struct rpcrdma_mw, mw_list);
 		list_del_init(&mw->mw_list);
 	}
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 
 	if (!mw)
 		pr_err("RPC: %s: no MWs available\n", __func__);
@@ -1192,11 +1191,10 @@ void
 rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	list_add_tail(&mw->mw_list, &buf->rb_mws);
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 }
 
 static void
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 3ecee38bf1a0..df92884400c4 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -282,15 +282,17 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
  * One of these is associated with a transport instance
  */
 struct rpcrdma_buffer {
-	spinlock_t	rb_lock;	/* protects indexes */
-	u32		rb_max_requests;/* client max requests */
-	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
-	struct list_head rb_all;
-	int		rb_send_index;
+	spinlock_t		rb_mwlock;	/* protect rb_mws list */
+	struct list_head	rb_mws;
+	struct list_head	rb_all;
+	char			*rb_pool;
+
+	spinlock_t		rb_lock;	/* protect buf arrays */
+	u32			rb_max_requests;
+	int			rb_send_index;
+	int			rb_recv_index;
 	struct rpcrdma_req	**rb_send_bufs;
-	int		rb_recv_index;
 	struct rpcrdma_rep	**rb_recv_bufs;
-	char		*rb_pool;
 };
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
 