diff options
author | Ricardo Labiaga <Ricardo.Labiaga@netapp.com> | 2009-06-18 22:01:24 -0400 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2009-06-20 14:55:39 -0400 |
commit | e9f029855865e917821ef6034b31e340a4cfc815 (patch) | |
tree | 91eac0e44cbcd73e421497100316cc8e7ae102bb | |
parent | 578e4585685410cacd1a4ac86b7e3c12805be918 (diff) |
nfs41: sunrpc: xprt_alloc_bc_request() should not use spin_lock_bh()
xprt_alloc_bc_request() is always called in soft interrupt context.
Grab the spin_lock instead of the bottom half spin_lock. Softirqs
do not preempt other softirqs running on the same processor, so there
is no need to disable bottom halves.
Signed-off-by: Ricardo Labiaga <Ricardo.Labiaga@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r-- | net/sunrpc/backchannel_rqst.c | 7 |
1 files changed, 5 insertions, 2 deletions
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 5a7d342e3087..553621fb2c41 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
@@ -211,6 +211,9 @@ EXPORT_SYMBOL(xprt_destroy_backchannel); | |||
211 | * has been preallocated as well. Use xprt_alloc_bc_request to allocate | 211 | * has been preallocated as well. Use xprt_alloc_bc_request to allocate |
212 | * to this request. Use xprt_free_bc_request to return it. | 212 | * to this request. Use xprt_free_bc_request to return it. |
213 | * | 213 | * |
214 | * We know that we're called in soft interrupt context, grab the spin_lock | ||
215 | * since there is no need to grab the bottom half spin_lock. | ||
216 | * | ||
214 | * Return an available rpc_rqst, otherwise NULL if none are available. | 217 | * Return an available rpc_rqst, otherwise NULL if none are available. |
215 | */ | 218 | */ |
216 | struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt) | 219 | struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt) |
@@ -218,7 +221,7 @@ struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt) | |||
218 | struct rpc_rqst *req; | 221 | struct rpc_rqst *req; |
219 | 222 | ||
220 | dprintk("RPC: allocate a backchannel request\n"); | 223 | dprintk("RPC: allocate a backchannel request\n"); |
221 | spin_lock_bh(&xprt->bc_pa_lock); | 224 | spin_lock(&xprt->bc_pa_lock); |
222 | if (!list_empty(&xprt->bc_pa_list)) { | 225 | if (!list_empty(&xprt->bc_pa_list)) { |
223 | req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, | 226 | req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, |
224 | rq_bc_pa_list); | 227 | rq_bc_pa_list); |
@@ -226,7 +229,7 @@ struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt) | |||
226 | } else { | 229 | } else { |
227 | req = NULL; | 230 | req = NULL; |
228 | } | 231 | } |
229 | spin_unlock_bh(&xprt->bc_pa_lock); | 232 | spin_unlock(&xprt->bc_pa_lock); |
230 | 233 | ||
231 | if (req != NULL) { | 234 | if (req != NULL) { |
232 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); | 235 | set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); |