author	Tom Tucker <tom@opengridcomputing.com>	2008-05-28 14:20:24 -0400
committer	Tom Tucker <tom@opengridcomputing.com>	2008-07-02 16:01:57 -0400
commit	94dba4918d4570bfa98776e54a5fa527c848dc78 (patch)
tree	c861aac6e5eb5eec55be012c71655ba7178445ae
parent	87295b6c5c7fd7bbc0ce3e7f42d2adbbac7352b9 (diff)
svcrdma: Remove unneeded spin locks from __svc_rdma_free
At the time __svc_rdma_free is called, we are guaranteed that all references to this transport are gone. There is, therefore, no need to protect the resource lists with a spin lock.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
net/sunrpc/xprtrdma/svc_rdma_transport.c | 4 ----
1 file changed, 0 insertions(+), 4 deletions(-)
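The reasoning in the commit message is the standard refcount-teardown rule: once the last reference to the transport is dropped, the free path runs single-threaded, so the spin locks that guarded the lists against concurrent completion handlers no longer protect anything. Below is a minimal userspace sketch of that pattern, not the kernel code; every name in it (xprt, ctxt, xprt_queue_ctxt, xprt_put) is hypothetical, and pthread/stdatomic stand in for the kernel's locking and refcounting primitives.

/* Sketch only: lock while the object is shared, lock-free drain in
 * the free path that runs after the final reference is dropped. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctxt {
	struct ctxt *next;
	int id;
};

struct xprt {
	atomic_int refcnt;
	pthread_mutex_t lock;	/* guards pending while the xprt is shared */
	struct ctxt *pending;	/* singly linked queue of contexts */
};

/* Other threads may still hold references here, so the lock is needed. */
static void xprt_queue_ctxt(struct xprt *x, struct ctxt *c)
{
	pthread_mutex_lock(&x->lock);
	c->next = x->pending;
	x->pending = c;
	pthread_mutex_unlock(&x->lock);
}

/* Final teardown: runs only after refcnt reached zero, so no other
 * thread can touch x->pending and taking the lock would be dead weight. */
static void xprt_free(struct xprt *x)
{
	while (x->pending) {
		struct ctxt *c = x->pending;
		x->pending = c->next;
		free(c);
	}
	pthread_mutex_destroy(&x->lock);
	free(x);
}

static void xprt_put(struct xprt *x)
{
	if (atomic_fetch_sub(&x->refcnt, 1) == 1)
		xprt_free(x);	/* last reference: lock-free drain is safe */
}

int main(void)
{
	struct xprt *x = calloc(1, sizeof(*x));
	atomic_init(&x->refcnt, 1);
	pthread_mutex_init(&x->lock, NULL);

	struct ctxt *c = calloc(1, sizeof(*c));
	c->id = 1;
	xprt_queue_ctxt(x, c);

	xprt_put(x);	/* drops the final reference and drains the list */
	printf("transport freed\n");
	return 0;
}

The patch below applies exactly this distinction: the drain loops in __svc_rdma_free keep running, only the spin_lock_bh/spin_unlock_bh pairs around them are deleted.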
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6fddd588c031..7647789a1f68 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1027,7 +1027,6 @@ static void __svc_rdma_free(struct work_struct *work)
 	 * cm_id because the device ptr is needed to unmap the dma in
 	 * svc_rdma_put_context.
 	 */
-	spin_lock_bh(&rdma->sc_read_complete_lock);
 	while (!list_empty(&rdma->sc_read_complete_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_entry(rdma->sc_read_complete_q.next,
@@ -1036,10 +1035,8 @@ static void __svc_rdma_free(struct work_struct *work)
 		list_del_init(&ctxt->dto_q);
 		svc_rdma_put_context(ctxt, 1);
 	}
-	spin_unlock_bh(&rdma->sc_read_complete_lock);

 	/* Destroy queued, but not processed recv completions */
-	spin_lock_bh(&rdma->sc_rq_dto_lock);
 	while (!list_empty(&rdma->sc_rq_dto_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_entry(rdma->sc_rq_dto_q.next,
@@ -1048,7 +1045,6 @@ static void __svc_rdma_free(struct work_struct *work)
 		list_del_init(&ctxt->dto_q);
 		svc_rdma_put_context(ctxt, 1);
 	}
-	spin_unlock_bh(&rdma->sc_rq_dto_lock);

 	/* Warn if we leaked a resource or under-referenced */
 	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);