author    | Tom Tucker <tom@opengridcomputing.com> | 2008-05-01 12:25:02 -0400
committer | Tom Tucker <tom@opengridcomputing.com> | 2008-05-19 08:33:57 -0400
commit    | 356d0a1519867422c3f17f79e2183f8c2d44f8ee (patch)
tree      | 69c60d0d9605d617e90afbccf9fb054c56958c3b /net/sunrpc/xprtrdma
parent    | 1711386c62c97f7fb086a2247d44cdb1f8867640 (diff)
svcrdma: Cleanup queued, but unprocessed I/O in svc_rdma_free
When the transport is closing, the DTO tasklet may queue data
that never gets processed. Clean up resources associated with
this I/O.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
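For context, both cleanup loops this patch adds follow the same idiom: drain a spinlock-protected list of pending I/O contexts, releasing each entry as it is removed. Below is a minimal standalone sketch of that idiom in kernel style; the demo_ctxt structure and demo_drain() helper are hypothetical names for illustration and are not part of this patch.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for struct svc_rdma_op_ctxt. */
struct demo_ctxt {
	struct list_head dto_q;		/* link on the pending-I/O queue */
};

/*
 * Pop every queued context off @q and release it, mirroring the
 * while (!list_empty(...)) loops added by this patch.
 */
static void demo_drain(struct list_head *q, spinlock_t *lock)
{
	struct demo_ctxt *ctxt;

	spin_lock_bh(lock);
	while (!list_empty(q)) {
		ctxt = list_entry(q->next, struct demo_ctxt, dto_q);
		list_del_init(&ctxt->dto_q);
		kfree(ctxt);	/* the patch calls svc_rdma_put_context(ctxt, 1) */
	}
	spin_unlock_bh(lock);
}

The bottom-half variant spin_lock_bh() is used, rather than plain spin_lock(), because these queues are also filled from the DTO tasklet, which runs in softirq context.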
Diffstat (limited to 'net/sunrpc/xprtrdma')

 -rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_transport.c | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index b412a49c46fc..b1ff08d7da6c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -976,13 +976,42 @@ static void __svc_rdma_free(struct work_struct *work)
 	/* We should only be called from kref_put */
 	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
 
+	/*
+	 * Destroy queued, but not processed read completions. Note
+	 * that this cleanup has to be done before destroying the
+	 * cm_id because the device ptr is needed to unmap the dma in
+	 * svc_rdma_put_context.
+	 */
+	spin_lock_bh(&rdma->sc_read_complete_lock);
+	while (!list_empty(&rdma->sc_read_complete_q)) {
+		struct svc_rdma_op_ctxt *ctxt;
+		ctxt = list_entry(rdma->sc_read_complete_q.next,
+				  struct svc_rdma_op_ctxt,
+				  dto_q);
+		list_del_init(&ctxt->dto_q);
+		svc_rdma_put_context(ctxt, 1);
+	}
+	spin_unlock_bh(&rdma->sc_read_complete_lock);
+
+	/* Destroy queued, but not processed recv completions */
+	spin_lock_bh(&rdma->sc_rq_dto_lock);
+	while (!list_empty(&rdma->sc_rq_dto_q)) {
+		struct svc_rdma_op_ctxt *ctxt;
+		ctxt = list_entry(rdma->sc_rq_dto_q.next,
+				  struct svc_rdma_op_ctxt,
+				  dto_q);
+		list_del_init(&ctxt->dto_q);
+		svc_rdma_put_context(ctxt, 1);
+	}
+	spin_unlock_bh(&rdma->sc_rq_dto_lock);
+
+	/* Warn if we leaked a resource or under-referenced */
+	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
+
 	/* Destroy the QP if present (not a listener) */
 	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
 		ib_destroy_qp(rdma->sc_qp);
 
-	/* Destroy the CM ID */
-	rdma_destroy_id(rdma->sc_cm_id);
-
 	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
 		ib_destroy_cq(rdma->sc_sq_cq);
 
@@ -995,6 +1024,9 @@ static void __svc_rdma_free(struct work_struct *work)
 	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
 		ib_dealloc_pd(rdma->sc_pd);
 
+	/* Destroy the CM ID */
+	rdma_destroy_id(rdma->sc_cm_id);
+
 	destroy_context_cache(rdma);
 	kfree(rdma);
 }
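Note the ordering change the second hunk completes: rdma_destroy_id() used to run right after ib_destroy_qp(), but the new drain loops call svc_rdma_put_context(), which needs the device pointer reachable through sc_cm_id to unmap DMA. The CM ID must therefore outlive the drains, so its destruction moves down to just before destroy_context_cache(). A schematic of the resulting teardown order in __svc_rdma_free() (paraphrased from the patched function, not verbatim kernel code):

/* Teardown order after this patch:
 * 1. Drain sc_read_complete_q and sc_rq_dto_q  (DMA unmap still needs
 *    the device behind sc_cm_id)
 * 2. WARN_ON(sc_ctxt_used != 0)                (catch leaked contexts)
 * 3. ib_destroy_qp(), ib_destroy_cq(), ib_dealloc_pd()
 * 4. rdma_destroy_id()                         (now safe: nothing left to unmap)
 * 5. destroy_context_cache(), kfree(rdma)
 */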