author     Tom Tucker <tom@opengridcomputing.com>    2008-04-30 23:00:46 -0400
committer  Tom Tucker <tom@opengridcomputing.com>    2008-05-19 08:33:54 -0400
commit     8da91ea8de873ee8be82377ff18637d05e882058 (patch)
tree       9cfb692f27dde588a360a0f612b8ea14b04a76a4
parent     47698e083e40bbd3ef87f5561390ae33abb13cd0 (diff)
svcrdma: Move destroy to kernel thread
Some providers may wait (sleep) while destroying adapter resources.
Since it is possible that the last reference is put from the
dto_tasklet, where sleeping is not allowed, the actual destroy
must be scheduled as a work item.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
-rw-r--r--  include/linux/sunrpc/svc_rdma.h           |  1
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  | 17
2 files changed, 15 insertions(+), 3 deletions(-)
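For readers less familiar with the idiom, the sketch below shows the deferred-destroy pattern in isolation, assuming hypothetical my_res, my_release(), and my_destroy_work() names that are not part of the patch: the kref release callback may fire in tasklet (softirq) context where sleeping is forbidden, so it only queues a work item, and the work handler, running in process context, performs the possibly blocking teardown. The diff that follows applies the same split to svc_rdma_free().

/* Sketch only -- illustrative names, not code from the patch. */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_res {
	struct kref		kref;
	struct work_struct	destroy_work;
};

/* Work handler: runs in process context, so it may block/sleep. */
static void my_destroy_work(struct work_struct *work)
{
	struct my_res *res = container_of(work, struct my_res, destroy_work);

	/* ... potentially blocking teardown of adapter resources ... */
	kfree(res);
}

/* kref release callback: may be invoked from a tasklet, must not sleep. */
static void my_release(struct kref *kref)
{
	struct my_res *res = container_of(kref, struct my_res, kref);

	INIT_WORK(&res->destroy_work, my_destroy_work);
	schedule_work(&res->destroy_work);
}

/* Callers drop their reference with kref_put(&res->kref, my_release). */

Queueing the work from the release path is safe because the object is already unreachable once its refcount hits zero, so nothing else can touch the embedded work_struct concurrently.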
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 701439064d21..f5f15ae2438b 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -124,6 +124,7 @@ struct svcxprt_rdma {
 	struct list_head sc_dto_q;	/* DTO tasklet I/O pending Q */
 	struct list_head sc_read_complete_q;
 	spinlock_t sc_read_complete_lock;
+	struct work_struct sc_work;
 };
 /* sc_flags */
 #define RDMAXPRT_RQ_PENDING	1
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 78303f0fad92..028c6cf89364 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -963,12 +963,15 @@ static void svc_rdma_detach(struct svc_xprt *xprt)
 	rdma_destroy_id(rdma->sc_cm_id);
 }
 
-static void svc_rdma_free(struct svc_xprt *xprt)
+static void __svc_rdma_free(struct work_struct *work)
 {
-	struct svcxprt_rdma *rdma = (struct svcxprt_rdma *)xprt;
+	struct svcxprt_rdma *rdma =
+		container_of(work, struct svcxprt_rdma, sc_work);
 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
+
 	/* We should only be called from kref_put */
-	BUG_ON(atomic_read(&xprt->xpt_ref.refcount) != 0);
+	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+
 	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
 		ib_destroy_cq(rdma->sc_sq_cq);
 
@@ -985,6 +988,14 @@ static void svc_rdma_free(struct svc_xprt *xprt)
 	kfree(rdma);
 }
 
+static void svc_rdma_free(struct svc_xprt *xprt)
+{
+	struct svcxprt_rdma *rdma =
+		container_of(xprt, struct svcxprt_rdma, sc_xprt);
+	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
+	schedule_work(&rdma->sc_work);
+}
+
 static int svc_rdma_has_wspace(struct svc_xprt *xprt)
 {
 	struct svcxprt_rdma *rdma =