path: root/net/sunrpc
author     Trond Myklebust <Trond.Myklebust@netapp.com>  2007-07-19 16:32:20 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2007-08-07 15:13:16 -0400
commit     b247bbf1da69ce376aa1ceb8057331214589e366 (patch)
tree       95451eff3963389d5cb2b93bb74b71f89826bb68 /net/sunrpc
parent     4a2a4df7b6db25df8f3d5cc6dd0b096119359d92 (diff)
SUNRPC: Fix a race in rpciod_down()
Commit 4ada539ed77c7a2bbcb75cafbbd7bd8d2b9bef7b led to the unpleasant possibility of an asynchronous rpc_task being required to call rpciod_down() when it is complete. This in turn means that the rpciod workqueue may get to call destroy_workqueue() on itself -> hang...

Change rpciod_up()/rpciod_down() to just get/put the module, and create/destroy the workqueue on module load/unload instead.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
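To make the hang concrete, here is a minimal sketch (illustrative only, not taken from the kernel tree; the work function name is hypothetical) of the self-deadlock that could occur before this patch, when the final rpciod_down() ran from a work item executing on rpciod itself:

#include <linux/workqueue.h>

static struct workqueue_struct *wq;	/* stands in for rpciod_workqueue */

/* Hypothetical work function running on wq itself. */
static void last_user_work(struct work_struct *work)
{
	/*
	 * destroy_workqueue() drains the queue and waits for every pending
	 * and running work item to finish -- including this one, which is
	 * the caller.  The wait can never complete, so rpciod hangs.
	 */
	destroy_workqueue(wq);
}

With the patch, rpciod_down() only does module_put(), so no work item ever ends up tearing down the queue it is running on.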
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/sched.c | 57 +++++++++++++++++++++++----------------------------------
1 file changed, 23 insertions(+), 34 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b5723c262a3e..954d7ec86c7e 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -50,8 +50,6 @@ static RPC_WAITQ(delay_queue, "delayq");
 /*
  * rpciod-related stuff
  */
-static DEFINE_MUTEX(rpciod_mutex);
-static atomic_t rpciod_users = ATOMIC_INIT(0);
 struct workqueue_struct *rpciod_workqueue;
 
 /*
@@ -961,60 +959,49 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 	spin_unlock(&clnt->cl_lock);
 }
 
+int rpciod_up(void)
+{
+	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
+}
+
+void rpciod_down(void)
+{
+	module_put(THIS_MODULE);
+}
+
 /*
- * Start up the rpciod process if it's not already running.
+ * Start up the rpciod workqueue.
  */
-int
-rpciod_up(void)
+static int rpciod_start(void)
 {
 	struct workqueue_struct *wq;
-	int error = 0;
-
-	if (atomic_inc_not_zero(&rpciod_users))
-		return 0;
-
-	mutex_lock(&rpciod_mutex);
 
-	/* Guard against races with rpciod_down() */
-	if (rpciod_workqueue != NULL)
-		goto out_ok;
 	/*
 	 * Create the rpciod thread and wait for it to start.
 	 */
 	dprintk("RPC:       creating workqueue rpciod\n");
-	error = -ENOMEM;
 	wq = create_workqueue("rpciod");
-	if (wq == NULL)
-		goto out;
-
 	rpciod_workqueue = wq;
-	error = 0;
-out_ok:
-	atomic_inc(&rpciod_users);
-out:
-	mutex_unlock(&rpciod_mutex);
-	return error;
+	return rpciod_workqueue != NULL;
 }
 
-void
-rpciod_down(void)
+static void rpciod_stop(void)
 {
-	if (!atomic_dec_and_test(&rpciod_users))
-		return;
+	struct workqueue_struct *wq = NULL;
 
-	mutex_lock(&rpciod_mutex);
+	if (rpciod_workqueue == NULL)
+		return;
 	dprintk("RPC:       destroying workqueue rpciod\n");
 
-	if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
-		destroy_workqueue(rpciod_workqueue);
-		rpciod_workqueue = NULL;
-	}
-	mutex_unlock(&rpciod_mutex);
+	wq = rpciod_workqueue;
+	rpciod_workqueue = NULL;
+	destroy_workqueue(wq);
 }
 
 void
 rpc_destroy_mempool(void)
 {
+	rpciod_stop();
 	if (rpc_buffer_mempool)
 		mempool_destroy(rpc_buffer_mempool);
 	if (rpc_task_mempool)
@@ -1048,6 +1035,8 @@ rpc_init_mempool(void)
 						      rpc_buffer_slabp);
 	if (!rpc_buffer_mempool)
 		goto err_nomem;
+	if (!rpciod_start())
+		goto err_nomem;
 	return 0;
 err_nomem:
 	rpc_destroy_mempool();
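For reference, a hypothetical caller sketch (not part of this commit; example_client_setup()/example_client_teardown() are made-up names) showing how the reworked helpers are meant to be used after this change: rpciod_up() merely pins the sunrpc module, and the workqueue itself now lives for the whole module lifetime.

/* Hypothetical usage of the reworked helpers. */
static int example_client_setup(void)
{
	int error;

	error = rpciod_up();	/* now just try_module_get(THIS_MODULE) */
	if (error < 0)
		return error;	/* -EINVAL if the module is going away */
	/* ... safe to queue work on rpciod_workqueue here ... */
	return 0;
}

static void example_client_teardown(void)
{
	rpciod_down();		/* now just module_put(); no destroy_workqueue() */
}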