author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-09 11:38:14 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-09 11:38:14 -0400
commit		8b80fc02b829a59602b0f53eb9393ffb2db2659d (patch)
tree		9cce02e07ed1b30d6f6bd236a2d6015e5681eeb9 /net
parent		6a0ed91e361a93ee1efb4c20c4967024ed2a8dd7 (diff)
parent		4011cd97886dd04b90fef8b671b9936cd39ab983 (diff)
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6:
SUNRPC: Replace flush_workqueue() with cancel_work_sync() and friends
NFS: Replace flush_scheduled_work with cancel_work_sync() and friends
SUNRPC: Don't call gss_delete_sec_context() from an rcu context
NFSv4: Don't call put_rpccred() from an rcu callback
NFS: Fix NFSv4 open stateid regressions
NFSv4: Fix a locking regression in nfs4_set_mode_locked()
NFS: Fix put_nfs_open_context
SUNRPC: Fix a race in rpciod_down()
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/auth_gss/auth_gss.c	 9
-rw-r--r--	net/sunrpc/cache.c	 3
-rw-r--r--	net/sunrpc/rpc_pipe.c	 3
-rw-r--r--	net/sunrpc/sched.c	57
4 files changed, 31 insertions, 41 deletions
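The first two changelog entries above replace blanket flush_scheduled_work()/flush_workqueue() calls with targeted cancel_work_sync()/cancel_delayed_work_sync() calls, which wait only for the one work item being torn down. A minimal sketch of that pattern, using a hypothetical foo driver rather than the NFS code itself:

#include <linux/workqueue.h>

/* Hypothetical driver state -- not part of the SUNRPC/NFS code. */
struct foo_device {
	struct delayed_work timeout_work;
};

static void foo_shutdown(struct foo_device *foo)
{
	/*
	 * Old pattern: cancel_delayed_work() only removes a pending item;
	 * if the handler is already running, a flush_scheduled_work() or
	 * flush_workqueue() is needed as well, and that waits for every
	 * work item on the queue, not just this one.
	 *
	 * New pattern: cancel_delayed_work_sync() cancels the pending item
	 * and, if the handler is already executing, waits for it to finish.
	 * The caller must not hold locks that the work handler also takes.
	 */
	cancel_delayed_work_sync(&foo->timeout_work);
}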
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4bbc59cc237c..53995af9ca4b 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -736,9 +736,6 @@ gss_do_free_ctx(struct gss_cl_ctx *ctx)
 {
 	dprintk("RPC: gss_free_ctx\n");
 
-	if (ctx->gc_gss_ctx)
-		gss_delete_sec_context(&ctx->gc_gss_ctx);
-
 	kfree(ctx->gc_wire_ctx.data);
 	kfree(ctx);
 }
@@ -753,7 +750,13 @@ gss_free_ctx_callback(struct rcu_head *head)
 static void
 gss_free_ctx(struct gss_cl_ctx *ctx)
 {
+	struct gss_ctx *gc_gss_ctx;
+
+	gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
+	rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
 	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
+	if (gc_gss_ctx)
+		gss_delete_sec_context(&gc_gss_ctx);
 }
 
 static void
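The gss_free_ctx() change exists because gss_delete_sec_context() may sleep, while RCU callbacks such as gss_free_ctx_callback() run from softirq context. The fix detaches the inner context before queueing the RCU callback and destroys it in process context. A minimal sketch of that pattern with hypothetical names (bar_ctx, baz_destroy), not the actual SUNRPC structures:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct baz;				/* opaque inner object; its teardown may sleep */
void baz_destroy(struct baz **bazp);	/* hypothetical, stands in for gss_delete_sec_context() */

struct bar_ctx {
	struct baz	*inner;
	struct rcu_head	rcu;
};

/* Runs from softirq context: only non-sleeping cleanup is allowed here. */
static void bar_free_rcu(struct rcu_head *head)
{
	struct bar_ctx *ctx = container_of(head, struct bar_ctx, rcu);

	kfree(ctx);
}

static void bar_free(struct bar_ctx *ctx)
{
	struct baz *inner;

	/* Detach the inner object, then free the outer one after a grace period. */
	inner = rcu_dereference(ctx->inner);
	rcu_assign_pointer(ctx->inner, NULL);
	call_rcu(&ctx->rcu, bar_free_rcu);

	/* The sleeping teardown now happens here, in process context. */
	if (inner)
		baz_destroy(&inner);
}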
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 01c3c4105204..ebe344f34d1a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -371,8 +371,7 @@ int cache_unregister(struct cache_detail *cd)
 	}
 	if (list_empty(&cache_list)) {
 		/* module must be being unloaded so its safe to kill the worker */
-		cancel_delayed_work(&cache_cleaner);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&cache_cleaner);
 	}
 	return 0;
 }
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 650af064ff8d..669e12a4ed18 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -132,8 +132,7 @@ rpc_close_pipes(struct inode *inode)
 		rpci->nwriters = 0;
 		if (ops->release_pipe)
 			ops->release_pipe(inode);
-		cancel_delayed_work(&rpci->queue_timeout);
-		flush_workqueue(rpciod_workqueue);
+		cancel_delayed_work_sync(&rpci->queue_timeout);
 	}
 	rpc_inode_setowner(inode, NULL);
 	mutex_unlock(&inode->i_mutex);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b5723c262a3e..954d7ec86c7e 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -50,8 +50,6 @@ static RPC_WAITQ(delay_queue, "delayq");
 /*
  * rpciod-related stuff
  */
-static DEFINE_MUTEX(rpciod_mutex);
-static atomic_t rpciod_users = ATOMIC_INIT(0);
 struct workqueue_struct *rpciod_workqueue;
 
 /*
@@ -961,60 +959,49 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 	spin_unlock(&clnt->cl_lock);
 }
 
+int rpciod_up(void)
+{
+	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
+}
+
+void rpciod_down(void)
+{
+	module_put(THIS_MODULE);
+}
+
 /*
- * Start up the rpciod process if it's not already running.
+ * Start up the rpciod workqueue.
  */
-int
-rpciod_up(void)
+static int rpciod_start(void)
 {
 	struct workqueue_struct *wq;
-	int error = 0;
-
-	if (atomic_inc_not_zero(&rpciod_users))
-		return 0;
-
-	mutex_lock(&rpciod_mutex);
 
-	/* Guard against races with rpciod_down() */
-	if (rpciod_workqueue != NULL)
-		goto out_ok;
 	/*
 	 * Create the rpciod thread and wait for it to start.
 	 */
 	dprintk("RPC: creating workqueue rpciod\n");
-	error = -ENOMEM;
 	wq = create_workqueue("rpciod");
-	if (wq == NULL)
-		goto out;
-
 	rpciod_workqueue = wq;
-	error = 0;
-out_ok:
-	atomic_inc(&rpciod_users);
-out:
-	mutex_unlock(&rpciod_mutex);
-	return error;
+	return rpciod_workqueue != NULL;
 }
 
-void
-rpciod_down(void)
+static void rpciod_stop(void)
 {
-	if (!atomic_dec_and_test(&rpciod_users))
-		return;
+	struct workqueue_struct *wq = NULL;
 
-	mutex_lock(&rpciod_mutex);
+	if (rpciod_workqueue == NULL)
+		return;
 	dprintk("RPC: destroying workqueue rpciod\n");
 
-	if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
-		destroy_workqueue(rpciod_workqueue);
-		rpciod_workqueue = NULL;
-	}
-	mutex_unlock(&rpciod_mutex);
+	wq = rpciod_workqueue;
+	rpciod_workqueue = NULL;
+	destroy_workqueue(wq);
 }
 
 void
 rpc_destroy_mempool(void)
 {
+	rpciod_stop();
 	if (rpc_buffer_mempool)
 		mempool_destroy(rpc_buffer_mempool);
 	if (rpc_task_mempool)
@@ -1048,6 +1035,8 @@ rpc_init_mempool(void)
 						  rpc_buffer_slabp);
 	if (!rpc_buffer_mempool)
 		goto err_nomem;
+	if (!rpciod_start())
+		goto err_nomem;
 	return 0;
 err_nomem:
 	rpc_destroy_mempool();
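After this patch, rpciod's workqueue lives for the lifetime of the sunrpc module: rpciod_start() runs from rpc_init_mempool(), rpciod_stop() from rpc_destroy_mempool(), and rpciod_up()/rpciod_down() collapse into module reference counting, which removes the rpciod_mutex/rpciod_users dance and its race. A rough sketch of that lifetime model in a hypothetical, self-contained module (the example_* names are not from the kernel source):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

/* Create the workqueue once, at module load, so users never race over creation. */
static int __init example_init(void)
{
	example_wq = create_workqueue("example_wq");
	return example_wq ? 0 : -ENOMEM;
}

/* Users pin the module instead of refcounting the workqueue itself. */
int example_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void example_down(void)
{
	module_put(THIS_MODULE);
}

/* Destroy the workqueue once, at module unload, after all users are gone. */
static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");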