author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-09 11:38:14 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-08-09 11:38:14 -0400
commit		8b80fc02b829a59602b0f53eb9393ffb2db2659d (patch)
tree		9cce02e07ed1b30d6f6bd236a2d6015e5681eeb9
parent		6a0ed91e361a93ee1efb4c20c4967024ed2a8dd7 (diff)
parent		4011cd97886dd04b90fef8b671b9936cd39ab983 (diff)
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6:
SUNRPC: Replace flush_workqueue() with cancel_work_sync() and friends
NFS: Replace flush_scheduled_work with cancel_work_sync() and friends
SUNRPC: Don't call gss_delete_sec_context() from an rcu context
NFSv4: Don't call put_rpccred() from an rcu callback
NFS: Fix NFSv4 open stateid regressions
NFSv4: Fix a locking regression in nfs4_set_mode_locked()
NFS: Fix put_nfs_open_context
SUNRPC: Fix a race in rpciod_down()
-rw-r--r--  fs/nfs/delegation.c             | 21
-rw-r--r--  fs/nfs/inode.c                  | 24
-rw-r--r--  fs/nfs/namespace.c              |  6
-rw-r--r--  fs/nfs/nfs4proc.c               | 16
-rw-r--r--  fs/nfs/nfs4renewd.c             |  5
-rw-r--r--  fs/nfs/nfs4state.c              |  5
-rw-r--r--  include/linux/nfs_fs.h          |  2
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c  |  9
-rw-r--r--  net/sunrpc/cache.c              |  3
-rw-r--r--  net/sunrpc/rpc_pipe.c           |  3
-rw-r--r--  net/sunrpc/sched.c              | 57
11 files changed, 67 insertions(+), 84 deletions(-)
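
Several of the hunks below perform the same conversion: the old cancel_delayed_work() + flush_scheduled_work()/flush_workqueue() pairing is replaced by a synchronous cancel of just the one work item. A minimal sketch of that conversion, assuming a hypothetical delayed work item my_task (the real code operates on nfs_automount_task, cache_cleaner, rpci->queue_timeout and clp->cl_renewd):

#include <linux/workqueue.h>

static void my_task_fn(struct work_struct *work)
{
	/* periodic housekeeping would go here */
}
static DECLARE_DELAYED_WORK(my_task, my_task_fn);

/* Old idiom: cancel whatever is queued, then flush the entire shared
 * workqueue to wait out an instance that may already be running. */
static void my_shutdown_old(void)
{
	cancel_delayed_work(&my_task);
	flush_scheduled_work();
}

/* New idiom: wait for this one work item only, so shutdown no longer
 * blocks on (or deadlocks against) unrelated work on the same queue. */
static void my_shutdown_new(void)
{
	cancel_delayed_work_sync(&my_task);
}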
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 20ac403469a0..c55a761c22bb 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -20,10 +20,8 @@
 #include "delegation.h"
 #include "internal.h"
 
-static void nfs_free_delegation(struct nfs_delegation *delegation)
+static void nfs_do_free_delegation(struct nfs_delegation *delegation)
 {
-	if (delegation->cred)
-		put_rpccred(delegation->cred);
 	kfree(delegation);
 }
 
@@ -31,7 +29,18 @@ static void nfs_free_delegation_callback(struct rcu_head *head)
 {
 	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);
 
-	nfs_free_delegation(delegation);
+	nfs_do_free_delegation(delegation);
+}
+
+static void nfs_free_delegation(struct nfs_delegation *delegation)
+{
+	struct rpc_cred *cred;
+
+	cred = rcu_dereference(delegation->cred);
+	rcu_assign_pointer(delegation->cred, NULL);
+	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+	if (cred)
+		put_rpccred(cred);
 }
 
 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
@@ -166,7 +175,7 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
 	int res = 0;
 
 	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
-	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+	nfs_free_delegation(delegation);
 	return res;
 }
 
@@ -448,7 +457,7 @@ restart:
 		spin_unlock(&clp->cl_lock);
 		rcu_read_unlock();
 		if (delegation != NULL)
-			call_rcu(&delegation->rcu, nfs_free_delegation_callback);
+			nfs_free_delegation(delegation);
 		goto restart;
 	}
 	rcu_read_unlock();
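
The delegation.c change above, like the auth_gss.c change further down, keeps the sleeping release (put_rpccred(), gss_delete_sec_context()) out of the call_rcu() callback, which runs in softirq context. A minimal sketch of the pattern, with a hypothetical struct obj standing in for nfs_delegation:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sunrpc/auth.h>

struct obj {
	struct rpc_cred *cred;
	struct rcu_head rcu;
};

/* Runs in softirq context after a grace period: only softirq-safe
 * work (freeing memory) is allowed here. */
static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

/* Called from process context: detach the credential first, schedule
 * the RCU free, then drop the reference where sleeping is allowed. */
static void obj_free(struct obj *o)
{
	struct rpc_cred *cred = rcu_dereference(o->cred);

	rcu_assign_pointer(o->cred, NULL);
	call_rcu(&o->rcu, obj_free_rcu);
	if (cred)
		put_rpccred(cred);
}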
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index bca6cdcb9f0d..71a49c3acabd 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -468,7 +468,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
 		ctx->lockowner = current->files;
 		ctx->error = 0;
 		ctx->dir_cookie = 0;
-		kref_init(&ctx->kref);
+		atomic_set(&ctx->count, 1);
 	}
 	return ctx;
 }
@@ -476,21 +476,18 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
 {
 	if (ctx != NULL)
-		kref_get(&ctx->kref);
+		atomic_inc(&ctx->count);
 	return ctx;
 }
 
-static void nfs_free_open_context(struct kref *kref)
+void put_nfs_open_context(struct nfs_open_context *ctx)
 {
-	struct nfs_open_context *ctx = container_of(kref,
-			struct nfs_open_context, kref);
+	struct inode *inode = ctx->path.dentry->d_inode;
 
-	if (!list_empty(&ctx->list)) {
-		struct inode *inode = ctx->path.dentry->d_inode;
-		spin_lock(&inode->i_lock);
-		list_del(&ctx->list);
-		spin_unlock(&inode->i_lock);
-	}
+	if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
+		return;
+	list_del(&ctx->list);
+	spin_unlock(&inode->i_lock);
 	if (ctx->state != NULL)
 		nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
 	if (ctx->cred != NULL)
@@ -500,11 +497,6 @@ static void nfs_free_open_context(struct kref *kref)
 	kfree(ctx);
 }
 
-void put_nfs_open_context(struct nfs_open_context *ctx)
-{
-	kref_put(&ctx->kref, nfs_free_open_context);
-}
-
 /*
  * Ensure that mmap has a recent RPC credential for use when writing out
  * shared pages
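
The inode.c hunks above replace the kref with a bare atomic count so that put_nfs_open_context() can use atomic_dec_and_lock(): the final reference drop and the list unlink then happen under inode->i_lock in one step, so a lookup under that lock can never find a context whose count has already reached zero. A minimal sketch of the idiom, using hypothetical names (struct thing, thing_lock):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct thing {
	atomic_t count;
	struct list_head list;
};

static DEFINE_SPINLOCK(thing_lock);	/* plays the role of inode->i_lock */

/* atomic_dec_and_lock() takes the lock only when the count hits zero,
 * so unlinking and dropping the last reference form one atomic step
 * with respect to anyone walking the list under thing_lock. */
static void put_thing(struct thing *t)
{
	if (!atomic_dec_and_lock(&t->count, &thing_lock))
		return;
	list_del(&t->list);
	spin_unlock(&thing_lock);
	kfree(t);
}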
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 7f86e65182e4..aea76d0e5fbd 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -175,10 +175,8 @@ static void nfs_expire_automounts(struct work_struct *work)
 
 void nfs_release_automount_timer(void)
 {
-	if (list_empty(&nfs_automount_list)) {
-		cancel_delayed_work(&nfs_automount_task);
-		flush_scheduled_work();
-	}
+	if (list_empty(&nfs_automount_list))
+		cancel_delayed_work_sync(&nfs_automount_task);
 }
 
 /*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6ca2795ccd9c..62b3ae280310 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -332,11 +332,9 @@ static int can_open_cached(struct nfs4_state *state, int mode)
 	switch (mode & (FMODE_READ|FMODE_WRITE|O_EXCL)) {
 		case FMODE_READ:
 			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0;
-			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
 			break;
 		case FMODE_WRITE:
 			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0;
-			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
 			break;
 		case FMODE_READ|FMODE_WRITE:
 			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
@@ -1260,7 +1258,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
 	nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
 	switch (task->tk_status) {
 		case 0:
-			nfs_set_open_stateid(state, &calldata->res.stateid, calldata->arg.open_flags);
+			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
 			renew_lease(server, calldata->timestamp);
 			break;
 		case -NFS4ERR_STALE_STATEID:
@@ -1286,23 +1284,19 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
 		.rpc_cred = state->owner->so_cred,
 	};
 	int clear_rd, clear_wr, clear_rdwr;
-	int mode;
 
 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
 		return;
 
-	mode = FMODE_READ|FMODE_WRITE;
 	clear_rd = clear_wr = clear_rdwr = 0;
 	spin_lock(&state->owner->so_lock);
 	/* Calculate the change in open mode */
 	if (state->n_rdwr == 0) {
 		if (state->n_rdonly == 0) {
-			mode &= ~FMODE_READ;
 			clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags);
 			clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
 		}
 		if (state->n_wronly == 0) {
-			mode &= ~FMODE_WRITE;
 			clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags);
 			clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
 		}
@@ -1314,9 +1308,13 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
 		return;
 	}
 	nfs_fattr_init(calldata->res.fattr);
-	if (mode != 0)
+	if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
 		msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
-	calldata->arg.open_flags = mode;
+		calldata->arg.open_flags = FMODE_READ;
+	} else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
+		msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+		calldata->arg.open_flags = FMODE_WRITE;
+	}
 	calldata->timestamp = jiffies;
 	rpc_call_setup(task, &msg, 0);
 }
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 0505ca124034..3ea352d82eba 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -127,16 +127,15 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 void
 nfs4_renewd_prepare_shutdown(struct nfs_server *server)
 {
-	flush_scheduled_work();
+	cancel_delayed_work(&server->nfs_client->cl_renewd);
 }
 
 void
 nfs4_kill_renewd(struct nfs_client *clp)
 {
 	down_read(&clp->cl_sem);
-	cancel_delayed_work(&clp->cl_renewd);
+	cancel_delayed_work_sync(&clp->cl_renewd);
 	up_read(&clp->cl_sem);
-	flush_scheduled_work();
 }
 
 /*
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e9662ba81d86..3e4adf8c8312 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -341,8 +341,6 @@ nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
 		else
 			list_move_tail(&state->open_states, &state->owner->so_states);
 	}
-	if (mode == 0)
-		list_del_init(&state->inode_states);
 	state->state = mode;
 }
 
@@ -415,8 +413,7 @@ void nfs4_put_open_state(struct nfs4_state *state)
 	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
 		return;
 	spin_lock(&inode->i_lock);
-	if (!list_empty(&state->inode_states))
-		list_del(&state->inode_states);
+	list_del(&state->inode_states);
 	list_del(&state->open_states);
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&owner->so_lock);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 9ba4aec37c50..157dcb055b5c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -71,7 +71,7 @@ struct nfs_access_entry {
 
 struct nfs4_state;
 struct nfs_open_context {
-	struct kref kref;
+	atomic_t count;
 	struct path path;
 	struct rpc_cred *cred;
 	struct nfs4_state *state;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4bbc59cc237c..53995af9ca4b 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -736,9 +736,6 @@ gss_do_free_ctx(struct gss_cl_ctx *ctx)
 {
 	dprintk("RPC: gss_free_ctx\n");
 
-	if (ctx->gc_gss_ctx)
-		gss_delete_sec_context(&ctx->gc_gss_ctx);
-
 	kfree(ctx->gc_wire_ctx.data);
 	kfree(ctx);
 }
@@ -753,7 +750,13 @@ gss_free_ctx_callback(struct rcu_head *head)
 static void
 gss_free_ctx(struct gss_cl_ctx *ctx)
 {
+	struct gss_ctx *gc_gss_ctx;
+
+	gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
+	rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
 	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
+	if (gc_gss_ctx)
+		gss_delete_sec_context(&gc_gss_ctx);
 }
 
 static void
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 01c3c4105204..ebe344f34d1a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -371,8 +371,7 @@ int cache_unregister(struct cache_detail *cd)
 	}
 	if (list_empty(&cache_list)) {
 		/* module must be being unloaded so its safe to kill the worker */
-		cancel_delayed_work(&cache_cleaner);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&cache_cleaner);
 	}
 	return 0;
 }
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 650af064ff8d..669e12a4ed18 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -132,8 +132,7 @@ rpc_close_pipes(struct inode *inode)
 		rpci->nwriters = 0;
 		if (ops->release_pipe)
 			ops->release_pipe(inode);
-		cancel_delayed_work(&rpci->queue_timeout);
-		flush_workqueue(rpciod_workqueue);
+		cancel_delayed_work_sync(&rpci->queue_timeout);
 	}
 	rpc_inode_setowner(inode, NULL);
 	mutex_unlock(&inode->i_mutex);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b5723c262a3e..954d7ec86c7e 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -50,8 +50,6 @@ static RPC_WAITQ(delay_queue, "delayq");
 /*
  * rpciod-related stuff
 */
-static DEFINE_MUTEX(rpciod_mutex);
-static atomic_t rpciod_users = ATOMIC_INIT(0);
 struct workqueue_struct *rpciod_workqueue;
 
 /*
@@ -961,60 +959,49 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 	spin_unlock(&clnt->cl_lock);
 }
 
+int rpciod_up(void)
+{
+	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
+}
+
+void rpciod_down(void)
+{
+	module_put(THIS_MODULE);
+}
+
 /*
- * Start up the rpciod process if it's not already running.
+ * Start up the rpciod workqueue.
  */
-int
-rpciod_up(void)
+static int rpciod_start(void)
 {
 	struct workqueue_struct *wq;
-	int error = 0;
-
-	if (atomic_inc_not_zero(&rpciod_users))
-		return 0;
-
-	mutex_lock(&rpciod_mutex);
 
-	/* Guard against races with rpciod_down() */
-	if (rpciod_workqueue != NULL)
-		goto out_ok;
 	/*
 	 * Create the rpciod thread and wait for it to start.
 	 */
 	dprintk("RPC: creating workqueue rpciod\n");
-	error = -ENOMEM;
 	wq = create_workqueue("rpciod");
-	if (wq == NULL)
-		goto out;
-
 	rpciod_workqueue = wq;
-	error = 0;
-out_ok:
-	atomic_inc(&rpciod_users);
-out:
-	mutex_unlock(&rpciod_mutex);
-	return error;
+	return rpciod_workqueue != NULL;
 }
 
-void
-rpciod_down(void)
+static void rpciod_stop(void)
 {
-	if (!atomic_dec_and_test(&rpciod_users))
-		return;
+	struct workqueue_struct *wq = NULL;
 
-	mutex_lock(&rpciod_mutex);
+	if (rpciod_workqueue == NULL)
+		return;
 	dprintk("RPC: destroying workqueue rpciod\n");
 
-	if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
-		destroy_workqueue(rpciod_workqueue);
-		rpciod_workqueue = NULL;
-	}
-	mutex_unlock(&rpciod_mutex);
+	wq = rpciod_workqueue;
+	rpciod_workqueue = NULL;
+	destroy_workqueue(wq);
 }
 
 void
 rpc_destroy_mempool(void)
 {
+	rpciod_stop();
 	if (rpc_buffer_mempool)
 		mempool_destroy(rpc_buffer_mempool);
 	if (rpc_task_mempool)
@@ -1048,6 +1035,8 @@ rpc_init_mempool(void)
 						rpc_buffer_slabp);
 	if (!rpc_buffer_mempool)
 		goto err_nomem;
+	if (!rpciod_start())
+		goto err_nomem;
 	return 0;
 err_nomem:
 	rpc_destroy_mempool();