author		Trond Myklebust <Trond.Myklebust@netapp.com>	2010-05-13 12:51:01 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2010-05-14 15:09:33 -0400
commit		8535b2be5181fc3019e4150567ef53210fe3b04f
tree		ba366f5304f12876f1e45d3c2b423d12f9ec0c90 /fs/nfs/nfs4state.c
parent		712a4338669d7d57f952244abb608e6ac07e39da
NFSv4: Don't use GFP_KERNEL allocations in state recovery
We do not want the state recovery thread to kick off, and then wait on, a
memory reclaim, since that may deadlock when the resulting writebacks end
up waiting for the state recovery thread to complete.
The safe thing is therefore to use GFP_NOFS in all open, close,
delegation return, lock, etc. operations that may be called by the
state recovery thread.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
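[Editor's note, not part of the commit: the sketch below illustrates the pattern the patch applies. The names demo_state and demo_alloc_state are hypothetical; the point is that allocations reachable from the state recovery thread take a caller-supplied gfp_t, so recovery paths can pass GFP_NOFS, which may sleep and reclaim like GFP_KERNEL but keeps the allocator out of filesystem writeback.]

/*
 * Illustrative sketch only -- demo_state and demo_alloc_state are
 * hypothetical names, not part of this patch.
 *
 * A GFP_NOFS allocation may sleep and trigger reclaim, but the
 * allocator will not recurse into filesystem writeback. That is
 * what prevents the deadlock described above: an allocation made
 * on behalf of the state recovery thread can no longer end up
 * waiting on NFS writeback that itself waits for state recovery.
 */
#include <linux/slab.h>

struct demo_state {
	int refcount;
};

static struct demo_state *demo_alloc_state(gfp_t gfp_mask)
{
	/* The caller picks the reclaim context. */
	return kzalloc(sizeof(struct demo_state), gfp_mask);
}

/* Recovery-reachable path: demo_alloc_state(GFP_NOFS);   */
/* Ordinary syscall path:   demo_alloc_state(GFP_KERNEL); */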
Diffstat (limited to 'fs/nfs/nfs4state.c')
 fs/nfs/nfs4state.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cd2d90400d46..34acf5926fdc 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -366,7 +366,7 @@ nfs4_alloc_state_owner(void)
 {
 	struct nfs4_state_owner *sp;
 
-	sp = kzalloc(sizeof(*sp),GFP_KERNEL);
+	sp = kzalloc(sizeof(*sp),GFP_NOFS);
 	if (!sp)
 		return NULL;
 	spin_lock_init(&sp->so_lock);
@@ -440,7 +440,7 @@ nfs4_alloc_open_state(void)
 {
 	struct nfs4_state *state;
 
-	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	state = kzalloc(sizeof(*state), GFP_NOFS);
 	if (!state)
 		return NULL;
 	atomic_set(&state->count, 1);
@@ -542,7 +542,8 @@ void nfs4_put_open_state(struct nfs4_state *state)
 /*
  * Close the current file.
  */
-static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
+static void __nfs4_close(struct path *path, struct nfs4_state *state,
+		fmode_t fmode, gfp_t gfp_mask, int wait)
 {
 	struct nfs4_state_owner *owner = state->owner;
 	int call_close = 0;
@@ -583,17 +584,17 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fm
 		nfs4_put_open_state(state);
 		nfs4_put_state_owner(owner);
 	} else
-		nfs4_do_close(path, state, wait);
+		nfs4_do_close(path, state, gfp_mask, wait);
 }
 
 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
 {
-	__nfs4_close(path, state, fmode, 0);
+	__nfs4_close(path, state, fmode, GFP_NOFS, 0);
 }
 
 void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
 {
-	__nfs4_close(path, state, fmode, 1);
+	__nfs4_close(path, state, fmode, GFP_KERNEL, 1);
 }
 
 /*
@@ -623,7 +624,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	struct nfs4_lock_state *lsp;
 	struct nfs_client *clp = state->owner->so_client;
 
-	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
+	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
 		return NULL;
 	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
@@ -759,11 +760,11 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
 	nfs4_put_lock_state(lsp);
 }
 
-struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
 {
 	struct nfs_seqid *new;
 
-	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	new = kmalloc(sizeof(*new), gfp_mask);
 	if (new != NULL) {
 		new->sequence = counter;
 		INIT_LIST_HEAD(&new->list);
@@ -1352,7 +1353,7 @@ static int nfs4_recall_slot(struct nfs_client *clp)
 
 	nfs4_begin_drain_session(clp);
 	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
-		GFP_KERNEL);
+		GFP_NOFS);
 	if (!new)
 		return -ENOMEM;
 
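[Editor's note, not part of the commit: with nfs_alloc_seqid() now taking a gfp_t, each call site chooses its own allocation context. The helper below is a hypothetical sketch of such a call site; the real callers live outside fs/nfs/nfs4state.c and are not shown by this diffstat-limited view.]

/* Hypothetical caller sketch, not part of this patch. */
static int demo_take_seqid(struct nfs_seqid_counter *counter,
			   struct nfs_seqid **res, gfp_t gfp_mask)
{
	/* Recovery-reachable callers would pass GFP_NOFS here;
	 * ordinary syscall paths may still pass GFP_KERNEL. */
	struct nfs_seqid *seqid = nfs_alloc_seqid(counter, gfp_mask);

	if (seqid == NULL)
		return -ENOMEM;
	*res = seqid;
	return 0;
}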