author     Jeff Layton <jlayton@poochiereds.net>    2015-08-24 12:41:48 -0400
committer  J. Bruce Fields <bfields@redhat.com>     2015-08-31 16:32:16 -0400
commit     3fcbbd244ed1d20dc0eb7d48d729503992fa9b7d (patch)
tree       ac901181a7228f5534a4b013c522cc2064beba8d /fs/nfsd
parent     e85687393f3ee0a77ccca016f903d1558bb69258 (diff)
nfsd: ensure that delegation stateid hash references are only put once
It's possible that a DELEGRETURN could race with (e.g.) client expiry,
in which case we could end up putting the delegation hash reference more
than once.
Have unhash_delegation_locked return a bool: true if it unhashed the
delegation, false if it had already been unhashed. In destroy_delegation,
only put the hash reference when that returns true.
The other callers of unhash_delegation_locked call it while walking
list_heads that shouldn't yet be detached. If it doesn't return true in
those cases, fire a WARN_ON, as that indicates a partially hashed
delegation and that something is likely very wrong.
Tested-by: Andrew W Elble <aweits@rit.edu>
Tested-by: Anna Schumaker <Anna.Schumaker@netapp.com>
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Cc: stable@vger.kernel.org
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
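To make the reference-counting logic easier to follow, here is a minimal user-space sketch of the pattern the patch adopts (hypothetical names such as destroy_deleg and unhash_deleg_locked, simplified types, and a plain mutex standing in for the real nfsd structures and state_lock; this is not the nfsd code itself): the unhash helper reports whether it actually unhashed the object, and only the caller that sees true drops the hash reference, so two racing teardown paths cannot put it twice.

/*
 * Simplified illustration of the "unhash reports success, caller puts
 * conditionally" pattern -- NOT the actual nfsd code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct deleg {
        bool hashed;    /* stands in for !list_empty(&dp->dl_perfile) */
        int refcount;   /* stands in for the stateid reference count  */
};

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true only if this caller actually performed the unhash. */
static bool unhash_deleg_locked(struct deleg *dp)
{
        if (!dp->hashed)
                return false;   /* a racing path already unhashed it */
        dp->hashed = false;
        return true;
}

static void destroy_deleg(struct deleg *dp)
{
        bool unhashed;

        pthread_mutex_lock(&state_lock);
        unhashed = unhash_deleg_locked(dp);
        pthread_mutex_unlock(&state_lock);

        /* Only the path that performed the unhash puts the hash reference. */
        if (unhashed)
                dp->refcount--;
}

int main(void)
{
        struct deleg dp = { .hashed = true, .refcount = 2 };

        destroy_deleg(&dp);     /* e.g. DELEGRETURN */
        destroy_deleg(&dp);     /* e.g. racing client expiry: no second put */
        printf("refcount = %d\n", dp.refcount);    /* prints 1, not 0 */
        return 0;
}

Without the "if (unhashed)" check, the second call would drop the reference again, which is exactly the double put this patch closes off.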
Diffstat (limited to 'fs/nfsd')
-rw-r--r--  fs/nfsd/nfs4state.c  26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f318e706cb35..416f32e34a33 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 }
 
-static void
+static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
 
 	lockdep_assert_held(&state_lock);
 
+	if (list_empty(&dp->dl_perfile))
+		return false;
+
 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
 	/* Ensure that deleg break won't try to requeue it */
 	++dp->dl_time;
@@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
 	list_del_init(&dp->dl_recall_lru);
 	list_del_init(&dp->dl_perfile);
 	spin_unlock(&fp->fi_lock);
+	return true;
 }
 
 static void destroy_delegation(struct nfs4_delegation *dp)
 {
+	bool unhashed;
+
 	spin_lock(&state_lock);
-	unhash_delegation_locked(dp);
+	unhashed = unhash_delegation_locked(dp);
 	spin_unlock(&state_lock);
-	put_clnt_odstate(dp->dl_clnt_odstate);
-	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
-	nfs4_put_stid(&dp->dl_stid);
+	if (unhashed) {
+		put_clnt_odstate(dp->dl_clnt_odstate);
+		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
+		nfs4_put_stid(&dp->dl_stid);
+	}
 }
 
 static void revoke_delegation(struct nfs4_delegation *dp)
@@ -1730,7 +1738,7 @@ __destroy_client(struct nfs4_client *clp)
 	spin_lock(&state_lock);
 	while (!list_empty(&clp->cl_delegations)) {
 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -4357,7 +4365,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 			new_timeo = min(new_timeo, t);
 			break;
 		}
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -6314,7 +6322,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
 				continue;
 
 			atomic_inc(&clp->cl_refcount);
-			unhash_delegation_locked(dp);
+			WARN_ON(!unhash_delegation_locked(dp));
 			list_add(&dp->dl_recall_lru, victims);
 		}
 		++count;
@@ -6645,7 +6653,7 @@ nfs4_state_shutdown_net(struct net *net)
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);