aboutsummaryrefslogtreecommitdiffstats
path: root/fs/nfsd/nfs4state.c
diff options
context:
space:
mode:
authorJeff Layton <jlayton@redhat.com>2016-09-16 16:28:25 -0400
committerJ. Bruce Fields <bfields@redhat.com>2016-09-26 15:20:36 -0400
commit7919d0a27f1e7cb324e023776aa1cbff00f1ee7b (patch)
tree49ad90bf5a85f3c5ee3b0814be8902c3186614ec /fs/nfsd/nfs4state.c
parent76d348fadff52e8ad10e7f587a4560df79a5fefe (diff)
nfsd: add an LRU list for blocked locks
It's possible for a client to call in on a lock that is blocked for a long time, but discontinue polling for it. A malicious client could even set a lock on a file, and then spam the server with failing lock requests from different lockowners that pile up in a DoS attack. Add the blocked lock structures to a per-net namespace LRU when hashing them, and timestamp them. If the lock request is not revisited after a lease period, we'll drop it under the assumption that the client is no longer interested. This also gives us a mechanism to clean up these objects at server shutdown time as well. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r--fs/nfsd/nfs4state.c62
1 files changed, 62 insertions, 0 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index ca0db4974e5b..6c74d9a45163 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -221,6 +221,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { 221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
222 if (fh_match(fh, &cur->nbl_fh)) { 222 if (fh_match(fh, &cur->nbl_fh)) {
223 list_del_init(&cur->nbl_list); 223 list_del_init(&cur->nbl_list);
224 list_del_init(&cur->nbl_lru);
224 found = cur; 225 found = cur;
225 break; 226 break;
226 } 227 }
@@ -4580,6 +4581,7 @@ nfs4_laundromat(struct nfsd_net *nn)
4580 struct nfs4_openowner *oo; 4581 struct nfs4_openowner *oo;
4581 struct nfs4_delegation *dp; 4582 struct nfs4_delegation *dp;
4582 struct nfs4_ol_stateid *stp; 4583 struct nfs4_ol_stateid *stp;
4584 struct nfsd4_blocked_lock *nbl;
4583 struct list_head *pos, *next, reaplist; 4585 struct list_head *pos, *next, reaplist;
4584 time_t cutoff = get_seconds() - nn->nfsd4_lease; 4586 time_t cutoff = get_seconds() - nn->nfsd4_lease;
4585 time_t t, new_timeo = nn->nfsd4_lease; 4587 time_t t, new_timeo = nn->nfsd4_lease;
@@ -4648,6 +4650,41 @@ nfs4_laundromat(struct nfsd_net *nn)
4648 } 4650 }
4649 spin_unlock(&nn->client_lock); 4651 spin_unlock(&nn->client_lock);
4650 4652
4653 /*
4654 * It's possible for a client to try and acquire an already held lock
4655 * that is being held for a long time, and then lose interest in it.
4656 * So, we clean out any un-revisited request after a lease period
4657 * under the assumption that the client is no longer interested.
4658 *
4659 * RFC5661, sec. 9.6 states that the client must not rely on getting
4660 * notifications and must continue to poll for locks, even when the
4661 * server supports them. Thus this shouldn't lead to clients blocking
4662 * indefinitely once the lock does become free.
4663 */
4664 BUG_ON(!list_empty(&reaplist));
4665 spin_lock(&nn->client_lock);
4666 while (!list_empty(&nn->blocked_locks_lru)) {
4667 nbl = list_first_entry(&nn->blocked_locks_lru,
4668 struct nfsd4_blocked_lock, nbl_lru);
4669 if (time_after((unsigned long)nbl->nbl_time,
4670 (unsigned long)cutoff)) {
4671 t = nbl->nbl_time - cutoff;
4672 new_timeo = min(new_timeo, t);
4673 break;
4674 }
4675 list_move(&nbl->nbl_lru, &reaplist);
4676 list_del_init(&nbl->nbl_list);
4677 }
4678 spin_unlock(&nn->client_lock);
4679
4680 while (!list_empty(&reaplist)) {
4681 nbl = list_first_entry(&reaplist,
4682 struct nfsd4_blocked_lock, nbl_lru);
4683 list_del_init(&nbl->nbl_lru);
4684 posix_unblock_lock(&nbl->nbl_lock);
4685 free_blocked_lock(nbl);
4686 }
4687
4651 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 4688 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4652 return new_timeo; 4689 return new_timeo;
4653} 4690}
@@ -5398,9 +5435,11 @@ nfsd4_lm_notify(struct file_lock *fl)
5398 struct nfsd4_blocked_lock, nbl_lock); 5435 struct nfsd4_blocked_lock, nbl_lock);
5399 bool queue = false; 5436 bool queue = false;
5400 5437
5438 /* An empty list means that something else is going to be using it */
5401 spin_lock(&nn->client_lock); 5439 spin_lock(&nn->client_lock);
5402 if (!list_empty(&nbl->nbl_list)) { 5440 if (!list_empty(&nbl->nbl_list)) {
5403 list_del_init(&nbl->nbl_list); 5441 list_del_init(&nbl->nbl_list);
5442 list_del_init(&nbl->nbl_lru);
5404 queue = true; 5443 queue = true;
5405 } 5444 }
5406 spin_unlock(&nn->client_lock); 5445 spin_unlock(&nn->client_lock);
@@ -5825,8 +5864,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5825 } 5864 }
5826 5865
5827 if (fl_flags & FL_SLEEP) { 5866 if (fl_flags & FL_SLEEP) {
5867 nbl->nbl_time = get_seconds();
5828 spin_lock(&nn->client_lock); 5868 spin_lock(&nn->client_lock);
5829 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 5869 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
5870 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
5830 spin_unlock(&nn->client_lock); 5871 spin_unlock(&nn->client_lock);
5831 } 5872 }
5832 5873
@@ -5858,6 +5899,7 @@ out:
5858 if (fl_flags & FL_SLEEP) { 5899 if (fl_flags & FL_SLEEP) {
5859 spin_lock(&nn->client_lock); 5900 spin_lock(&nn->client_lock);
5860 list_del_init(&nbl->nbl_list); 5901 list_del_init(&nbl->nbl_list);
5902 list_del_init(&nbl->nbl_lru);
5861 spin_unlock(&nn->client_lock); 5903 spin_unlock(&nn->client_lock);
5862 } 5904 }
5863 free_blocked_lock(nbl); 5905 free_blocked_lock(nbl);
@@ -6898,6 +6940,7 @@ static int nfs4_state_create_net(struct net *net)
6898 INIT_LIST_HEAD(&nn->client_lru); 6940 INIT_LIST_HEAD(&nn->client_lru);
6899 INIT_LIST_HEAD(&nn->close_lru); 6941 INIT_LIST_HEAD(&nn->close_lru);
6900 INIT_LIST_HEAD(&nn->del_recall_lru); 6942 INIT_LIST_HEAD(&nn->del_recall_lru);
6943 INIT_LIST_HEAD(&nn->blocked_locks_lru);
6901 spin_lock_init(&nn->client_lock); 6944 spin_lock_init(&nn->client_lock);
6902 6945
6903 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 6946 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
@@ -6995,6 +7038,7 @@ nfs4_state_shutdown_net(struct net *net)
6995 struct nfs4_delegation *dp = NULL; 7038 struct nfs4_delegation *dp = NULL;
6996 struct list_head *pos, *next, reaplist; 7039 struct list_head *pos, *next, reaplist;
6997 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7040 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7041 struct nfsd4_blocked_lock *nbl;
6998 7042
6999 cancel_delayed_work_sync(&nn->laundromat_work); 7043 cancel_delayed_work_sync(&nn->laundromat_work);
7000 locks_end_grace(&nn->nfsd4_manager); 7044 locks_end_grace(&nn->nfsd4_manager);
@@ -7015,6 +7059,24 @@ nfs4_state_shutdown_net(struct net *net)
7015 nfs4_put_stid(&dp->dl_stid); 7059 nfs4_put_stid(&dp->dl_stid);
7016 } 7060 }
7017 7061
7062 BUG_ON(!list_empty(&reaplist));
7063 spin_lock(&nn->client_lock);
7064 while (!list_empty(&nn->blocked_locks_lru)) {
7065 nbl = list_first_entry(&nn->blocked_locks_lru,
7066 struct nfsd4_blocked_lock, nbl_lru);
7067 list_move(&nbl->nbl_lru, &reaplist);
7068 list_del_init(&nbl->nbl_list);
7069 }
7070 spin_unlock(&nn->client_lock);
7071
7072 while (!list_empty(&reaplist)) {
7073 nbl = list_first_entry(&reaplist,
7074 struct nfsd4_blocked_lock, nbl_lru);
7075 list_del_init(&nbl->nbl_lru);
7076 posix_unblock_lock(&nbl->nbl_lock);
7077 free_blocked_lock(nbl);
7078 }
7079
7018 nfsd4_client_tracking_exit(net); 7080 nfsd4_client_tracking_exit(net);
7019 nfs4_state_destroy_net(net); 7081 nfs4_state_destroy_net(net);
7020} 7082}