author		Jeff Layton <jlayton@redhat.com>	2016-09-16 16:28:24 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2016-09-26 15:20:36 -0400
commit		76d348fadff52e8ad10e7f587a4560df79a5fefe (patch)
tree		24b7a65f5fac106041d8bed8459b2a9a5f64bc79
parent		a188620ebd294b18d8da93f4b2a307d484e7bd27 (diff)
nfsd: have nfsd4_lock use blocking locks for v4.1+ locks
Create a new per-lockowner+per-inode structure that contains a
file_lock. Have nfsd4_lock add this structure to the lockowner's list
prior to setting the lock. Then call the VFS and request a blocking lock
(by setting FL_SLEEP). If we get anything besides FILE_LOCK_DEFERRED
back, then we dequeue the block structure and free it. When the next
lock request comes in, we'll look for an existing block for the same
filehandle and dequeue and reuse it if there is one.

When the lock comes free (via an lm_notify call), we dequeue it from the
lockowner's list and kick off a CB_NOTIFY_LOCK callback to inform the
client that it should retry the lock request.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
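[Editor's aside] The behaviour the patch asks the VFS for maps onto the familiar userspace split between F_SETLK and F_SETLKW: a non-blocking request fails immediately when another owner holds a conflicting lock, while a blocking request waits for the holder to drop it. The sketch below is an illustration of that distinction only; it is not nfsd code, and the temp-file path, timings, and fork-based setup are arbitrary choices for the demo. Inside the server the same idea is expressed by setting FL_SLEEP, treating FILE_LOCK_DEFERRED as "queued", and letting lm_notify provide the wakeup.

/*
 * Illustration only -- not nfsd code. Non-blocking (F_SETLK) vs
 * blocking (F_SETLKW) POSIX lock requests.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* whole file */
	};
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;

	if (fork() == 0) {
		/* child: take the lock, hold it briefly, then exit */
		fcntl(fd, F_SETLKW, &fl);
		sleep(2);
		_exit(0);	/* process exit releases the child's lock */
	}

	sleep(1);	/* let the child acquire the lock first */

	/* Non-blocking attempt: fails at once with EAGAIN or EACCES. */
	if (fcntl(fd, F_SETLK, &fl) == -1)
		printf("F_SETLK: %s (conflicting lock held)\n",
		       strerror(errno));

	/* Blocking attempt: sleeps until the child's lock goes away. */
	if (fcntl(fd, F_SETLKW, &fl) == 0)
		printf("F_SETLKW: lock acquired after waiting\n");

	wait(NULL);
	close(fd);
	return 0;
}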
-rw-r--r--	fs/nfsd/nfs4state.c	164
-rw-r--r--	fs/nfsd/state.h	12
2 files changed, 156 insertions, 20 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a204d7e109d4..ca0db4974e5b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -99,6 +99,7 @@ static struct kmem_cache *odstate_slab;
 static void free_session(struct nfsd4_session *);
 
 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
+static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
 
 static bool is_session_dead(struct nfsd4_session *ses)
 {
@@ -210,6 +211,84 @@ static void nfsd4_put_session(struct nfsd4_session *ses)
 	spin_unlock(&nn->client_lock);
 }
 
+static struct nfsd4_blocked_lock *
+find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
+			struct nfsd_net *nn)
+{
+	struct nfsd4_blocked_lock *cur, *found = NULL;
+
+	spin_lock(&nn->client_lock);
+	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
+		if (fh_match(fh, &cur->nbl_fh)) {
+			list_del_init(&cur->nbl_list);
+			found = cur;
+			break;
+		}
+	}
+	spin_unlock(&nn->client_lock);
+	if (found)
+		posix_unblock_lock(&found->nbl_lock);
+	return found;
+}
+
+static struct nfsd4_blocked_lock *
+find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
+			struct nfsd_net *nn)
+{
+	struct nfsd4_blocked_lock *nbl;
+
+	nbl = find_blocked_lock(lo, fh, nn);
+	if (!nbl) {
+		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
+		if (nbl) {
+			fh_copy_shallow(&nbl->nbl_fh, fh);
+			locks_init_lock(&nbl->nbl_lock);
+			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
+					&nfsd4_cb_notify_lock_ops,
+					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
+		}
+	}
+	return nbl;
+}
+
+static void
+free_blocked_lock(struct nfsd4_blocked_lock *nbl)
+{
+	locks_release_private(&nbl->nbl_lock);
+	kfree(nbl);
+}
+
+static int
+nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
+{
+	/*
+	 * Since this is just an optimization, we don't try very hard if it
+	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
+	 * just quit trying on anything else.
+	 */
+	switch (task->tk_status) {
+	case -NFS4ERR_DELAY:
+		rpc_delay(task, 1 * HZ);
+		return 0;
+	default:
+		return 1;
+	}
+}
+
+static void
+nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
+{
+	struct nfsd4_blocked_lock *nbl = container_of(cb,
+					struct nfsd4_blocked_lock, nbl_cb);
+
+	free_blocked_lock(nbl);
+}
+
+static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
+	.done		= nfsd4_cb_notify_lock_done,
+	.release	= nfsd4_cb_notify_lock_release,
+};
+
 static inline struct nfs4_stateowner *
 nfs4_get_stateowner(struct nfs4_stateowner *sop)
 {
@@ -5309,7 +5388,29 @@ nfsd4_fl_put_owner(fl_owner_t owner)
 	nfs4_put_stateowner(&lo->lo_owner);
 }
 
+static void
+nfsd4_lm_notify(struct file_lock *fl)
+{
+	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
+	struct net *net = lo->lo_owner.so_client->net;
+	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+	struct nfsd4_blocked_lock *nbl = container_of(fl,
+					struct nfsd4_blocked_lock, nbl_lock);
+	bool queue = false;
+
+	spin_lock(&nn->client_lock);
+	if (!list_empty(&nbl->nbl_list)) {
+		list_del_init(&nbl->nbl_list);
+		queue = true;
+	}
+	spin_unlock(&nn->client_lock);
+
+	if (queue)
+		nfsd4_run_cb(&nbl->nbl_cb);
+}
+
 static const struct lock_manager_operations nfsd_posix_mng_ops = {
+	.lm_notify = nfsd4_lm_notify,
 	.lm_get_owner = nfsd4_fl_get_owner,
 	.lm_put_owner = nfsd4_fl_put_owner,
 };
@@ -5407,6 +5508,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
 	if (!lo)
 		return NULL;
+	INIT_LIST_HEAD(&lo->lo_blocked);
 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
 	lo->lo_owner.so_is_open_owner = 0;
 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
@@ -5588,12 +5690,15 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	struct nfs4_ol_stateid *open_stp = NULL;
 	struct nfs4_file *fp;
 	struct file *filp = NULL;
+	struct nfsd4_blocked_lock *nbl = NULL;
 	struct file_lock *file_lock = NULL;
 	struct file_lock *conflock = NULL;
 	__be32 status = 0;
 	int lkflg;
 	int err;
 	bool new = false;
+	unsigned char fl_type;
+	unsigned int fl_flags = FL_POSIX;
 	struct net *net = SVC_NET(rqstp);
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
@@ -5658,46 +5763,55 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (!locks_in_grace(net) && lock->lk_reclaim)
 		goto out;
 
-	file_lock = locks_alloc_lock();
-	if (!file_lock) {
-		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
-		status = nfserr_jukebox;
-		goto out;
-	}
-
 	fp = lock_stp->st_stid.sc_file;
 	switch (lock->lk_type) {
-		case NFS4_READ_LT:
 		case NFS4_READW_LT:
+			if (nfsd4_has_session(cstate))
+				fl_flags |= FL_SLEEP;
+			/* Fallthrough */
+		case NFS4_READ_LT:
 			spin_lock(&fp->fi_lock);
 			filp = find_readable_file_locked(fp);
 			if (filp)
 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
 			spin_unlock(&fp->fi_lock);
-			file_lock->fl_type = F_RDLCK;
+			fl_type = F_RDLCK;
 			break;
-		case NFS4_WRITE_LT:
 		case NFS4_WRITEW_LT:
+			if (nfsd4_has_session(cstate))
+				fl_flags |= FL_SLEEP;
+			/* Fallthrough */
+		case NFS4_WRITE_LT:
 			spin_lock(&fp->fi_lock);
 			filp = find_writeable_file_locked(fp);
 			if (filp)
 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
 			spin_unlock(&fp->fi_lock);
-			file_lock->fl_type = F_WRLCK;
+			fl_type = F_WRLCK;
 			break;
 		default:
 			status = nfserr_inval;
 		goto out;
 	}
+
 	if (!filp) {
 		status = nfserr_openmode;
 		goto out;
 	}
 
+	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
+	if (!nbl) {
+		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
+		status = nfserr_jukebox;
+		goto out;
+	}
+
+	file_lock = &nbl->nbl_lock;
+	file_lock->fl_type = fl_type;
 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
 	file_lock->fl_pid = current->tgid;
 	file_lock->fl_file = filp;
-	file_lock->fl_flags = FL_POSIX;
+	file_lock->fl_flags = fl_flags;
 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
 	file_lock->fl_start = lock->lk_offset;
 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
@@ -5710,18 +5824,27 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		goto out;
 	}
 
+	if (fl_flags & FL_SLEEP) {
+		spin_lock(&nn->client_lock);
+		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
+		spin_unlock(&nn->client_lock);
+	}
+
 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
-	switch (-err) {
+	switch (err) {
 	case 0: /* success! */
 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
 		status = 0;
 		break;
-	case (EAGAIN):		/* conflock holds conflicting lock */
+	case FILE_LOCK_DEFERRED:
+		nbl = NULL;
+		/* Fallthrough */
+	case -EAGAIN:		/* conflock holds conflicting lock */
 		status = nfserr_denied;
 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
 		break;
-	case (EDEADLK):
+	case -EDEADLK:
 		status = nfserr_deadlock;
 		break;
 	default:
@@ -5730,6 +5853,15 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		break;
 	}
 out:
+	if (nbl) {
+		/* dequeue it if we queued it before */
+		if (fl_flags & FL_SLEEP) {
+			spin_lock(&nn->client_lock);
+			list_del_init(&nbl->nbl_list);
+			spin_unlock(&nn->client_lock);
+		}
+		free_blocked_lock(nbl);
+	}
 	if (filp)
 		fput(filp);
 	if (lock_stp) {
@@ -5753,8 +5885,6 @@ out:
 	if (open_stp)
 		nfs4_put_stid(&open_stp->st_stid);
 	nfsd4_bump_seqid(cstate, status);
-	if (file_lock)
-		locks_free_lock(file_lock);
 	if (conflock)
 		locks_free_lock(conflock);
 	return status;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 88d029dd13aa..e45c183a8bf7 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -440,11 +440,11 @@ struct nfs4_openowner {
 /*
  * Represents a generic "lockowner". Similar to an openowner. References to it
  * are held by the lock stateids that are created on its behalf. This object is
- * a superset of the nfs4_stateowner struct (or would be if it needed any extra
- * fields).
+ * a superset of the nfs4_stateowner struct.
  */
 struct nfs4_lockowner {
 	struct nfs4_stateowner	lo_owner;	/* must be first element */
+	struct list_head	lo_blocked;	/* blocked file_locks */
 };
 
 static inline struct nfs4_openowner * openowner(struct nfs4_stateowner *so)
@@ -580,7 +580,13 @@ static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
 	return (s32)(a->si_generation - b->si_generation) > 0;
 }
 
+/*
+ * When a client tries to get a lock on a file, we set one of these objects
+ * on the blocking lock. When the lock becomes free, we can then issue a
+ * CB_NOTIFY_LOCK callback to the client.
+ */
 struct nfsd4_blocked_lock {
+	struct list_head	nbl_list;
 	struct file_lock	nbl_lock;
 	struct knfsd_fh		nbl_fh;
 	struct nfsd4_callback	nbl_cb;