author    Jeff Layton <jlayton@primarydata.com>    2014-07-16 10:31:57 -0400
committer J. Bruce Fields <bfields@redhat.com>    2014-07-16 21:06:12 -0400
commit    02e1215f9f72ad8c087e21a5701bea0ac18fafd4 (patch)
tree      26b317c316a0d37e04ac1d81116d61cfe7138f3e /fs/nfsd/nfs4state.c
parent    e8051c837bd96ad1eabdd46504363431dc5fddc5 (diff)
nfsd: Avoid taking state_lock while holding inode lock in nfsd_break_one_deleg
state_lock is a heavily contended global lock. We don't want to grab
that while simultaneously holding the inode->i_lock.

Add a new per-nfs4_file lock that we can use to protect the
per-nfs4_file delegation list. Hold that while walking the list in the
break_deleg callback and queue the workqueue job for each one.

The workqueue job can then take the state_lock and do the list
manipulations without the i_lock being held prior to starting the
rpc call.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
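In outline, the pattern is: under the fine-grained lock (reachable with
inode->i_lock held), only walk the list and queue work; the workqueue job,
which runs without i_lock, is the only place the contended global lock is
taken. The following is a minimal sketch of that pattern, not the actual
nfsd code: the demo_* names are hypothetical, and the real patch routes the
deferred work through nfsd4_cb_recall and the callback workqueue (after
taking a reference on the delegation) rather than a bare schedule_work().

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_file {
	spinlock_t lock;			/* per-file lock, cf. fi_lock */
	struct list_head delegations;		/* cf. fi_delegations */
};

struct demo_deleg {
	struct list_head perfile;		/* protected by demo_file->lock */
	struct list_head lru;			/* protected by global_state_lock */
	struct work_struct recall_work;		/* deferred recall job */
};

static DEFINE_SPINLOCK(global_state_lock);	/* cf. state_lock */
static LIST_HEAD(global_recall_lru);		/* cf. nn->del_recall_lru */

/*
 * Lease-break callback: runs with inode->i_lock held, so it may only
 * take the fine-grained per-file lock. Anything that needs the
 * contended global lock is pushed to a workqueue instead.
 */
static void demo_break_cb(struct demo_file *f)
{
	struct demo_deleg *d;

	spin_lock(&f->lock);
	list_for_each_entry(d, &f->delegations, perfile)
		schedule_work(&d->recall_work);	/* safe in atomic context */
	spin_unlock(&f->lock);
}

/* Workqueue context: i_lock is not held, so the global lock is safe. */
static void demo_recall_work_fn(struct work_struct *w)
{
	struct demo_deleg *d = container_of(w, struct demo_deleg, recall_work);

	spin_lock(&global_state_lock);
	list_add_tail(&d->lru, &global_recall_lru);
	spin_unlock(&global_state_lock);
}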
Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r--  fs/nfsd/nfs4state.c  |  58
1 file changed, 38 insertions(+), 20 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 56ea4f12803e..bdf8ac3393bd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -254,6 +254,8 @@ static void nfsd4_free_file(struct nfs4_file *f)
 static inline void
 put_nfs4_file(struct nfs4_file *fi)
 {
+	might_lock(&state_lock);
+
 	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
 		hlist_del(&fi->fi_hash);
 		spin_unlock(&state_lock);
@@ -554,6 +556,8 @@ static void block_delegations(struct knfsd_fh *fh)
 	u32 hash;
 	struct bloom_pair *bd = &blocked_delegations;
 
+	lockdep_assert_held(&state_lock);
+
 	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
 
 	__set_bit(hash&255, bd->set[bd->new]);
@@ -592,7 +596,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
 	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
 	dp->dl_time = 0;
 	atomic_set(&dp->dl_count, 1);
-	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
+	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
 	return dp;
 }
 
@@ -640,7 +644,9 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 	lockdep_assert_held(&state_lock);
 
 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
+	spin_lock(&fp->fi_lock);
 	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	spin_unlock(&fp->fi_lock);
 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 }
 
@@ -648,14 +654,18 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 static void
 unhash_delegation(struct nfs4_delegation *dp)
 {
+	struct nfs4_file *fp = dp->dl_file;
+
 	spin_lock(&state_lock);
 	list_del_init(&dp->dl_perclnt);
-	list_del_init(&dp->dl_perfile);
 	list_del_init(&dp->dl_recall_lru);
+	spin_lock(&fp->fi_lock);
+	list_del_init(&dp->dl_perfile);
+	spin_unlock(&fp->fi_lock);
 	spin_unlock(&state_lock);
-	if (dp->dl_file) {
-		nfs4_put_deleg_lease(dp->dl_file);
-		put_nfs4_file(dp->dl_file);
+	if (fp) {
+		nfs4_put_deleg_lease(fp);
+		put_nfs4_file(fp);
 		dp->dl_file = NULL;
 	}
 }
@@ -1677,7 +1687,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
 		spin_unlock(&nn->client_lock);
 		return NULL;
 	}
-	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
+	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
 	clp->cl_time = get_seconds();
 	clear_bit(0, &clp->cl_cb_slot_busy);
 	copy_verf(clp, verf);
@@ -3079,30 +3089,38 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
 	return ret;
 }
 
-static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
 {
 	struct nfs4_client *clp = dp->dl_stid.sc_client;
 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 
-	lockdep_assert_held(&state_lock);
-	/* We're assuming the state code never drops its reference
-	 * without first removing the lease. Since we're in this lease
-	 * callback (and since the lease code is serialized by the kernel
-	 * lock) we know the server hasn't removed the lease yet, we know
-	 * it's safe to take a reference: */
-	atomic_inc(&dp->dl_count);
-
+	/*
+	 * We can't do this in nfsd_break_deleg_cb because it is
+	 * already holding inode->i_lock
+	 */
+	spin_lock(&state_lock);
+	block_delegations(&dp->dl_fh);
 	/*
 	 * If the dl_time != 0, then we know that it has already been
 	 * queued for a lease break. Don't queue it again.
 	 */
 	if (dp->dl_time == 0) {
-		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
 		dp->dl_time = get_seconds();
+		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
 	}
+	spin_unlock(&state_lock);
+}
 
-	block_delegations(&dp->dl_fh);
-
+static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+{
+	/*
+	 * We're assuming the state code never drops its reference
+	 * without first removing the lease. Since we're in this lease
+	 * callback (and since the lease code is serialized by the kernel
+	 * lock) we know the server hasn't removed the lease yet, we know
+	 * it's safe to take a reference.
+	 */
+	atomic_inc(&dp->dl_count);
 	nfsd4_cb_recall(dp);
 }
 
@@ -3127,11 +3145,11 @@ static void nfsd_break_deleg_cb(struct file_lock *fl)
 	 */
 	fl->fl_break_time = 0;
 
-	spin_lock(&state_lock);
 	fp->fi_had_conflict = true;
+	spin_lock(&fp->fi_lock);
 	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
 		nfsd_break_one_deleg(dp);
-	spin_unlock(&state_lock);
+	spin_unlock(&fp->fi_lock);
 }
 
 static