author	Jeff Layton <jlayton@primarydata.com>	2015-01-16 15:05:57 -0500
committer	Jeff Layton <jeff.layton@primarydata.com>	2015-01-16 16:08:49 -0500
commit	6109c85037e53443f29fd39c0de69f578a1cf285 (patch)
tree	56823d1615acbba20c858eed9d16cf443cd55872
parent	a7231a97467d5a0c36f82f581c76c12c034e4b80 (diff)
locks: add a dedicated spinlock to protect i_flctx lists
We can now add a dedicated spinlock without expanding struct inode. Change
to using that to protect the various i_flctx lists.

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Acked-by: Christoph Hellwig <hch@lst.de>
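The shape of the change, condensed from the include/linux/fs.h and fs/locks.c
hunks below (allocation path and field layout abbreviated):

	/* include/linux/fs.h: the lock context now carries its own lock */
	struct file_lock_context {
		spinlock_t		flc_lock;	/* protects the three lists below */
		struct list_head	flc_flock;
		struct list_head	flc_posix;
		struct list_head	flc_lease;
	};

	/* fs/locks.c, locks_get_lock_context(): initialize it with the lists */
	spin_lock_init(&new->flc_lock);
	INIT_LIST_HEAD(&new->flc_flock);
	INIT_LIST_HEAD(&new->flc_posix);
	INIT_LIST_HEAD(&new->flc_lease);

Everything that previously took inode->i_lock to walk these lists now takes
ctx->flc_lock (or flctx->flc_lock) instead, and the lock-ordering comments in
fs/locks.c are updated to match.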
-rw-r--r--	fs/ceph/locks.c		 8
-rw-r--r--	fs/cifs/file.c		 8
-rw-r--r--	fs/lockd/svcsubs.c	12
-rw-r--r--	fs/locks.c		87
-rw-r--r--	fs/nfs/delegation.c	 8
-rw-r--r--	fs/nfs/nfs4state.c	 8
-rw-r--r--	fs/nfs/write.c		 4
-rw-r--r--	fs/nfsd/nfs4state.c	 4
-rw-r--r--	include/linux/fs.h	 1
9 files changed, 71 insertions, 69 deletions
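For reference, the calling pattern each filesystem converts to looks like the
sketch below. count_posix_locks() is a hypothetical helper written only to
illustrate that pattern (modeled on the ceph_count_locks() and
cifs_push_posix_locks() hunks); it is not part of this patch:

	/* hypothetical example: walk the POSIX lock list under flc_lock */
	static int count_posix_locks(struct inode *inode)
	{
		struct file_lock_context *ctx = inode->i_flctx;
		struct file_lock *fl;
		int count = 0;

		if (!ctx)
			return 0;

		spin_lock(&ctx->flc_lock);	/* was: spin_lock(&inode->i_lock) */
		list_for_each_entry(fl, &ctx->flc_posix, fl_list)
			count++;
		spin_unlock(&ctx->flc_lock);

		return count;
	}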
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 19beeed83233..0303da8e3233 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -255,12 +255,12 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 
 	ctx = inode->i_flctx;
 	if (ctx) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ctx->flc_lock);
 		list_for_each_entry(lock, &ctx->flc_posix, fl_list)
 			++(*fcntl_count);
 		list_for_each_entry(lock, &ctx->flc_flock, fl_list)
 			++(*flock_count);
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ctx->flc_lock);
 	}
 	dout("counted %d flock locks and %d fcntl locks",
 	     *flock_count, *fcntl_count);
@@ -288,7 +288,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
 	if (!ctx)
 		return 0;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
 		++seen_fcntl;
 		if (seen_fcntl > num_fcntl_locks) {
@@ -312,7 +312,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
 		++l;
 	}
 fail:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	return err;
 }
 
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ea78f6f81ce2..b65166eb111e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1136,11 +1136,11 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 	if (!flctx)
 		goto out;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&flctx->flc_lock);
 	list_for_each(el, &flctx->flc_posix) {
 		count++;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&flctx->flc_lock);
 
 	INIT_LIST_HEAD(&locks_to_send);
 
@@ -1159,7 +1159,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 	}
 
 	el = locks_to_send.next;
-	spin_lock(&inode->i_lock);
+	spin_lock(&flctx->flc_lock);
 	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
 		if (el == &locks_to_send) {
 			/*
@@ -1181,7 +1181,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 		lck->type = type;
 		lck->offset = flock->fl_start;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&flctx->flc_lock);
 
 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
 		int stored_rc;
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 5300bb53835f..665ef5a05183 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -171,7 +171,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 		return 0;
again:
 	file->f_locks = 0;
-	spin_lock(&inode->i_lock);
+	spin_lock(&flctx->flc_lock);
 	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
 		if (fl->fl_lmops != &nlmsvc_lock_operations)
 			continue;
@@ -183,7 +183,7 @@ again:
 		if (match(lockhost, host)) {
 			struct file_lock lock = *fl;
 
-			spin_unlock(&inode->i_lock);
+			spin_unlock(&flctx->flc_lock);
 			lock.fl_type = F_UNLCK;
 			lock.fl_start = 0;
 			lock.fl_end = OFFSET_MAX;
@@ -195,7 +195,7 @@ again:
 			goto again;
 		}
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&flctx->flc_lock);
 
 	return 0;
 }
@@ -232,14 +232,14 @@ nlm_file_inuse(struct nlm_file *file)
 		return 1;
 
 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&flctx->flc_lock);
 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
 			if (fl->fl_lmops == &nlmsvc_lock_operations) {
-				spin_unlock(&inode->i_lock);
+				spin_unlock(&flctx->flc_lock);
 				return 1;
 			}
 		}
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&flctx->flc_lock);
 	}
 	file->f_locks = 0;
 	return 0;
diff --git a/fs/locks.c b/fs/locks.c
index d46e70567b99..a268d959ccd6 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -161,7 +161,7 @@ int lease_break_time = 45;
  * The global file_lock_list is only used for displaying /proc/locks, so we
  * keep a list on each CPU, with each list protected by its own spinlock via
  * the file_lock_lglock. Note that alterations to the list also require that
- * the relevant i_lock is held.
+ * the relevant flc_lock is held.
  */
 DEFINE_STATIC_LGLOCK(file_lock_lglock);
 static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -189,13 +189,13 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
  * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
- * we often hold the i_lock as well. In certain cases, when reading the fields
+ * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
- * i_lock.
+ * flc_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
- * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
- * an entry from the list however only requires the file_lock_lock.
+ * both the flc_lock and the blocked_lock_lock (acquired in that order).
+ * Deleting an entry from the list however only requires the file_lock_lock.
 */
 static DEFINE_SPINLOCK(blocked_lock_lock);
 
@@ -214,6 +214,7 @@ locks_get_lock_context(struct inode *inode)
 	if (!new)
 		goto out;
 
+	spin_lock_init(&new->flc_lock);
 	INIT_LIST_HEAD(&new->flc_flock);
 	INIT_LIST_HEAD(&new->flc_posix);
 	INIT_LIST_HEAD(&new->flc_lease);
@@ -557,7 +558,7 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 	return fl1->fl_owner == fl2->fl_owner;
 }
 
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
 static void locks_insert_global_locks(struct file_lock *fl)
 {
 	lg_local_lock(&file_lock_lglock);
@@ -566,12 +567,12 @@ static void locks_insert_global_locks(struct file_lock *fl)
 	lg_local_unlock(&file_lock_lglock);
 }
 
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
 static void locks_delete_global_locks(struct file_lock *fl)
 {
 	/*
 	 * Avoid taking lock if already unhashed. This is safe since this check
-	 * is done while holding the i_lock, and new insertions into the list
+	 * is done while holding the flc_lock, and new insertions into the list
 	 * also require that it be held.
 	 */
 	if (hlist_unhashed(&fl->fl_link))
@@ -623,10 +624,10 @@ static void locks_delete_block(struct file_lock *waiter)
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
- * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
- * list itself is protected by the blocked_lock_lock, but by ensuring that the
- * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
- * in some cases when we see that the fl_block list is empty.
+ * Must be called with both the flc_lock and blocked_lock_lock held. The
+ * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
+ * that the flc_lock is also held on insertions we can avoid taking the
+ * blocked_lock_lock in some cases when we see that the fl_block list is empty.
 */
 static void __locks_insert_block(struct file_lock *blocker,
 				 struct file_lock *waiter)
@@ -638,7 +639,7 @@ static void __locks_insert_block(struct file_lock *blocker,
 	locks_insert_global_blocked(waiter);
 }
 
-/* Must be called with i_lock held. */
+/* Must be called with flc_lock held. */
 static void locks_insert_block(struct file_lock *blocker,
 			       struct file_lock *waiter)
 {
@@ -650,15 +651,15 @@ static void locks_insert_block(struct file_lock *blocker,
 /*
 * Wake up processes blocked waiting for blocker.
 *
- * Must be called with the inode->i_lock held!
+ * Must be called with the inode->flc_lock held!
 */
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
 	/*
 	 * Avoid taking global lock if list is empty. This is safe since new
-	 * blocked requests are only added to the list under the i_lock, and
-	 * the i_lock is always held here. Note that removal from the fl_block
-	 * list does not require the i_lock, so we must recheck list_empty()
+	 * blocked requests are only added to the list under the flc_lock, and
+	 * the flc_lock is always held here. Note that removal from the fl_block
+	 * list does not require the flc_lock, so we must recheck list_empty()
 	 * after acquiring the blocked_lock_lock.
 	 */
 	if (list_empty(&blocker->fl_block))
@@ -768,7 +769,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
 		return;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
 		if (posix_locks_conflict(fl, cfl)) {
 			locks_copy_conflock(fl, cfl);
@@ -779,7 +780,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
 	}
 	fl->fl_type = F_UNLCK;
out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	return;
 }
 EXPORT_SYMBOL(posix_test_lock);
@@ -880,7 +881,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		return -ENOMEM;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
 
@@ -905,9 +906,9 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	 * give it the opportunity to lock the file.
 	 */
 	if (found) {
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ctx->flc_lock);
 		cond_resched();
-		spin_lock(&inode->i_lock);
+		spin_lock(&ctx->flc_lock);
 	}
 
find_conflict:
@@ -929,7 +930,7 @@ find_conflict:
 	error = 0;
 
out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	if (new_fl)
 		locks_free_lock(new_fl);
 	locks_dispose_list(&dispose);
@@ -965,7 +966,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 		new_fl2 = locks_alloc_lock();
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	/*
 	 * New lock request. Walk all POSIX locks and look for conflicts. If
 	 * there are any, either return error or put the request on the
@@ -1136,7 +1137,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 		locks_wake_up_blocks(left);
 	}
  out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	/*
 	 * Free any unused locks.
 	 */
@@ -1218,7 +1219,7 @@ int locks_mandatory_locked(struct file *file)
 	/*
 	 * Search the lock list for this inode for any POSIX locks.
 	 */
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	ret = 0;
 	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
 		if (fl->fl_owner != current->files &&
@@ -1227,7 +1228,7 @@ int locks_mandatory_locked(struct file *file)
 			break;
 		}
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	return ret;
 }
 
@@ -1346,7 +1347,7 @@ static void time_out_leases(struct inode *inode, struct list_head *dispose)
 	struct file_lock_context *ctx = inode->i_flctx;
 	struct file_lock *fl, *tmp;
 
-	lockdep_assert_held(&inode->i_lock);
+	lockdep_assert_held(&ctx->flc_lock);
 
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
 		trace_time_out_leases(inode, fl);
@@ -1370,7 +1371,7 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker)
 	struct file_lock_context *ctx = inode->i_flctx;
 	struct file_lock *fl;
 
-	lockdep_assert_held(&inode->i_lock);
+	lockdep_assert_held(&ctx->flc_lock);
 
 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 		if (leases_conflict(fl, breaker))
@@ -1413,7 +1414,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 		return error;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 
 	time_out_leases(inode, &dispose);
 
@@ -1463,11 +1464,11 @@ restart:
 		break_time++;
 	locks_insert_block(fl, new_fl);
 	trace_break_lease_block(inode, new_fl);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	locks_dispose_list(&dispose);
 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
 						!new_fl->fl_next, break_time);
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	trace_break_lease_unblock(inode, new_fl);
 	locks_delete_block(new_fl);
 	if (error >= 0) {
@@ -1482,7 +1483,7 @@ restart:
 		error = 0;
 	}
out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	locks_dispose_list(&dispose);
 	locks_free_lock(new_fl);
 	return error;
@@ -1506,14 +1507,14 @@ void lease_get_mtime(struct inode *inode, struct timespec *time)
 	struct file_lock *fl;
 
 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ctx->flc_lock);
 		if (!list_empty(&ctx->flc_lease)) {
 			fl = list_first_entry(&ctx->flc_lease,
 						struct file_lock, fl_list);
 			if (fl->fl_type == F_WRLCK)
 				has_lease = true;
 		}
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ctx->flc_lock);
 	}
 
 	if (has_lease)
@@ -1556,7 +1557,7 @@ int fcntl_getlease(struct file *filp)
 	LIST_HEAD(dispose);
 
 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&ctx->flc_lock);
 		time_out_leases(file_inode(filp), &dispose);
 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 			if (fl->fl_file != filp)
@@ -1564,7 +1565,7 @@ int fcntl_getlease(struct file *filp)
 			type = target_leasetype(fl);
 			break;
 		}
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&ctx->flc_lock);
 		locks_dispose_list(&dispose);
 	}
 	return type;
@@ -1632,7 +1633,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 		return -EINVAL;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
 	error = check_conflicting_open(dentry, arg);
 	if (error)
@@ -1699,7 +1700,7 @@ out_setup:
 	if (lease->fl_lmops->lm_setup)
 		lease->fl_lmops->lm_setup(lease, priv);
out:
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	locks_dispose_list(&dispose);
 	if (is_deleg)
 		mutex_unlock(&inode->i_mutex);
@@ -1722,7 +1723,7 @@ static int generic_delete_lease(struct file *filp)
 		return error;
 	}
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 		if (fl->fl_file == filp) {
 			victim = fl;
@@ -1732,7 +1733,7 @@ static int generic_delete_lease(struct file *filp)
 	trace_generic_delete_lease(inode, fl);
 	if (victim)
 		error = fl->fl_lmops->lm_change(&victim, F_UNLCK, &dispose);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	locks_dispose_list(&dispose);
 	return error;
 }
@@ -2423,10 +2424,10 @@ locks_remove_lease(struct file *filp)
 	if (!ctx || list_empty(&ctx->flc_lease))
 		return;
 
-	spin_lock(&inode->i_lock);
+	spin_lock(&ctx->flc_lock);
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
 		lease_modify(&fl, F_UNLCK, &dispose);
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&ctx->flc_lock);
 	locks_dispose_list(&dispose);
 }
 
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 3fb1caa3874d..8cdb2b28a104 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -93,22 +93,22 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 		goto out;
 
 	list = &flctx->flc_posix;
-	spin_lock(&inode->i_lock);
+	spin_lock(&flctx->flc_lock);
restart:
 	list_for_each_entry(fl, list, fl_list) {
 		if (nfs_file_open_context(fl->fl_file) != ctx)
 			continue;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&flctx->flc_lock);
 		status = nfs4_lock_delegation_recall(fl, state, stateid);
 		if (status < 0)
 			goto out;
-		spin_lock(&inode->i_lock);
+		spin_lock(&flctx->flc_lock);
 	}
 	if (list == &flctx->flc_posix) {
 		list = &flctx->flc_flock;
 		goto restart;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&flctx->flc_lock);
out:
 	return status;
 }
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6084c267f3a0..a3bb22ab68c5 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1376,12 +1376,12 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
 
 	/* Guard against delegation returns and new lock/unlock calls */
 	down_write(&nfsi->rwsem);
-	spin_lock(&inode->i_lock);
+	spin_lock(&flctx->flc_lock);
restart:
 	list_for_each_entry(fl, list, fl_list) {
 		if (nfs_file_open_context(fl->fl_file)->state != state)
 			continue;
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&flctx->flc_lock);
 		status = ops->recover_lock(state, fl);
 		switch (status) {
 		case 0:
@@ -1408,13 +1408,13 @@ restart:
 			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
 			status = 0;
 		}
-		spin_lock(&inode->i_lock);
+		spin_lock(&flctx->flc_lock);
 	}
 	if (list == &flctx->flc_posix) {
 		list = &flctx->flc_flock;
 		goto restart;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&flctx->flc_lock);
out:
 	up_write(&nfsi->rwsem);
 	return status;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 784c13485b3f..4ae66f416eb9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1206,7 +1206,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
 
 	/* Check to see if there are whole file write locks */
 	ret = 0;
-	spin_lock(&inode->i_lock);
+	spin_lock(&flctx->flc_lock);
 	if (!list_empty(&flctx->flc_posix)) {
 		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
 					fl_list);
@@ -1218,7 +1218,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
 		if (fl->fl_type == F_WRLCK)
 			ret = 1;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&flctx->flc_lock);
 	return ret;
 }
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index fad821991369..80242f5bd621 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -5572,14 +5572,14 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
 	flctx = inode->i_flctx;
 
 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
-		spin_lock(&inode->i_lock);
+		spin_lock(&flctx->flc_lock);
 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
 			if (fl->fl_owner == (fl_owner_t)lowner) {
 				status = true;
 				break;
 			}
 		}
-		spin_unlock(&inode->i_lock);
+		spin_unlock(&flctx->flc_lock);
 	}
 	fput(filp);
 	return status;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ce0873af0b97..32eafa9b5c9f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -968,6 +968,7 @@ struct file_lock {
 };
 
 struct file_lock_context {
+	spinlock_t		flc_lock;
 	struct list_head	flc_flock;
 	struct list_head	flc_posix;
 	struct list_head	flc_lease;