author     Eric Sandeen <sandeen@sandeen.net>                   2007-10-11 03:42:32 -0400
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-02-07 00:47:15 -0500
commit     3685c2a1d773781608c9e281a6ff6b4c8ea8f6f9 (patch)
tree       f3d3a45002e64c8204de0db66bad2db1b73b5844 /fs/xfs
parent     ba74d0cba51dcaa99e4dc2e4fb62e6e13abbf703 (diff)
[XFS] Unwrap XFS_SB_LOCK.
Un-obfuscate XFS_SB_LOCK, remove XFS_SB_LOCK->mutex_lock->spin_lock
macros, call spin_lock directly, remove extraneous cookie holdover from
old xfs code, and change lock type to spinlock_t.
SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29746a
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
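
For readers unfamiliar with the old macros, here is an illustrative before/after sketch of the pattern this patch applies at every call site (this is not a hunk from the patch; the macro expansion is taken from the xfs_mount.h change below, and the sb_qflags store is just a stand-in critical section):

	/* before: the macros hid a spinlock behind an irq-style cookie */
	unsigned long	s;

	s = XFS_SB_LOCK(mp);		/* expanded to mutex_spinlock(&mp->m_sb_lock) */
	mp->m_sb.sb_qflags = 0;
	XFS_SB_UNLOCK(mp, s);		/* expanded to mutex_spinunlock(&mp->m_sb_lock, s) */

	/* after: m_sb_lock is a plain spinlock_t, locked and unlocked directly */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

The cookie variable and the macro layer disappear while the critical section itself is unchanged, which is what produces the mostly mechanical hunks below.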
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/quota/xfs_qm.c           | 12
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c  | 19
-rw-r--r--  fs/xfs/xfs_attr_leaf.c          |  8
-rw-r--r--  fs/xfs/xfs_bmap.c               |  7
-rw-r--r--  fs/xfs/xfs_fsops.c              | 13
-rw-r--r--  fs/xfs/xfs_mount.c              | 57
-rw-r--r--  fs/xfs/xfs_mount.h              |  4
-rw-r--r--  fs/xfs/xfs_qmops.c              |  7
-rw-r--r--  fs/xfs/xfs_utils.c              |  7
-rw-r--r--  fs/xfs/xfs_vfsops.c             |  5
10 files changed, 60 insertions(+), 79 deletions(-)
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index d488645f833d..08b44758035e 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -310,7 +310,6 @@ xfs_qm_mount_quotas(
 	xfs_mount_t	*mp,
 	int		mfsi_flags)
 {
-	unsigned long	s;
 	int		error = 0;
 	uint		sbf;

@@ -367,13 +366,13 @@ xfs_qm_mount_quotas(

  write_changes:
 	/*
-	 * We actually don't have to acquire the SB_LOCK at all.
+	 * We actually don't have to acquire the m_sb_lock at all.
 	 * This can only be called from mount, and that's single threaded. XXX
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	sbf = mp->m_sb.sb_qflags;
 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
 		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
@@ -1370,7 +1369,6 @@ xfs_qm_qino_alloc(
 {
 	xfs_trans_t	*tp;
 	int		error;
-	unsigned long	s;
 	int		committed;

 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
@@ -1402,7 +1400,7 @@ xfs_qm_qino_alloc(
 	 * sbfields arg may contain fields other than *QUOTINO;
 	 * VERSIONNUM for example.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	if (flags & XFS_QMOPT_SBVERSION) {
 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
 		unsigned oldv = mp->m_sb.sb_versionnum;
@@ -1429,7 +1427,7 @@ xfs_qm_qino_alloc(
 		mp->m_sb.sb_uquotino = (*ip)->i_ino;
 	else
 		mp->m_sb.sb_gquotino = (*ip)->i_ino;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 	xfs_mod_sb(tp, sbfields);

 	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index ad5579d4eac4..2cc5886cfe85 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -200,7 +200,6 @@ xfs_qm_scall_quotaoff(
 	boolean_t		force)
 {
 	uint			dqtype;
-	unsigned long		s;
 	int			error;
 	uint			inactivate_flags;
 	xfs_qoff_logitem_t	*qoffstart;
@@ -237,9 +236,9 @@ xfs_qm_scall_quotaoff(
 	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
 		mp->m_qflags &= ~(flags);

-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		mp->m_sb.sb_qflags = mp->m_qflags;
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));

 		/* XXX what to do if error ? Revert back to old vals incore ? */
@@ -415,7 +414,6 @@ xfs_qm_scall_quotaon(
 	uint		flags)
 {
 	int		error;
-	unsigned long	s;
 	uint		qf;
 	uint		accflags;
 	__int64_t	sbflags;
@@ -468,10 +466,10 @@ xfs_qm_scall_quotaon(
 	 * Change sb_qflags on disk but not incore mp->qflags
 	 * if this is the root filesystem.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	qf = mp->m_sb.sb_qflags;
 	mp->m_sb.sb_qflags = qf | flags;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	/*
 	 * There's nothing to change if it's the same.
@@ -815,7 +813,6 @@ xfs_qm_log_quotaoff(
 {
 	xfs_trans_t		*tp;
 	int			error;
-	unsigned long		s;
 	xfs_qoff_logitem_t	*qoffi=NULL;
 	uint			oldsbqflag=0;

@@ -832,10 +829,10 @@ xfs_qm_log_quotaoff(
 	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
 	xfs_trans_log_quotaoff_item(tp, qoffi);

-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	oldsbqflag = mp->m_sb.sb_qflags;
 	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	xfs_mod_sb(tp, XFS_SB_QFLAGS);

@@ -854,9 +851,9 @@ error0:
 		 * No one else is modifying sb_qflags, so this is OK.
 		 * We still hold the quotaofflock.
 		 */
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		mp->m_sb.sb_qflags = oldsbqflag;
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 	}
 	*qoffstartp = qoffi;
 	return (error);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 81f45dae1c57..eb3815ebb7aa 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -226,17 +226,15 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
 STATIC void
 xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
 {
-	unsigned long s;
-
 	if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
 	    !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) {
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
 			XFS_SB_VERSION_ADDATTR2(&mp->m_sb);
-			XFS_SB_UNLOCK(mp, s);
+			spin_unlock(&mp->m_sb_lock);
 			xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
 		} else
-			XFS_SB_UNLOCK(mp, s);
+			spin_unlock(&mp->m_sb_lock);
 	}
 }

diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 2e9b34b7344b..97f0328b5ac2 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -3956,7 +3956,6 @@ xfs_bmap_add_attrfork(
 	xfs_bmap_free_t		flist;		/* freed extent records */
 	xfs_mount_t		*mp;		/* mount structure */
 	xfs_trans_t		*tp;		/* transaction pointer */
-	unsigned long		s;		/* spinlock spl value */
 	int			blks;		/* space reservation */
 	int			version = 1;	/* superblock attr version */
 	int			committed;	/* xaction was committed */
@@ -4053,7 +4052,7 @@
 	    (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) {
 		__int64_t sbfields = 0;

-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
 			XFS_SB_VERSION_ADDATTR(&mp->m_sb);
 			sbfields |= XFS_SB_VERSIONNUM;
@@ -4063,10 +4062,10 @@
 			sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
 		}
 		if (sbfields) {
-			XFS_SB_UNLOCK(mp, s);
+			spin_unlock(&mp->m_sb_lock);
 			xfs_mod_sb(tp, sbfields);
 		} else
-			XFS_SB_UNLOCK(mp, s);
+			spin_unlock(&mp->m_sb_lock);
 	}
 	if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
 		goto error2;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c92d5b821029..b8de7f3cc17e 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -462,15 +462,13 @@ xfs_fs_counts(
 	xfs_mount_t		*mp,
 	xfs_fsop_counts_t	*cnt)
 {
-	unsigned long		s;
-
 	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 	cnt->freertx = mp->m_sb.sb_frextents;
 	cnt->freeino = mp->m_sb.sb_ifree;
 	cnt->allocino = mp->m_sb.sb_icount;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 	return 0;
 }

@@ -497,7 +495,6 @@ xfs_reserve_blocks(
 {
 	__int64_t		lcounter, delta, fdblks_delta;
 	__uint64_t		request;
-	unsigned long		s;

 	/* If inval is null, report current values and return */
 	if (inval == (__uint64_t *)NULL) {
@@ -515,7 +512,7 @@ xfs_reserve_blocks(
 	 * problem. we needto work out if we are freeing or allocation
 	 * blocks first, then we can do the modification as necessary.
 	 *
-	 * We do this under the XFS_SB_LOCK so that if we are near
+	 * We do this under the m_sb_lock so that if we are near
 	 * ENOSPC, we will hold out any changes while we work out
 	 * what to do. This means that the amount of free space can
 	 * change while we do this, so we need to retry if we end up
@@ -526,7 +523,7 @@ xfs_reserve_blocks(
 	 * enabled, disabled or even compiled in....
 	 */
 retry:
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);

 	/*
@@ -569,7 +566,7 @@ out:
 		outval->resblks = mp->m_resblks;
 		outval->resblks_avail = mp->m_resblks_avail;
 	}
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	if (fdblks_delta) {
 		/*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 9a80b5581844..6bc92c85733f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -696,7 +696,6 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
 	uint64_t	bfreelst = 0;
 	uint64_t	btree = 0;
 	int		error;
-	int		s;

 	for (index = 0; index < agcount; index++) {
 		/*
@@ -721,11 +720,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
 	/*
 	 * Overwrite incore superblock counters with just-read data
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	sbp->sb_ifree = ifree;
 	sbp->sb_icount = ialloc;
 	sbp->sb_fdblocks = bfree + bfreelst + btree;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	/* Fixup the per-cpu counters as well. */
 	xfs_icsb_reinit_counters(mp);
@@ -1440,7 +1439,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
  * Fields are not allowed to dip below zero, so if the delta would
  * do this do not apply it and return EINVAL.
  *
- * The SB_LOCK must be held when this routine is called.
+ * The m_sb_lock must be held when this routine is called.
  */
 int
 xfs_mod_incore_sb_unlocked(
@@ -1605,7 +1604,7 @@ xfs_mod_incore_sb_unlocked(
 /*
  * xfs_mod_incore_sb() is used to change a field in the in-core
  * superblock structure by the specified delta. This modification
- * is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked()
+ * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
 int
@@ -1615,7 +1614,6 @@ xfs_mod_incore_sb(
 	int64_t		delta,
 	int		rsvd)
 {
-	unsigned long	s;
 	int	status;

 	/* check for per-cpu counters */
@@ -1632,9 +1630,9 @@
 		/* FALLTHROUGH */
 #endif
 	default:
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 		break;
 	}

@@ -1655,7 +1653,6 @@ xfs_mod_incore_sb(
 int
 xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 {
-	unsigned long		s;
 	int			status=0;
 	xfs_mod_sb_t		*msbp;

@@ -1663,10 +1660,10 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 	 * Loop through the array of mod structures and apply each
 	 * individually. If any fail, then back out all those
 	 * which have already been applied. Do all of this within
-	 * the scope of the SB_LOCK so that all of the changes will
+	 * the scope of the m_sb_lock so that all of the changes will
 	 * be atomic.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	msbp = &msb[0];
 	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
 		/*
@@ -1680,11 +1677,11 @@
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				XFS_SB_UNLOCK(mp, s);
+				spin_unlock(&mp->m_sb_lock);
 				status = xfs_icsb_modify_counters(mp,
 							msbp->msb_field,
 							msbp->msb_delta, rsvd);
-				s = XFS_SB_LOCK(mp);
+				spin_lock(&mp->m_sb_lock);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1718,12 +1715,12 @@
 			case XFS_SBS_IFREE:
 			case XFS_SBS_FDBLOCKS:
 				if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-					XFS_SB_UNLOCK(mp, s);
+					spin_unlock(&mp->m_sb_lock);
 					status = xfs_icsb_modify_counters(mp,
 							msbp->msb_field,
 							-(msbp->msb_delta),
 							rsvd);
-					s = XFS_SB_LOCK(mp);
+					spin_lock(&mp->m_sb_lock);
 					break;
 				}
 				/* FALLTHROUGH */
@@ -1739,7 +1736,7 @@
 			msbp--;
 		}
 	}
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 	return status;
 }

@@ -1887,12 +1884,12 @@ xfs_mount_log_sbunit(
 *
 * Locking rules:
 *
-* 	1. XFS_SB_LOCK() before picking up per-cpu locks
+* 	1. m_sb_lock before picking up per-cpu locks
 * 	2. per-cpu locks always picked up via for_each_online_cpu() order
-* 	3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
+* 	3. accurate counter sync requires m_sb_lock + per cpu locks
 * 	4. modifying per-cpu counters requires holding per-cpu lock
-* 	5. modifying global counters requires holding XFS_SB_LOCK
-* 	6. enabling or disabling a counter requires holding the XFS_SB_LOCK
+* 	5. modifying global counters requires holding m_sb_lock
+* 	6. enabling or disabling a counter requires holding the m_sb_lock
 * 	   and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
@@ -1945,7 +1942,7 @@ xfs_icsb_cpu_notify(
 		 * count into the total on the global superblock and
 		 * re-enable the counters. */
 		xfs_icsb_lock(mp);
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
 		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
 		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
@@ -1962,7 +1959,7 @@
 					XFS_ICSB_SB_LOCKED, 0);
 		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
 					XFS_ICSB_SB_LOCKED, 0);
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 		xfs_icsb_unlock(mp);
 		break;
 	}
@@ -2197,7 +2194,7 @@ xfs_icsb_sync_counters_flags(

 	/* Pass 1: lock all counters */
 	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);

 	xfs_icsb_count(mp, &cnt, flags);

@@ -2210,7 +2207,7 @@
 	mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;

 	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 }

 /*
@@ -2255,7 +2252,7 @@ xfs_icsb_balance_counter(
 	uint64_t	min = (uint64_t)min_per_cpu;

 	if (!(flags & XFS_ICSB_SB_LOCKED))
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);

 	/* disable counter and sync counter */
 	xfs_icsb_disable_counter(mp, field);
@@ -2289,7 +2286,7 @@
 	xfs_icsb_enable_counter(mp, field, count, resid);
 out:
 	if (!(flags & XFS_ICSB_SB_LOCKED))
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 }

 int
@@ -2379,15 +2376,15 @@ slow_path:
 	 * running atomically here, we know a rebalance cannot
 	 * be in progress. Hence we can go straight to operating
 	 * on the global superblock. We do not call xfs_mod_incore_sb()
-	 * here even though we need to get the SB_LOCK. Doing so
+	 * here even though we need to get the m_sb_lock. Doing so
 	 * will cause us to re-enter this function and deadlock.
-	 * Hence we get the SB_LOCK ourselves and then call
+	 * Hence we get the m_sb_lock ourselves and then call
 	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
 	 * directly on the global counters.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	/*
 	 * Now that we've modified the global superblock, we
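
As an aside, a minimal sketch of the nesting that locking rule 1 in the xfs_mount.c comment above describes, i.e. the global superblock lock is always taken before any of the per-cpu counter locks (the per-cpu helper name here is hypothetical; only m_sb_lock and for_each_online_cpu() come from the source):

	spin_lock(&mp->m_sb_lock);		/* rule 1: global superblock lock first */
	for_each_online_cpu(i)
		lock_icsb_counter(mp, i);	/* rule 2: per-cpu locks, in online-cpu order */
	/* ... an accurate counter sync (rule 3) runs with both held ... */
	for_each_online_cpu(i)
		unlock_icsb_counter(mp, i);
	spin_unlock(&mp->m_sb_lock);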
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 723f01f59cb8..31035b76a26f 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -227,7 +227,7 @@ typedef struct xfs_mount {
 	xfs_ail_entry_t		m_ail;		/* fs active log item list */
 	uint			m_ail_gen;	/* fs AIL generation count */
 	xfs_sb_t		m_sb;		/* copy of fs superblock */
-	lock_t			m_sb_lock;	/* sb counter mutex */
+	spinlock_t		m_sb_lock;	/* sb counter lock */
 	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
 	char			*m_fsname;	/* filesystem name */
 	int			m_fsname_len;	/* strlen of fs name */
@@ -503,8 +503,6 @@ typedef struct xfs_mod_sb {

 #define XFS_MOUNT_ILOCK(mp)	mutex_lock(&((mp)->m_ilock))
 #define XFS_MOUNT_IUNLOCK(mp)	mutex_unlock(&((mp)->m_ilock))
-#define XFS_SB_LOCK(mp)		mutex_spinlock(&(mp)->m_sb_lock)
-#define XFS_SB_UNLOCK(mp,s)	mutex_spinunlock(&(mp)->m_sb_lock,(s))

 extern xfs_mount_t *xfs_mount_init(void);
 extern void	xfs_mod_sb(xfs_trans_t *, __int64_t);
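
One follow-on detail worth noting (an assumption on my part, not shown in the hunks quoted here): with m_sb_lock now a plain spinlock_t rather than a lock_t, it has to be set up with the standard kernel initializer somewhere in the mount setup path before the first spin_lock() call, along the lines of:

	/* assumed to live in the mount-structure init code; not part of the quoted hunks */
	spin_lock_init(&mp->m_sb_lock);

spin_lock_init() is the stock <linux/spinlock.h> initializer; the old lock_t presumably had its own init wrapper that goes away with the type change.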
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c
index 2ec1d8a27352..a294e58db8dd 100644
--- a/fs/xfs/xfs_qmops.c
+++ b/fs/xfs/xfs_qmops.c
@@ -49,18 +49,17 @@ xfs_mount_reset_sbqflags(xfs_mount_t *mp)
 {
 	int		error;
 	xfs_trans_t	*tp;
-	unsigned long	s;

 	mp->m_qflags = 0;
 	/*
 	 * It is OK to look at sb_qflags here in mount path,
-	 * without SB_LOCK.
+	 * without m_sb_lock.
 	 */
 	if (mp->m_sb.sb_qflags == 0)
 		return 0;
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	mp->m_sb.sb_qflags = 0;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	/*
 	 * if the fs is readonly, let the incore superblock run
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index a02b944c6c7b..4df466fa3a40 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -330,7 +330,6 @@ xfs_bump_ino_vers2(
 	xfs_inode_t	*ip)
 {
 	xfs_mount_t	*mp;
-	unsigned long	s;

 	ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
 	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1);
@@ -340,13 +339,13 @@ xfs_bump_ino_vers2(
 	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
 	mp = tp->t_mountp;
 	if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
 			XFS_SB_VERSION_ADDNLINK(&mp->m_sb);
-			XFS_SB_UNLOCK(mp, s);
+			spin_unlock(&mp->m_sb_lock);
 			xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
 		} else {
-			XFS_SB_UNLOCK(mp, s);
+			spin_unlock(&mp->m_sb_lock);
 		}
 	}
 	/* Caller must log the inode */
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index f90d95582047..44f0216c03e9 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -854,14 +854,13 @@ xfs_statvfs(
 	__uint64_t		fakeinos;
 	xfs_extlen_t		lsize;
 	xfs_sb_t		*sbp;
-	unsigned long		s;

 	sbp = &(mp->m_sb);

 	statp->f_type = XFS_SB_MAGIC;

 	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	statp->f_bsize = sbp->sb_blocksize;
 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
 	statp->f_blocks = sbp->sb_dblocks - lsize;
@@ -881,7 +880,7 @@
 					statp->f_files,
 					mp->m_maxicount);
 	statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);

 	xfs_statvfs_fsid(statp, mp);
 	statp->f_namelen = MAXNAMELEN - 1;