Diffstat (limited to 'fs/xfs/xfs_mount.c')
 fs/xfs/xfs_mount.c | 268
 1 file changed, 202 insertions(+), 66 deletions(-)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 8b6c9e807efb..e79b56b4bca6 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -44,6 +44,8 @@
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
 #include "xfs_utils.h"
+#include "xfs_trace.h"
+
 
 STATIC void xfs_unmountfs_wait(xfs_mount_t *);
 
@@ -199,6 +201,38 @@ xfs_uuid_unmount(
 
 
 /*
+ * Reference counting access wrappers to the perag structures.
+ */
+struct xfs_perag *
+xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
+{
+	struct xfs_perag	*pag;
+	int			ref = 0;
+
+	spin_lock(&mp->m_perag_lock);
+	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+	if (pag) {
+		ASSERT(atomic_read(&pag->pag_ref) >= 0);
+		/* catch leaks in the positive direction during testing */
+		ASSERT(atomic_read(&pag->pag_ref) < 1000);
+		ref = atomic_inc_return(&pag->pag_ref);
+	}
+	spin_unlock(&mp->m_perag_lock);
+	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
+	return pag;
+}
+
+void
+xfs_perag_put(struct xfs_perag *pag)
+{
+	int	ref;
+
+	ASSERT(atomic_read(&pag->pag_ref) > 0);
+	ref = atomic_dec_return(&pag->pag_ref);
+	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
+}
+
+/*
  * Free up the resources associated with a mount structure. Assume that
  * the structure was initially zeroed, so we can tell which fields got
  * initialized.
@@ -207,13 +241,16 @@ STATIC void
 xfs_free_perag(
 	xfs_mount_t	*mp)
 {
-	if (mp->m_perag) {
-		int	agno;
-
-		for (agno = 0; agno < mp->m_maxagi; agno++)
-			if (mp->m_perag[agno].pagb_list)
-				kmem_free(mp->m_perag[agno].pagb_list);
-		kmem_free(mp->m_perag);
+	xfs_agnumber_t	agno;
+	struct xfs_perag *pag;
+
+	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+		spin_lock(&mp->m_perag_lock);
+		pag = radix_tree_delete(&mp->m_perag_tree, agno);
+		ASSERT(pag);
+		ASSERT(atomic_read(&pag->pag_ref) == 0);
+		spin_unlock(&mp->m_perag_lock);
+		kmem_free(pag);
 	}
 }
 
@@ -387,22 +424,57 @@ xfs_initialize_perag_icache(
 	}
 }
 
-xfs_agnumber_t
+int
 xfs_initialize_perag(
 	xfs_mount_t	*mp,
-	xfs_agnumber_t	agcount)
+	xfs_agnumber_t	agcount,
+	xfs_agnumber_t	*maxagi)
 {
 	xfs_agnumber_t	index, max_metadata;
+	xfs_agnumber_t	first_initialised = 0;
 	xfs_perag_t	*pag;
 	xfs_agino_t	agino;
 	xfs_ino_t	ino;
 	xfs_sb_t	*sbp = &mp->m_sb;
 	xfs_ino_t	max_inum = XFS_MAXINUMBER_32;
+	int		error = -ENOMEM;
 
 	/* Check to see if the filesystem can overflow 32 bit inodes */
 	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 
+	/*
+	 * Walk the current per-ag tree so we don't try to initialise AGs
+	 * that already exist (growfs case). Allocate and insert all the
+	 * AGs we don't find ready for initialisation.
+	 */
+	for (index = 0; index < agcount; index++) {
+		pag = xfs_perag_get(mp, index);
+		if (pag) {
+			xfs_perag_put(pag);
+			continue;
+		}
+		if (!first_initialised)
+			first_initialised = index;
+		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
+		if (!pag)
+			goto out_unwind;
+		if (radix_tree_preload(GFP_NOFS))
+			goto out_unwind;
+		spin_lock(&mp->m_perag_lock);
+		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
+			BUG();
+			spin_unlock(&mp->m_perag_lock);
+			radix_tree_preload_end();
+			error = -EEXIST;
+			goto out_unwind;
+		}
+		pag->pag_agno = index;
+		pag->pag_mount = mp;
+		spin_unlock(&mp->m_perag_lock);
+		radix_tree_preload_end();
+	}
+
 	/* Clear the mount flag if no inode can overflow 32 bits
 	 * on this filesystem, or if specifically requested..
 	 */
@@ -436,21 +508,33 @@ xfs_initialize_perag(
 			}
 
 			/* This ag is preferred for inodes */
-			pag = &mp->m_perag[index];
+			pag = xfs_perag_get(mp, index);
 			pag->pagi_inodeok = 1;
 			if (index < max_metadata)
 				pag->pagf_metadata = 1;
 			xfs_initialize_perag_icache(pag);
+			xfs_perag_put(pag);
 		}
 	} else {
 		/* Setup default behavior for smaller filesystems */
 		for (index = 0; index < agcount; index++) {
-			pag = &mp->m_perag[index];
+			pag = xfs_perag_get(mp, index);
 			pag->pagi_inodeok = 1;
 			xfs_initialize_perag_icache(pag);
+			xfs_perag_put(pag);
 		}
 	}
-	return index;
+	if (maxagi)
+		*maxagi = index;
+	return 0;
+
+out_unwind:
+	kmem_free(pag);
+	for (; index > first_initialised; index--) {
+		pag = radix_tree_delete(&mp->m_perag_tree, index);
+		kmem_free(pag);
+	}
+	return error;
 }
 
 void
@@ -581,10 +665,10 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 	 * access to the superblock.
 	 */
 	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
-	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+	extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
 
-	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
-			BTOBB(sector_size), extra_flags);
+	bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
+			  extra_flags);
 	if (!bp || XFS_BUF_ISERROR(bp)) {
 		xfs_fs_mount_cmn_err(flags, "SB read failed");
 		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
@@ -624,8 +708,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 		XFS_BUF_UNMANAGE(bp);
 		xfs_buf_relse(bp);
 		sector_size = mp->m_sb.sb_sectsize;
-		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
-				BTOBB(sector_size), extra_flags);
+		bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR,
+				  BTOBB(sector_size), extra_flags);
 		if (!bp || XFS_BUF_ISERROR(bp)) {
 			xfs_fs_mount_cmn_err(flags, "SB re-read failed");
 			error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
@@ -729,12 +813,13 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
 		error = xfs_ialloc_pagi_init(mp, NULL, index);
 		if (error)
 			return error;
-		pag = &mp->m_perag[index];
+		pag = xfs_perag_get(mp, index);
 		ifree += pag->pagi_freecount;
 		ialloc += pag->pagi_count;
 		bfree += pag->pagf_freeblks;
 		bfreelst += pag->pagf_flcount;
 		btree += pag->pagf_btreeblks;
+		xfs_perag_put(pag);
 	}
 	/*
 	 * Overwrite incore superblock counters with just-read data
@@ -1006,6 +1091,24 @@ xfs_mount_reset_sbqflags(
 	return xfs_trans_commit(tp, 0);
 }
 
+__uint64_t
+xfs_default_resblks(xfs_mount_t *mp)
+{
+	__uint64_t resblks;
+
+	/*
+	 * We default to 5% or 8192 fsbs of space reserved, whichever is
+	 * smaller. This is intended to cover concurrent allocation
+	 * transactions when we initially hit enospc. These each require a 4
+	 * block reservation. Hence by default we cover roughly 2000 concurrent
+	 * allocation reservations.
+	 */
+	resblks = mp->m_sb.sb_dblocks;
+	do_div(resblks, 20);
+	resblks = min_t(__uint64_t, resblks, 8192);
+	return resblks;
+}
+
 /*
  * This function does the following on an initial mount of a file system:
  *	- reads the superblock from disk and init the mount struct
@@ -1150,13 +1253,13 @@ xfs_mountfs(
 	/*
 	 * Allocate and initialize the per-ag data.
 	 */
-	init_rwsem(&mp->m_peraglock);
-	mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
-				  KM_MAYFAIL);
-	if (!mp->m_perag)
+	spin_lock_init(&mp->m_perag_lock);
+	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS);
+	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
+	if (error) {
+		cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
 		goto out_remove_uuid;
-
-	mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
+	}
 
 	if (!sbp->sb_logblocks) {
 		cmn_err(CE_WARN, "XFS: no log defined");
@@ -1317,17 +1420,16 @@ xfs_mountfs(
 	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
 	 * are not allowed to use this reserved space.
 	 *
-	 * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
 	 * This may drive us straight to ENOSPC on mount, but that implies
 	 * we were already there on the last unmount. Warn if this occurs.
 	 */
-	resblks = mp->m_sb.sb_dblocks;
-	do_div(resblks, 20);
-	resblks = min_t(__uint64_t, resblks, 1024);
-	error = xfs_reserve_blocks(mp, &resblks, NULL);
-	if (error)
-		cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. "
-				"Continuing without a reserve pool.");
+	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+		resblks = xfs_default_resblks(mp);
+		error = xfs_reserve_blocks(mp, &resblks, NULL);
+		if (error)
+			cmn_err(CE_WARN, "XFS: Unable to allocate reserve "
+				"blocks. Continuing without a reserve pool.");
+	}
 
 	return 0;
 
@@ -1370,8 +1472,19 @@ xfs_unmountfs(
 	 * push out the iclog we will never get that unlocked. hence we
 	 * need to force the log first.
 	 */
-	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
-	xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC);
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
+	/*
+	 * Do a delwri reclaim pass first so that as many dirty inodes are
+	 * queued up for IO as possible. Then flush the buffers before making
+	 * a synchronous path to catch all the remaining inodes are reclaimed.
+	 * This makes the reclaim process as quick as possible by avoiding
+	 * synchronous writeout and blocking on inodes already in the delwri
+	 * state as much as possible.
+	 */
+	xfs_reclaim_inodes(mp, 0);
+	XFS_bflush(mp->m_ddev_targp);
+	xfs_reclaim_inodes(mp, SYNC_WAIT);
 
 	xfs_qm_unmount(mp);
 
@@ -1380,7 +1493,7 @@ xfs_unmountfs(
 	 * that nothing is pinned. This is important because bflush()
 	 * will skip pinned buffers.
 	 */
-	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+	xfs_log_force(mp, XFS_LOG_SYNC);
 
 	xfs_binval(mp->m_ddev_targp);
 	if (mp->m_rtdev_targp) {
@@ -1471,7 +1584,7 @@ xfs_log_sbcount(
 	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
 		return 0;
 
-	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
+	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
 	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
 				  XFS_DEFAULT_LOG_COUNT);
 	if (error) {
@@ -1546,15 +1659,14 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
 	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
 
 	/* find modified range */
+	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
+	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+	last = xfs_sb_info[f + 1].offset - 1;
 
 	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
 	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
 	first = xfs_sb_info[f].offset;
 
-	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
-	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
-	last = xfs_sb_info[f + 1].offset - 1;
-
 	xfs_trans_log_buf(tp, bp, first, last);
 }
 
@@ -1618,26 +1730,30 @@ xfs_mod_incore_sb_unlocked(
 				lcounter += rem;
 			}
 		} else {			/* Taking blocks away */
-
 			lcounter += delta;
+			if (lcounter >= 0) {
+				mp->m_sb.sb_fdblocks = lcounter +
+							XFS_ALLOC_SET_ASIDE(mp);
+				return 0;
+			}
 
 			/*
-			 * If were out of blocks, use any available reserved blocks if
-			 * were allowed to.
+			 * We are out of blocks, use any available reserved
+			 * blocks if were allowed to.
 			 */
+			if (!rsvd)
+				return XFS_ERROR(ENOSPC);
 
-			if (lcounter < 0) {
-				if (rsvd) {
-					lcounter = (long long)mp->m_resblks_avail + delta;
-					if (lcounter < 0) {
-						return XFS_ERROR(ENOSPC);
-					}
-					mp->m_resblks_avail = lcounter;
-					return 0;
-				} else {	/* not reserved */
-					return XFS_ERROR(ENOSPC);
-				}
+			lcounter = (long long)mp->m_resblks_avail + delta;
+			if (lcounter >= 0) {
+				mp->m_resblks_avail = lcounter;
+				return 0;
 			}
+			printk_once(KERN_WARNING
+				"Filesystem \"%s\": reserve blocks depleted! "
+				"Consider increasing reserve pool size.",
+				mp->m_fsname);
+			return XFS_ERROR(ENOSPC);
 		}
 
 		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
@@ -1885,7 +2001,7 @@ xfs_getsb(
 
 	ASSERT(mp->m_sb_bp != NULL);
 	bp = mp->m_sb_bp;
-	if (flags & XFS_BUF_TRYLOCK) {
+	if (flags & XBF_TRYLOCK) {
 		if (!XFS_BUF_CPSEMA(bp)) {
 			return NULL;
 		}
@@ -1945,6 +2061,26 @@ xfs_mount_log_sb(
 	return error;
 }
 
+/*
+ * If the underlying (data/log/rt) device is readonly, there are some
+ * operations that cannot proceed.
+ */
+int
+xfs_dev_is_read_only(
+	struct xfs_mount	*mp,
+	char			*message)
+{
+	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
+	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
+	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
+		cmn_err(CE_NOTE,
+			"XFS: %s required on read-only device.", message);
+		cmn_err(CE_NOTE,
+			"XFS: write access unavailable, cannot proceed.");
+		return EROFS;
+	}
+	return 0;
+}
 
 #ifdef HAVE_PERCPU_SB
 /*
@@ -2123,7 +2259,7 @@ xfs_icsb_destroy_counters(
 	mutex_destroy(&mp->m_icsb_mutex);
 }
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_lock_cntr(
 	xfs_icsb_cnts_t	*icsbp)
 {
@@ -2132,7 +2268,7 @@ xfs_icsb_lock_cntr(
 	}
 }
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_unlock_cntr(
 	xfs_icsb_cnts_t	*icsbp)
 {
@@ -2140,7 +2276,7 @@ xfs_icsb_unlock_cntr(
 }
 
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_lock_all_counters(
 	xfs_mount_t	*mp)
 {
@@ -2153,7 +2289,7 @@ xfs_icsb_lock_all_counters(
 	}
 }
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_unlock_all_counters(
 	xfs_mount_t	*mp)
 {
@@ -2389,12 +2525,12 @@ xfs_icsb_modify_counters(
 {
 	xfs_icsb_cnts_t	*icsbp;
 	long long	lcounter;	/* long counter for 64 bit fields */
-	int		cpu, ret = 0;
+	int		ret = 0;
 
 	might_sleep();
 again:
-	cpu = get_cpu();
-	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+	preempt_disable();
+	icsbp = this_cpu_ptr(mp->m_sb_cnts);
 
 	/*
 	 * if the counter is disabled, go to slow path
@@ -2438,11 +2574,11 @@ again:
 		break;
 	}
 	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
+	preempt_enable();
 	return 0;
 
 slow_path:
-	put_cpu();
+	preempt_enable();
 
 	/*
 	 * serialise with a mutex so we don't burn lots of cpu on
@@ -2490,7 +2626,7 @@ slow_path:
 
 balance_counter:
 	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
+	preempt_enable();
 
 	/*
 	 * We may have multiple threads here if multiple per-cpu