Diffstat (limited to 'fs/xfs/xfs_super.c')
 fs/xfs/xfs_super.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 912442cf0f82..dab9a5f6dfd6 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -950,6 +950,22 @@ xfs_fs_evict_inode(
 	xfs_inactive(ip);
 }
 
+/*
+ * We do an unlocked check for XFS_IDONTCACHE here because we are already
+ * serialised against cache hits here via the inode->i_lock and igrab() in
+ * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
+ * racing with us, and it avoids needing to grab a spinlock here for every inode
+ * we drop the final reference on.
+ */
+STATIC int
+xfs_fs_drop_inode(
+	struct inode		*inode)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+
+	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
+}
+
 STATIC void
 xfs_free_fsname(
 	struct xfs_mount	*mp)
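The new ->drop_inode hook is consulted from iput_final() when the last reference to an in-core inode is put; a nonzero return tells the VFS to evict the inode immediately instead of leaving it cached. generic_drop_inode() already drops unlinked or unhashed inodes; the XFS override additionally drops inodes tagged XFS_IDONTCACHE so single-use lookups do not pollute the inode cache. The following is a minimal userspace model of that decision, not kernel code; toy_inode, toy_generic_drop_inode() and TOY_DONTCACHE are invented names used only for illustration.

#include <stdbool.h>
#include <stdio.h>

#define TOY_DONTCACHE	0x1

struct toy_inode {
	unsigned int	i_nlink;	/* link count */
	bool		hashed;		/* still on the inode hash? */
	unsigned int	flags;		/* per-inode flags, cf. ip->i_flags */
};

/* cf. generic_drop_inode(): drop if the inode is unlinked or unhashed */
static bool toy_generic_drop_inode(const struct toy_inode *inode)
{
	return inode->i_nlink == 0 || !inode->hashed;
}

/* cf. xfs_fs_drop_inode(): additionally drop single-use inodes */
static bool toy_drop_inode(const struct toy_inode *inode)
{
	return toy_generic_drop_inode(inode) || (inode->flags & TOY_DONTCACHE);
}

int main(void)
{
	struct toy_inode cached  = { .i_nlink = 1, .hashed = true, .flags = 0 };
	struct toy_inode oneshot = { .i_nlink = 1, .hashed = true,
				     .flags = TOY_DONTCACHE };

	printf("cached:  drop=%d\n", toy_drop_inode(&cached));		/* 0: keep cached */
	printf("oneshot: drop=%d\n", toy_drop_inode(&oneshot));	/* 1: evict now */
	return 0;
}

Running the model prints drop=0 for the ordinary inode and drop=1 for the one flagged don't-cache, mirroring the decision the patch adds to the final-iput path.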
@@ -1433,6 +1449,7 @@ static const struct super_operations xfs_super_operations = {
 	.destroy_inode		= xfs_fs_destroy_inode,
 	.dirty_inode		= xfs_fs_dirty_inode,
 	.evict_inode		= xfs_fs_evict_inode,
+	.drop_inode		= xfs_fs_drop_inode,
 	.put_super		= xfs_fs_put_super,
 	.sync_fs		= xfs_fs_sync_fs,
 	.freeze_fs		= xfs_fs_freeze,
@@ -1606,12 +1623,28 @@ xfs_init_workqueues(void)
 	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
 	if (!xfs_syncd_wq)
 		return -ENOMEM;
+
+	/*
+	 * The allocation workqueue can be used in memory reclaim situations
+	 * (writepage path), and parallelism is only limited by the number of
+	 * AGs in all the filesystems mounted. Hence use the default large
+	 * max_active value for this workqueue.
+	 */
+	xfs_alloc_wq = alloc_workqueue("xfsalloc", WQ_MEM_RECLAIM, 0);
+	if (!xfs_alloc_wq)
+		goto out_destroy_syncd;
+
 	return 0;
+
+out_destroy_syncd:
+	destroy_workqueue(xfs_syncd_wq);
+	return -ENOMEM;
 }
 
 STATIC void
 xfs_destroy_workqueues(void)
 {
+	destroy_workqueue(xfs_alloc_wq);
 	destroy_workqueue(xfs_syncd_wq);
 }
 
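For reference, alloc_workqueue() treats a max_active of 0 as "use the default concurrency limit", and WQ_MEM_RECLAIM gives the queue a rescuer thread so at least one work item can always make forward progress when memory is too tight to fork new worker threads; that is why the comment above stresses the writepage/reclaim path. Below is a minimal out-of-tree module sketch of the same create/queue/destroy pattern, assuming a normal kernel module build environment; demo_wq, demo_work_fn and the work item are hypothetical names, not part of this patch.

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: work item ran\n");
}
static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* max_active == 0: take the default limit; rescuer via WQ_MEM_RECLAIM */
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* wait for queued work to finish, then tear the queue down */
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The error unwinding in the hunk above follows the usual kernel goto pattern: a later allocation that fails jumps to a label that releases everything allocated before it, and xfs_destroy_workqueues() tears the queues down in the reverse order of creation.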