 fs/xfs/xfs_log.c   |  2 +-
 fs/xfs/xfs_mount.h |  2 ++
 fs/xfs/xfs_super.c | 38 ++++++++++++++++++--------------------
 fs/xfs/xfs_sync.c  | 20 +++++++++-----------
 fs/xfs/xfs_sync.h  |  2 --
 5 files changed, 30 insertions(+), 34 deletions(-)
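
Summary (reconstructed from the diff itself; this page carries no commit message): the global xfssyncd workqueue, xfs_syncd_wq, is removed. The log work and the periodic inode reclaim work that ran on it move to two new per-mount workqueues, m_log_workqueue and m_reclaim_workqueue, allocated in xfs_init_mount_workqueues() and destroyed in xfs_destroy_mount_workqueues(). The queueing helper xfs_syncd_queue_reclaim() is renamed to xfs_reclaim_work_queue().
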
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e788f39721e3..b6ce4d4b6def 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1193,7 +1193,7 @@ void
 xfs_log_work_queue(
 	struct xfs_mount	*mp)
 {
-	queue_delayed_work(xfs_syncd_wq, &mp->m_log->l_work,
+	queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
 			msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
 
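Note: xfs_syncd_centisecs is a sysctl tunable expressed in centiseconds, so the multiply-by-10 above converts it to milliseconds for msecs_to_jiffies(). A minimal userspace sketch of the two delay computations in this patch, assuming the default value of 3000 (the 30s period the xfs_sync.c comment below refers to); the reclaim variant divides by 6 first:

    /* sketch of the delay arithmetic in xfs_log_work_queue() and
     * xfs_reclaim_work_queue(); assumes the default xfs_syncd_centisecs
     * of 3000 centiseconds (30 seconds) */
    #include <stdio.h>

    int main(void)
    {
        unsigned int centisecs = 3000;  /* assumed sysctl default */

        /* log work: centiseconds -> milliseconds, i.e. 30000 ms */
        printf("log work interval:     %u ms\n", centisecs * 10);

        /* reclaim work: one sixth of the sync period, i.e. 5000 ms */
        printf("reclaim work interval: %u ms\n", centisecs / 6 * 10);
        return 0;
    }
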
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a54b5aa498d4..7c417b6b99ee 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -207,6 +207,8 @@ typedef struct xfs_mount {
 	struct workqueue_struct	*m_data_workqueue;
 	struct workqueue_struct	*m_unwritten_workqueue;
 	struct workqueue_struct	*m_cil_workqueue;
+	struct workqueue_struct	*m_reclaim_workqueue;
+	struct workqueue_struct	*m_log_workqueue;
 } xfs_mount_t;
 
 /*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 9468c6878463..27d5a92e1210 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -863,8 +863,23 @@ xfs_init_mount_workqueues(
 			WQ_MEM_RECLAIM, 0, mp->m_fsname);
 	if (!mp->m_cil_workqueue)
 		goto out_destroy_unwritten;
+
+	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
+			WQ_NON_REENTRANT, 0, mp->m_fsname);
+	if (!mp->m_reclaim_workqueue)
+		goto out_destroy_cil;
+
+	mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
+			WQ_NON_REENTRANT, 0, mp->m_fsname);
+	if (!mp->m_log_workqueue)
+		goto out_destroy_reclaim;
+
 	return 0;
 
+out_destroy_reclaim:
+	destroy_workqueue(mp->m_reclaim_workqueue);
+out_destroy_cil:
+	destroy_workqueue(mp->m_cil_workqueue);
 out_destroy_unwritten:
 	destroy_workqueue(mp->m_unwritten_workqueue);
 out_destroy_data_iodone_queue:
@@ -877,6 +892,8 @@ STATIC void
 xfs_destroy_mount_workqueues(
 	struct xfs_mount	*mp)
 {
+	destroy_workqueue(mp->m_log_workqueue);
+	destroy_workqueue(mp->m_reclaim_workqueue);
 	destroy_workqueue(mp->m_cil_workqueue);
 	destroy_workqueue(mp->m_data_workqueue);
 	destroy_workqueue(mp->m_unwritten_workqueue);
@@ -1391,10 +1408,6 @@ xfs_fs_fill_super(
 	/*
 	 * we must configure the block size in the superblock before we run the
 	 * full mount process as the mount process can lookup and cache inodes.
-	 * For the same reason we must also initialise the syncd and register
-	 * the inode cache shrinker so that inodes can be reclaimed during
-	 * operations like a quotacheck that iterate all inodes in the
-	 * filesystem.
 	 */
 	sb->s_magic = XFS_SB_MAGIC;
 	sb->s_blocksize = mp->m_sb.sb_blocksize;
@@ -1639,16 +1652,6 @@ STATIC int __init
 xfs_init_workqueues(void)
 {
 	/*
-	 * We never want to the same work item to run twice, reclaiming inodes
-	 * or idling the log is not going to get any faster by multiple CPUs
-	 * competing for ressources. Use the default large max_active value
-	 * so that even lots of filesystems can perform these task in parallel.
-	 */
-	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
-	if (!xfs_syncd_wq)
-		return -ENOMEM;
-
-	/*
 	 * The allocation workqueue can be used in memory reclaim situations
 	 * (writepage path), and parallelism is only limited by the number of
 	 * AGs in all the filesystems mounted. Hence use the default large
@@ -1656,20 +1659,15 @@ xfs_init_workqueues(void)
 	 */
 	xfs_alloc_wq = alloc_workqueue("xfsalloc", WQ_MEM_RECLAIM, 0);
 	if (!xfs_alloc_wq)
-		goto out_destroy_syncd;
+		return -ENOMEM;
 
 	return 0;
-
-out_destroy_syncd:
-	destroy_workqueue(xfs_syncd_wq);
-	return -ENOMEM;
 }
 
 STATIC void
 xfs_destroy_workqueues(void)
 {
 	destroy_workqueue(xfs_alloc_wq);
-	destroy_workqueue(xfs_syncd_wq);
 }
 
 STATIC int __init
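
Note: the new per-mount allocations keep WQ_NON_REENTRANT, preserving the guarantee spelled out in the comment removed from xfs_init_workqueues(): the same work item never runs on two CPUs at once. The error handling in xfs_init_mount_workqueues() is the kernel's stacked-goto unwind idiom, where each later failure jumps to a label that releases the earlier allocations in reverse order, and xfs_destroy_mount_workqueues() mirrors the allocation order in reverse. A standalone sketch of the idiom (illustrative names, plain malloc/free standing in for alloc_workqueue/destroy_workqueue):

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { void *cil, *reclaim, *log; };

    static int ctx_init(struct ctx *c)
    {
        c->cil = malloc(64);
        if (!c->cil)
            return -1;
        c->reclaim = malloc(64);
        if (!c->reclaim)
            goto out_free_cil;
        c->log = malloc(64);
        if (!c->log)
            goto out_free_reclaim;
        return 0;

    out_free_reclaim:
        free(c->reclaim);
    out_free_cil:
        free(c->cil);
        return -1;
    }

    /* teardown mirrors init in reverse, like xfs_destroy_mount_workqueues() */
    static void ctx_destroy(struct ctx *c)
    {
        free(c->log);
        free(c->reclaim);
        free(c->cil);
    }

    int main(void)
    {
        struct ctx c;

        if (ctx_init(&c))
            return 1;
        ctx_destroy(&c);
        printf("init/teardown ok\n");
        return 0;
    }
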
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 6a2ada379166..15be21f074fd 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -40,8 +40,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
-struct workqueue_struct *xfs_syncd_wq;	/* sync workqueue */
-
 /*
  * The inode lookup is done in batches to keep the amount of lock traffic and
  * radix tree lookups to a minimum. The batch size is a trade off between
@@ -335,18 +333,18 @@ xfs_quiesce_attr(
 /*
  * Queue a new inode reclaim pass if there are reclaimable inodes and there
  * isn't a reclaim pass already in progress. By default it runs every 5s based
- * on the xfs syncd work default of 30s. Perhaps this should have it's own
+ * on the xfs periodic sync default of 30s. Perhaps this should have it's own
  * tunable, but that can be done if this method proves to be ineffective or too
  * aggressive.
  */
 static void
-xfs_syncd_queue_reclaim(
+xfs_reclaim_work_queue(
 	struct xfs_mount	*mp)
 {
 
 	rcu_read_lock();
 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
-		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 	}
 	rcu_read_unlock();
@@ -367,7 +365,7 @@ xfs_reclaim_worker(
 			struct xfs_mount, m_reclaim_work);
 
 	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
-	xfs_syncd_queue_reclaim(mp);
+	xfs_reclaim_work_queue(mp);
 }
 
 void
@@ -388,7 +386,7 @@ __xfs_inode_set_reclaim_tag(
 	spin_unlock(&ip->i_mount->m_perag_lock);
 
 	/* schedule periodic background inode reclaim */
-	xfs_syncd_queue_reclaim(ip->i_mount);
+	xfs_reclaim_work_queue(ip->i_mount);
 
 	trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
 					-1, _RET_IP_);
@@ -646,9 +644,9 @@ out:
 	/*
 	 * We could return EAGAIN here to make reclaim rescan the inode tree in
 	 * a short while. However, this just burns CPU time scanning the tree
-	 * waiting for IO to complete and xfssyncd never goes back to the idle
-	 * state. Instead, return 0 to let the next scheduled background reclaim
-	 * attempt to reclaim the inode again.
+	 * waiting for IO to complete and the reclaim work never goes back to
+	 * the idle state. Instead, return 0 to let the next scheduled
+	 * background reclaim attempt to reclaim the inode again.
 	 */
 	return 0;
 }
@@ -804,7 +802,7 @@ xfs_reclaim_inodes_nr(
 	int			nr_to_scan)
 {
 	/* kick background reclaimer and push the AIL */
-	xfs_syncd_queue_reclaim(mp);
+	xfs_reclaim_work_queue(mp);
 	xfs_ail_push_all(mp->m_ail);
 
 	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
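
Note: xfs_reclaim_worker() above is a self-rearming delayed work: it performs one reclaim pass, then calls xfs_reclaim_work_queue() to schedule the next pass on the per-mount queue, so the cycle continues only while reclaimable inodes exist. A kernel-style sketch of that shape (not a drop-in; the struct and the five-second delay are illustrative):

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct demo_mount {
        struct workqueue_struct *wq;   /* per-mount queue */
        struct delayed_work     work;  /* self-rearming work item */
    };

    static void demo_worker(struct work_struct *work)
    {
        struct demo_mount *dm = container_of(to_delayed_work(work),
                                             struct demo_mount, work);

        /* ... one pass of periodic work (e.g. inode reclaim) ... */

        /* re-arm: next pass in five seconds */
        queue_delayed_work(dm->wq, &dm->work, msecs_to_jiffies(5000));
    }
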
diff --git a/fs/xfs/xfs_sync.h b/fs/xfs/xfs_sync.h
index 0018e846f0dc..0beabea99e73 100644
--- a/fs/xfs/xfs_sync.h
+++ b/fs/xfs/xfs_sync.h
@@ -24,8 +24,6 @@ struct xfs_perag;
 #define SYNC_WAIT		0x0001	/* wait for i/o to complete */
 #define SYNC_TRYLOCK		0x0002	/* only try to lock inodes */
 
-extern struct workqueue_struct *xfs_syncd_wq;	/* sync workqueue */
-
 void xfs_reclaim_worker(struct work_struct *work);
 
 int xfs_quiesce_data(struct xfs_mount *mp);
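
Note: with this extern gone, no global syncd state is left. Each mount allocates its workqueues in xfs_init_mount_workqueues() at mount time, the work items re-arm themselves while the filesystem is active, and xfs_destroy_mount_workqueues() tears the queues down at unmount.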