author		Dave Chinner <dchinner@redhat.com>	2012-10-08 06:56:05 -0400
committer	Ben Myers <bpm@sgi.com>			2012-10-17 13:19:27 -0400
commit		5889608df35783590251cfd440fa5d48f1855179
tree		4284177945081868e2756d27ae9706e1cc9ee357	/fs/xfs/xfs_sync.c
parent		9aa05000f2b7cab4be582afba64af10b2d74727e
xfs: syncd workqueue is no more

With the syncd functions moved to the log and/or removed, the syncd
workqueue is the only piece that remains. It is used by the log
covering/AIL pushing work, as well as by the inode reclaim work.
Given how cheap workqueues are these days, give the log and inode
reclaim work their own workqueues and kill the syncd workqueue.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_sync.c')
 fs/xfs/xfs_sync.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
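The queue the patch switches to, mp->m_reclaim_workqueue, has to be created at mount time in fs/xfs/xfs_super.c, which this diffstat deliberately excludes. Below is a minimal sketch of what that per-mount setup and teardown plausibly look like; the helper names, the "xfs-reclaim/%s" naming, the WQ_MEM_RECLAIM flag, and the use of mp->m_fsname are assumptions for illustration, not quotes from the patch.

#include <linux/errno.h>
#include <linux/workqueue.h>
/* struct xfs_mount and m_reclaim_workqueue come from fs/xfs/xfs_mount.h */

/* Hypothetical helpers; the real setup lives in fs/xfs/xfs_super.c and
 * may differ in naming and detail. */
static int xfs_init_reclaim_workqueue_sketch(struct xfs_mount *mp)
{
	/*
	 * One queue per mount, named after the filesystem instance.
	 * WQ_MEM_RECLAIM gives the queue a rescuer thread, so reclaim
	 * work can still make forward progress under memory pressure.
	 */
	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM, 0, mp->m_fsname);
	return mp->m_reclaim_workqueue ? 0 : -ENOMEM;
}

static void xfs_destroy_reclaim_workqueue_sketch(struct xfs_mount *mp)
{
	destroy_workqueue(mp->m_reclaim_workqueue);	/* drains, then frees */
}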
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 6a2ada379166..15be21f074fd 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -40,8 +40,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
-struct workqueue_struct *xfs_syncd_wq;	/* sync workqueue */
-
 /*
  * The inode lookup is done in batches to keep the amount of lock traffic and
  * radix tree lookups to a minimum. The batch size is a trade off between
@@ -335,18 +333,18 @@ xfs_quiesce_attr(
 /*
  * Queue a new inode reclaim pass if there are reclaimable inodes and there
  * isn't a reclaim pass already in progress. By default it runs every 5s based
- * on the xfs syncd work default of 30s. Perhaps this should have it's own
+ * on the xfs periodic sync default of 30s. Perhaps this should have it's own
  * tunable, but that can be done if this method proves to be ineffective or too
  * aggressive.
  */
 static void
-xfs_syncd_queue_reclaim(
+xfs_reclaim_work_queue(
 	struct xfs_mount	*mp)
 {
 
 	rcu_read_lock();
 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
-		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 	}
 	rcu_read_unlock();
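The queue_delayed_work() call above derives the reclaim period from the existing xfs_syncd_centisecs tunable rather than a dedicated knob, as the comment in the hunk notes. With the default of 3000 centiseconds (30 seconds), the expression yields a 5-second cadence; here is a small standalone check of that integer arithmetic (the macro name is invented for the example):

#include <stdio.h>

/* Default of the xfs_syncd_centisecs tunable: 30s in centiseconds. */
#define XFS_SYNCD_CENTISECS_DEFAULT 3000

int main(void)
{
	/* Same expression as the kernel call site: divide by 6 for one
	 * sixth of the interval, then multiply by 10 to turn
	 * centiseconds into milliseconds. Integer division runs first. */
	unsigned long delay_ms = XFS_SYNCD_CENTISECS_DEFAULT / 6 * 10;

	printf("reclaim pass every %lu ms\n", delay_ms);	/* 5000 */
	return 0;
}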
@@ -367,7 +365,7 @@ xfs_reclaim_worker(
 		struct xfs_mount, m_reclaim_work);
 
 	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
-	xfs_syncd_queue_reclaim(mp);
+	xfs_reclaim_work_queue(mp);
 }
 
 void
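xfs_reclaim_worker() above runs one reclaim pass and then calls xfs_reclaim_work_queue() to schedule the next one, so the work re-arms itself only while tagged inodes remain. A generic sketch of that self-rearming delayed-work pattern follows, with every name hypothetical:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* All names here are illustrative; only the pattern mirrors the XFS code. */
static struct workqueue_struct *demo_wq;	/* assumed allocated elsewhere */
static struct delayed_work demo_work;

static bool demo_have_work(void)
{
	return false;	/* placeholder for a check like radix_tree_tagged() */
}

static void demo_worker(struct work_struct *work)
{
	/* ... one bounded pass of background work goes here ... */

	/* Re-arm only while there is something left to do; once the
	 * backlog drains, the work stops rescheduling itself and the
	 * whole mechanism goes idle without any extra teardown. */
	if (demo_have_work())
		queue_delayed_work(demo_wq, &demo_work,
				   msecs_to_jiffies(5000));
}

static void demo_start(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_worker);
	queue_delayed_work(demo_wq, &demo_work, 0);	/* first pass now */
}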
@@ -388,7 +386,7 @@ __xfs_inode_set_reclaim_tag(
 	spin_unlock(&ip->i_mount->m_perag_lock);
 
 	/* schedule periodic background inode reclaim */
-	xfs_syncd_queue_reclaim(ip->i_mount);
+	xfs_reclaim_work_queue(ip->i_mount);
 
 	trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
 					-1, _RET_IP_);
@@ -646,9 +644,9 @@ out:
 	/*
 	 * We could return EAGAIN here to make reclaim rescan the inode tree in
 	 * a short while. However, this just burns CPU time scanning the tree
-	 * waiting for IO to complete and xfssyncd never goes back to the idle
-	 * state. Instead, return 0 to let the next scheduled background reclaim
-	 * attempt to reclaim the inode again.
+	 * waiting for IO to complete and the reclaim work never goes back to
+	 * the idle state. Instead, return 0 to let the next scheduled
+	 * background reclaim attempt to reclaim the inode again.
 	 */
 	return 0;
 }
@@ -804,7 +802,7 @@ xfs_reclaim_inodes_nr(
 	int			nr_to_scan)
 {
 	/* kick background reclaimer and push the AIL */
-	xfs_syncd_queue_reclaim(mp);
+	xfs_reclaim_work_queue(mp);
 	xfs_ail_push_all(mp->m_ail);
 
 	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);