aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c127
-rw-r--r--fs/xfs/xfs_trans_ail.c133
-rw-r--r--fs/xfs/xfs_trans_priv.h15
3 files changed, 124 insertions, 151 deletions
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index ee0e981aa9d1..67d5b2cddb98 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -816,75 +816,6 @@ xfs_setup_devices(
816 return 0; 816 return 0;
817} 817}
818 818
819/*
820 * XFS AIL push thread support
821 */
822void
823xfsaild_wakeup(
824 struct xfs_ail *ailp,
825 xfs_lsn_t threshold_lsn)
826{
827 /* only ever move the target forwards */
828 if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
829 ailp->xa_target = threshold_lsn;
830 wake_up_process(ailp->xa_task);
831 }
832}
833
/*
 * xfsaild() - main loop of the per-mount AIL push kthread.
 *
 * Repeatedly sleeps for the timeout returned by the previous
 * xfsaild_push() call (tout == 0 means sleep until explicitly woken via
 * xfsaild_wakeup()'s wake_up_process()), then pushes the AIL again.
 * The task state is set *before* schedule_timeout() so that a wakeup
 * arriving between the two is not lost.  The loop exits, and the thread
 * returns, once kthread_stop() has been called on it.
 */
834STATIC int
835xfsaild(
836 void *data)
837{
838 struct xfs_ail *ailp = data;
839 xfs_lsn_t last_pushed_lsn = 0;
840 long tout = 0; /* milliseconds */
841
842 while (!kthread_should_stop()) {
843 /*
844 * for short sleeps indicating congestion, don't allow us to
845 * get woken early. Otherwise all we do is bang on the AIL lock
846 * without making progress.
847 */
848 if (tout && tout <= 20)
849 __set_current_state(TASK_KILLABLE);
850 else
851 __set_current_state(TASK_INTERRUPTIBLE);
852 schedule_timeout(tout ?
853 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
854
855 /* swsusp */
856 try_to_freeze();
857
858 ASSERT(ailp->xa_mount->m_log);
/* after a forced shutdown there is nothing useful to push; just sleep again */
859 if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
860 continue;
861
862 tout = xfsaild_push(ailp, &last_pushed_lsn);
863 }
864
865 return 0;
866} /* xfsaild */
867
868int
869xfsaild_start(
870 struct xfs_ail *ailp)
871{
872 ailp->xa_target = 0;
873 ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
874 ailp->xa_mount->m_fsname);
875 if (IS_ERR(ailp->xa_task))
876 return -PTR_ERR(ailp->xa_task);
877 return 0;
878}
879
880void
881xfsaild_stop(
882 struct xfs_ail *ailp)
883{
884 kthread_stop(ailp->xa_task);
885}
886
887
888/* Catch misguided souls that try to use this interface on XFS */ 819/* Catch misguided souls that try to use this interface on XFS */
889STATIC struct inode * 820STATIC struct inode *
890xfs_fs_alloc_inode( 821xfs_fs_alloc_inode(
@@ -1786,6 +1717,38 @@ xfs_destroy_zones(void)
1786} 1717}
1787 1718
1788STATIC int __init 1719STATIC int __init
1720xfs_init_workqueues(void)
1721{
1722 /*
1723 * max_active is set to 8 to give enough concurency to allow
1724 * multiple work operations on each CPU to run. This allows multiple
1725 * filesystems to be running sync work concurrently, and scales with
1726 * the number of CPUs in the system.
1727 */
1728 xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
1729 if (!xfs_syncd_wq)
1730 goto out;
1731
1732 xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
1733 if (!xfs_ail_wq)
1734 goto out_destroy_syncd;
1735
1736 return 0;
1737
1738out_destroy_syncd:
1739 destroy_workqueue(xfs_syncd_wq);
1740out:
1741 return -ENOMEM;
1742}
1743
1744STATIC void __exit
1745xfs_destroy_workqueues(void)
1746{
1747 destroy_workqueue(xfs_ail_wq);
1748 destroy_workqueue(xfs_syncd_wq);
1749}
1750
1751STATIC int __init
1789init_xfs_fs(void) 1752init_xfs_fs(void)
1790{ 1753{
1791 int error; 1754 int error;
@@ -1800,10 +1763,14 @@ init_xfs_fs(void)
1800 if (error) 1763 if (error)
1801 goto out; 1764 goto out;
1802 1765
1803 error = xfs_mru_cache_init(); 1766 error = xfs_init_workqueues();
1804 if (error) 1767 if (error)
1805 goto out_destroy_zones; 1768 goto out_destroy_zones;
1806 1769
1770 error = xfs_mru_cache_init();
1771 if (error)
1772 goto out_destroy_wq;
1773
1807 error = xfs_filestream_init(); 1774 error = xfs_filestream_init();
1808 if (error) 1775 if (error)
1809 goto out_mru_cache_uninit; 1776 goto out_mru_cache_uninit;
@@ -1820,27 +1787,17 @@ init_xfs_fs(void)
1820 if (error) 1787 if (error)
1821 goto out_cleanup_procfs; 1788 goto out_cleanup_procfs;
1822 1789
1823 /* 1790 error = xfs_init_workqueues();
1824 * max_active is set to 8 to give enough concurency to allow 1791 if (error)
1825 * multiple work operations on each CPU to run. This allows multiple
1826 * filesystems to be running sync work concurrently, and scales with
1827 * the number of CPUs in the system.
1828 */
1829 xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
1830 if (!xfs_syncd_wq) {
1831 error = -ENOMEM;
1832 goto out_sysctl_unregister; 1792 goto out_sysctl_unregister;
1833 }
1834 1793
1835 vfs_initquota(); 1794 vfs_initquota();
1836 1795
1837 error = register_filesystem(&xfs_fs_type); 1796 error = register_filesystem(&xfs_fs_type);
1838 if (error) 1797 if (error)
1839 goto out_destroy_xfs_syncd; 1798 goto out_sysctl_unregister;
1840 return 0; 1799 return 0;
1841 1800
1842 out_destroy_xfs_syncd:
1843 destroy_workqueue(xfs_syncd_wq);
1844 out_sysctl_unregister: 1801 out_sysctl_unregister:
1845 xfs_sysctl_unregister(); 1802 xfs_sysctl_unregister();
1846 out_cleanup_procfs: 1803 out_cleanup_procfs:
@@ -1851,6 +1808,8 @@ init_xfs_fs(void)
1851 xfs_filestream_uninit(); 1808 xfs_filestream_uninit();
1852 out_mru_cache_uninit: 1809 out_mru_cache_uninit:
1853 xfs_mru_cache_uninit(); 1810 xfs_mru_cache_uninit();
1811 out_destroy_wq:
1812 xfs_destroy_workqueues();
1854 out_destroy_zones: 1813 out_destroy_zones:
1855 xfs_destroy_zones(); 1814 xfs_destroy_zones();
1856 out: 1815 out:
@@ -1862,12 +1821,12 @@ exit_xfs_fs(void)
1862{ 1821{
1863 vfs_exitquota(); 1822 vfs_exitquota();
1864 unregister_filesystem(&xfs_fs_type); 1823 unregister_filesystem(&xfs_fs_type);
1865 destroy_workqueue(xfs_syncd_wq);
1866 xfs_sysctl_unregister(); 1824 xfs_sysctl_unregister();
1867 xfs_cleanup_procfs(); 1825 xfs_cleanup_procfs();
1868 xfs_buf_terminate(); 1826 xfs_buf_terminate();
1869 xfs_filestream_uninit(); 1827 xfs_filestream_uninit();
1870 xfs_mru_cache_uninit(); 1828 xfs_mru_cache_uninit();
1829 xfs_destroy_workqueues();
1871 xfs_destroy_zones(); 1830 xfs_destroy_zones();
1872} 1831}
1873 1832
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 12aff9584e29..cb3aeac929bc 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,6 +28,8 @@
28#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
29#include "xfs_error.h" 29#include "xfs_error.h"
30 30
31struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
32
31STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); 33STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
32STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); 34STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
33STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); 35STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
@@ -69,36 +71,6 @@ xfs_trans_ail_tail(
69} 71}
70 72
71/* 73/*
72 * xfs_trans_push_ail
73 *
74 * This routine is called to move the tail of the AIL forward. It does this by
75 * trying to flush items in the AIL whose lsns are below the given
76 * threshold_lsn.
77 *
78 * the push is run asynchronously in a separate thread, so we return the tail
79 * of the log right now instead of the tail after the push. This means we will
80 * either continue right away, or we will sleep waiting on the async thread to
81 * do its work.
82 *
83 * We do this unlocked - we only need to know whether there is anything in the
84 * AIL at the time we are called. We don't need to access the contents of
85 * any of the objects, so the lock is not needed.
86 */
87void
88xfs_trans_ail_push(
89 struct xfs_ail *ailp,
90 xfs_lsn_t threshold_lsn)
91{
92 xfs_log_item_t *lip;
93
94 lip = xfs_ail_min(ailp);
95 if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
96 if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
97 xfsaild_wakeup(ailp, threshold_lsn);
98 }
99}
100
101/*
102 * AIL traversal cursor initialisation. 74 * AIL traversal cursor initialisation.
103 * 75 *
104 * The cursor keeps track of where our current traversal is up 76 * The cursor keeps track of where our current traversal is up
@@ -236,16 +208,16 @@ out:
236} 208}
237 209
238/* 210/*
239 * xfsaild_push does the work of pushing on the AIL. Returning a timeout of 211 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
240 * zero indicates that the caller should sleep until woken. 212 * to run at a later time if there is more work to do to complete the push.
241 */ 213 */
242long 214STATIC void
243xfsaild_push( 215xfs_ail_worker(
244 struct xfs_ail *ailp, 216 struct work_struct *work)
245 xfs_lsn_t *last_lsn)
246{ 217{
247 long tout = 0; 218 struct xfs_ail *ailp = container_of(to_delayed_work(work),
248 xfs_lsn_t last_pushed_lsn = *last_lsn; 219 struct xfs_ail, xa_work);
220 long tout;
249 xfs_lsn_t target = ailp->xa_target; 221 xfs_lsn_t target = ailp->xa_target;
250 xfs_lsn_t lsn; 222 xfs_lsn_t lsn;
251 xfs_log_item_t *lip; 223 xfs_log_item_t *lip;
@@ -256,15 +228,15 @@ xfsaild_push(
256 228
257 spin_lock(&ailp->xa_lock); 229 spin_lock(&ailp->xa_lock);
258 xfs_trans_ail_cursor_init(ailp, cur); 230 xfs_trans_ail_cursor_init(ailp, cur);
259 lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn); 231 lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
260 if (!lip || XFS_FORCED_SHUTDOWN(mp)) { 232 if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
261 /* 233 /*
262 * AIL is empty or our push has reached the end. 234 * AIL is empty or our push has reached the end.
263 */ 235 */
264 xfs_trans_ail_cursor_done(ailp, cur); 236 xfs_trans_ail_cursor_done(ailp, cur);
265 spin_unlock(&ailp->xa_lock); 237 spin_unlock(&ailp->xa_lock);
266 *last_lsn = 0; 238 ailp->xa_last_pushed_lsn = 0;
267 return tout; 239 return;
268 } 240 }
269 241
270 XFS_STATS_INC(xs_push_ail); 242 XFS_STATS_INC(xs_push_ail);
@@ -301,13 +273,13 @@ xfsaild_push(
301 case XFS_ITEM_SUCCESS: 273 case XFS_ITEM_SUCCESS:
302 XFS_STATS_INC(xs_push_ail_success); 274 XFS_STATS_INC(xs_push_ail_success);
303 IOP_PUSH(lip); 275 IOP_PUSH(lip);
304 last_pushed_lsn = lsn; 276 ailp->xa_last_pushed_lsn = lsn;
305 break; 277 break;
306 278
307 case XFS_ITEM_PUSHBUF: 279 case XFS_ITEM_PUSHBUF:
308 XFS_STATS_INC(xs_push_ail_pushbuf); 280 XFS_STATS_INC(xs_push_ail_pushbuf);
309 IOP_PUSHBUF(lip); 281 IOP_PUSHBUF(lip);
310 last_pushed_lsn = lsn; 282 ailp->xa_last_pushed_lsn = lsn;
311 push_xfsbufd = 1; 283 push_xfsbufd = 1;
312 break; 284 break;
313 285
@@ -319,7 +291,7 @@ xfsaild_push(
319 291
320 case XFS_ITEM_LOCKED: 292 case XFS_ITEM_LOCKED:
321 XFS_STATS_INC(xs_push_ail_locked); 293 XFS_STATS_INC(xs_push_ail_locked);
322 last_pushed_lsn = lsn; 294 ailp->xa_last_pushed_lsn = lsn;
323 stuck++; 295 stuck++;
324 break; 296 break;
325 297
@@ -374,9 +346,23 @@ xfsaild_push(
374 wake_up_process(mp->m_ddev_targp->bt_task); 346 wake_up_process(mp->m_ddev_targp->bt_task);
375 } 347 }
376 348
349 /* assume we have more work to do in a short while */
350 tout = 10;
377 if (!count) { 351 if (!count) {
378 /* We're past our target or empty, so idle */ 352 /* We're past our target or empty, so idle */
379 last_pushed_lsn = 0; 353 ailp->xa_last_pushed_lsn = 0;
354
355 /*
356 * Check for an updated push target before clearing the
357 * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
358 * work to do. Wait a bit longer before starting that work.
359 */
360 smp_rmb();
361 if (ailp->xa_target == target) {
362 clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
363 return;
364 }
365 tout = 50;
380 } else if (XFS_LSN_CMP(lsn, target) >= 0) { 366 } else if (XFS_LSN_CMP(lsn, target) >= 0) {
381 /* 367 /*
382 * We reached the target so wait a bit longer for I/O to 368 * We reached the target so wait a bit longer for I/O to
@@ -384,7 +370,7 @@ xfsaild_push(
384 * start the next scan from the start of the AIL. 370 * start the next scan from the start of the AIL.
385 */ 371 */
386 tout = 50; 372 tout = 50;
387 last_pushed_lsn = 0; 373 ailp->xa_last_pushed_lsn = 0;
388 } else if ((stuck * 100) / count > 90) { 374 } else if ((stuck * 100) / count > 90) {
389 /* 375 /*
390 * Either there is a lot of contention on the AIL or we 376 * Either there is a lot of contention on the AIL or we
@@ -396,14 +382,48 @@ xfsaild_push(
396 * continuing from where we were. 382 * continuing from where we were.
397 */ 383 */
398 tout = 20; 384 tout = 20;
399 } else {
400 /* more to do, but wait a short while before continuing */
401 tout = 10;
402 } 385 }
403 *last_lsn = last_pushed_lsn; 386
404 return tout; 387 /* There is more to do, requeue us. */
388 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
389 msecs_to_jiffies(tout));
405} 390}
406 391
392/*
393 * This routine is called to move the tail of the AIL forward. It does this by
394 * trying to flush items in the AIL whose lsns are below the given
395 * threshold_lsn.
396 *
397 * The push is run asynchronously in a workqueue, which means the caller needs
398 * to handle waiting on the async flush for space to become available.
399 * We don't want to interrupt any push that is in progress, hence we only queue
400 * work if we set the pushing bit appropriately.
401 *
402 * We do this unlocked - we only need to know whether there is anything in the
403 * AIL at the time we are called. We don't need to access the contents of
404 * any of the objects, so the lock is not needed.
405 */
406void
407xfs_trans_ail_push(
408 struct xfs_ail *ailp,
409 xfs_lsn_t threshold_lsn)
410{
411 xfs_log_item_t *lip;
412
413 lip = xfs_ail_min(ailp);
/* nothing to do: AIL empty, fs shut down, or target not moving forwards */
414 if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
415 XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
416 return;
417
418 /*
419 * Ensure that the new target is noticed in push code before it clears
420 * the XFS_AIL_PUSHING_BIT.
421 */
422 smp_wmb();
423 ailp->xa_target = threshold_lsn;
/*
 * test_and_set_bit() guarantees at most one queued/running push worker;
 * if the bit was already set the running worker will pick up the new
 * xa_target (it re-checks after smp_rmb() before clearing the bit).
 */
424 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
425 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
426}
407 427
408/* 428/*
409 * This is to be called when an item is unlocked that may have 429 * This is to be called when an item is unlocked that may have
@@ -615,7 +635,6 @@ xfs_trans_ail_init(
615 xfs_mount_t *mp) 635 xfs_mount_t *mp)
616{ 636{
617 struct xfs_ail *ailp; 637 struct xfs_ail *ailp;
618 int error;
619 638
620 ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 639 ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
621 if (!ailp) 640 if (!ailp)
@@ -624,15 +643,9 @@ xfs_trans_ail_init(
624 ailp->xa_mount = mp; 643 ailp->xa_mount = mp;
625 INIT_LIST_HEAD(&ailp->xa_ail); 644 INIT_LIST_HEAD(&ailp->xa_ail);
626 spin_lock_init(&ailp->xa_lock); 645 spin_lock_init(&ailp->xa_lock);
627 error = xfsaild_start(ailp); 646 INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
628 if (error)
629 goto out_free_ailp;
630 mp->m_ail = ailp; 647 mp->m_ail = ailp;
631 return 0; 648 return 0;
632
633out_free_ailp:
634 kmem_free(ailp);
635 return error;
636} 649}
637 650
638void 651void
@@ -641,7 +654,7 @@ xfs_trans_ail_destroy(
641{ 654{
642 struct xfs_ail *ailp = mp->m_ail; 655 struct xfs_ail *ailp = mp->m_ail;
643 656
644 xfsaild_stop(ailp); 657 cancel_delayed_work_sync(&ailp->xa_work);
645 kmem_free(ailp); 658 kmem_free(ailp);
646} 659}
647 660
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 35162c238fa3..6ebd322bd37c 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
/*
 * Per-mount AIL (Active Item List) state.
 *
 * The workqueue conversion drops the dedicated-kthread fields (xa_gen,
 * xa_task) and adds: xa_work, the delayed work item queued by
 * xfs_trans_ail_push(); xa_last_pushed_lsn, where the next push scan
 * resumes from; and xa_flags, which holds XFS_AIL_PUSHING_BIT.
 * NOTE(review): xa_lock presumably serialises access to xa_ail and the
 * cursor state -- confirm against xfs_trans_ail.c.
 */
65struct xfs_ail { 65struct xfs_ail {
66 struct xfs_mount *xa_mount; 66 struct xfs_mount *xa_mount;
67 struct list_head xa_ail; 67 struct list_head xa_ail;
68 uint xa_gen;
69 struct task_struct *xa_task;
70 xfs_lsn_t xa_target; 68 xfs_lsn_t xa_target;
71 struct xfs_ail_cursor xa_cursors; 69 struct xfs_ail_cursor xa_cursors;
72 spinlock_t xa_lock; 70 spinlock_t xa_lock;
71 struct delayed_work xa_work;
72 xfs_lsn_t xa_last_pushed_lsn;
73 unsigned long xa_flags;
73}; 74};
74 75
76#define XFS_AIL_PUSHING_BIT 0
77
75/* 78/*
76 * From xfs_trans_ail.c 79 * From xfs_trans_ail.c
77 */ 80 */
81
82extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
83
78void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, 84void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
79 struct xfs_log_item **log_items, int nr_items, 85 struct xfs_log_item **log_items, int nr_items,
80 xfs_lsn_t lsn) __releases(ailp->xa_lock); 86 xfs_lsn_t lsn) __releases(ailp->xa_lock);
@@ -112,11 +118,6 @@ struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
112void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, 118void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
113 struct xfs_ail_cursor *cur); 119 struct xfs_ail_cursor *cur);
114 120
115long xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
116void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
117int xfsaild_start(struct xfs_ail *);
118void xfsaild_stop(struct xfs_ail *);
119
120#if BITS_PER_LONG != 64 121#if BITS_PER_LONG != 64
121static inline void 122static inline void
122xfs_trans_ail_copy_lsn( 123xfs_trans_ail_copy_lsn(