aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_trans_ail.c
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2011-04-07 22:45:07 -0400
committerDave Chinner <david@fromorbit.com>2011-04-07 22:45:07 -0400
commit0bf6a5bd4b55b466964ead6fa566d8f346a828ee (patch)
tree661f2bcc36458c807752243c6f2a76b43a64302c /fs/xfs/xfs_trans_ail.c
parenta7b339f1b8698667eada006e717cdb4523be2ed5 (diff)
xfs: convert the xfsaild threads to a workqueue
Similar to the xfssyncd, the per-filesystem xfsaild threads can be converted to a global workqueue and run periodically by delayed works. This makes sense for the AIL pushing because it uses variable timeouts depending on the work that needs to be done. By removing the xfsaild, we simplify the AIL pushing code and remove the need to spread the code to implement the threading and pushing across multiple files. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_trans_ail.c')
-rw-r--r--fs/xfs/xfs_trans_ail.c133
1 file changed, 73 insertions, 60 deletions
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 12aff9584e29..cb3aeac929bc 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,6 +28,8 @@
28#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
29#include "xfs_error.h" 29#include "xfs_error.h"
30 30
31struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
32
31STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); 33STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
32STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); 34STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
33STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); 35STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
@@ -69,36 +71,6 @@ xfs_trans_ail_tail(
69} 71}
70 72
71/* 73/*
72 * xfs_trans_push_ail
73 *
74 * This routine is called to move the tail of the AIL forward. It does this by
75 * trying to flush items in the AIL whose lsns are below the given
76 * threshold_lsn.
77 *
78 * the push is run asynchronously in a separate thread, so we return the tail
79 * of the log right now instead of the tail after the push. This means we will
80 * either continue right away, or we will sleep waiting on the async thread to
81 * do its work.
82 *
83 * We do this unlocked - we only need to know whether there is anything in the
84 * AIL at the time we are called. We don't need to access the contents of
85 * any of the objects, so the lock is not needed.
86 */
87void
88xfs_trans_ail_push(
89 struct xfs_ail *ailp,
90 xfs_lsn_t threshold_lsn)
91{
92 xfs_log_item_t *lip;
93
94 lip = xfs_ail_min(ailp);
95 if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
96 if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
97 xfsaild_wakeup(ailp, threshold_lsn);
98 }
99}
100
101/*
102 * AIL traversal cursor initialisation. 74 * AIL traversal cursor initialisation.
103 * 75 *
104 * The cursor keeps track of where our current traversal is up 76 * The cursor keeps track of where our current traversal is up
@@ -236,16 +208,16 @@ out:
236} 208}
237 209
238/* 210/*
239 * xfsaild_push does the work of pushing on the AIL. Returning a timeout of 211 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
240 * zero indicates that the caller should sleep until woken. 212 * to run at a later time if there is more work to do to complete the push.
241 */ 213 */
242long 214STATIC void
243xfsaild_push( 215xfs_ail_worker(
244 struct xfs_ail *ailp, 216 struct work_struct *work)
245 xfs_lsn_t *last_lsn)
246{ 217{
247 long tout = 0; 218 struct xfs_ail *ailp = container_of(to_delayed_work(work),
248 xfs_lsn_t last_pushed_lsn = *last_lsn; 219 struct xfs_ail, xa_work);
220 long tout;
249 xfs_lsn_t target = ailp->xa_target; 221 xfs_lsn_t target = ailp->xa_target;
250 xfs_lsn_t lsn; 222 xfs_lsn_t lsn;
251 xfs_log_item_t *lip; 223 xfs_log_item_t *lip;
@@ -256,15 +228,15 @@ xfsaild_push(
256 228
257 spin_lock(&ailp->xa_lock); 229 spin_lock(&ailp->xa_lock);
258 xfs_trans_ail_cursor_init(ailp, cur); 230 xfs_trans_ail_cursor_init(ailp, cur);
259 lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn); 231 lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
260 if (!lip || XFS_FORCED_SHUTDOWN(mp)) { 232 if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
261 /* 233 /*
262 * AIL is empty or our push has reached the end. 234 * AIL is empty or our push has reached the end.
263 */ 235 */
264 xfs_trans_ail_cursor_done(ailp, cur); 236 xfs_trans_ail_cursor_done(ailp, cur);
265 spin_unlock(&ailp->xa_lock); 237 spin_unlock(&ailp->xa_lock);
266 *last_lsn = 0; 238 ailp->xa_last_pushed_lsn = 0;
267 return tout; 239 return;
268 } 240 }
269 241
270 XFS_STATS_INC(xs_push_ail); 242 XFS_STATS_INC(xs_push_ail);
@@ -301,13 +273,13 @@ xfsaild_push(
301 case XFS_ITEM_SUCCESS: 273 case XFS_ITEM_SUCCESS:
302 XFS_STATS_INC(xs_push_ail_success); 274 XFS_STATS_INC(xs_push_ail_success);
303 IOP_PUSH(lip); 275 IOP_PUSH(lip);
304 last_pushed_lsn = lsn; 276 ailp->xa_last_pushed_lsn = lsn;
305 break; 277 break;
306 278
307 case XFS_ITEM_PUSHBUF: 279 case XFS_ITEM_PUSHBUF:
308 XFS_STATS_INC(xs_push_ail_pushbuf); 280 XFS_STATS_INC(xs_push_ail_pushbuf);
309 IOP_PUSHBUF(lip); 281 IOP_PUSHBUF(lip);
310 last_pushed_lsn = lsn; 282 ailp->xa_last_pushed_lsn = lsn;
311 push_xfsbufd = 1; 283 push_xfsbufd = 1;
312 break; 284 break;
313 285
@@ -319,7 +291,7 @@ xfsaild_push(
319 291
320 case XFS_ITEM_LOCKED: 292 case XFS_ITEM_LOCKED:
321 XFS_STATS_INC(xs_push_ail_locked); 293 XFS_STATS_INC(xs_push_ail_locked);
322 last_pushed_lsn = lsn; 294 ailp->xa_last_pushed_lsn = lsn;
323 stuck++; 295 stuck++;
324 break; 296 break;
325 297
@@ -374,9 +346,23 @@ xfsaild_push(
374 wake_up_process(mp->m_ddev_targp->bt_task); 346 wake_up_process(mp->m_ddev_targp->bt_task);
375 } 347 }
376 348
349 /* assume we have more work to do in a short while */
350 tout = 10;
377 if (!count) { 351 if (!count) {
378 /* We're past our target or empty, so idle */ 352 /* We're past our target or empty, so idle */
379 last_pushed_lsn = 0; 353 ailp->xa_last_pushed_lsn = 0;
354
355 /*
356 * Check for an updated push target before clearing the
357 * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
358 * work to do. Wait a bit longer before starting that work.
359 */
360 smp_rmb();
361 if (ailp->xa_target == target) {
362 clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
363 return;
364 }
365 tout = 50;
380 } else if (XFS_LSN_CMP(lsn, target) >= 0) { 366 } else if (XFS_LSN_CMP(lsn, target) >= 0) {
381 /* 367 /*
382 * We reached the target so wait a bit longer for I/O to 368 * We reached the target so wait a bit longer for I/O to
@@ -384,7 +370,7 @@ xfsaild_push(
384 * start the next scan from the start of the AIL. 370 * start the next scan from the start of the AIL.
385 */ 371 */
386 tout = 50; 372 tout = 50;
387 last_pushed_lsn = 0; 373 ailp->xa_last_pushed_lsn = 0;
388 } else if ((stuck * 100) / count > 90) { 374 } else if ((stuck * 100) / count > 90) {
389 /* 375 /*
390 * Either there is a lot of contention on the AIL or we 376 * Either there is a lot of contention on the AIL or we
@@ -396,14 +382,48 @@ xfsaild_push(
396 * continuing from where we were. 382 * continuing from where we were.
397 */ 383 */
398 tout = 20; 384 tout = 20;
399 } else {
400 /* more to do, but wait a short while before continuing */
401 tout = 10;
402 } 385 }
403 *last_lsn = last_pushed_lsn; 386
404 return tout; 387 /* There is more to do, requeue us. */
388 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
389 msecs_to_jiffies(tout));
405} 390}
406 391
392/*
393 * This routine is called to move the tail of the AIL forward. It does this by
394 * trying to flush items in the AIL whose lsns are below the given
395 * threshold_lsn.
396 *
397 * The push is run asynchronously in a workqueue, which means the caller needs
398 * to handle waiting on the async flush for space to become available.
399 * We don't want to interrupt any push that is in progress, hence we only queue
400 * work if we set the pushing bit appropriately.
401 *
402 * We do this unlocked - we only need to know whether there is anything in the
403 * AIL at the time we are called. We don't need to access the contents of
404 * any of the objects, so the lock is not needed.
405 */
406void
407xfs_trans_ail_push(
408 struct xfs_ail *ailp,
409 xfs_lsn_t threshold_lsn)
410{
411 xfs_log_item_t *lip;
412
413 lip = xfs_ail_min(ailp);
414 if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
415 XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
416 return;
417
418 /*
419 * Ensure that the new target is noticed in push code before it clears
420 * the XFS_AIL_PUSHING_BIT.
421 */
422 smp_wmb();
423 ailp->xa_target = threshold_lsn;
424 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
425 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
426}
407 427
408/* 428/*
409 * This is to be called when an item is unlocked that may have 429 * This is to be called when an item is unlocked that may have
@@ -615,7 +635,6 @@ xfs_trans_ail_init(
615 xfs_mount_t *mp) 635 xfs_mount_t *mp)
616{ 636{
617 struct xfs_ail *ailp; 637 struct xfs_ail *ailp;
618 int error;
619 638
620 ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 639 ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
621 if (!ailp) 640 if (!ailp)
@@ -624,15 +643,9 @@ xfs_trans_ail_init(
624 ailp->xa_mount = mp; 643 ailp->xa_mount = mp;
625 INIT_LIST_HEAD(&ailp->xa_ail); 644 INIT_LIST_HEAD(&ailp->xa_ail);
626 spin_lock_init(&ailp->xa_lock); 645 spin_lock_init(&ailp->xa_lock);
627 error = xfsaild_start(ailp); 646 INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
628 if (error)
629 goto out_free_ailp;
630 mp->m_ail = ailp; 647 mp->m_ail = ailp;
631 return 0; 648 return 0;
632
633out_free_ailp:
634 kmem_free(ailp);
635 return error;
636} 649}
637 650
638void 651void
@@ -641,7 +654,7 @@ xfs_trans_ail_destroy(
641{ 654{
642 struct xfs_ail *ailp = mp->m_ail; 655 struct xfs_ail *ailp = mp->m_ail;
643 656
644 xfsaild_stop(ailp); 657 cancel_delayed_work_sync(&ailp->xa_work);
645 kmem_free(ailp); 658 kmem_free(ailp);
646} 659}
647 660