author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-14 01:06:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-14 01:06:39 -0400
commit     480082968a78151e731ebd304eeb6cada61a1cd1 (patch)
tree       263179c267b99763cb4fd952b24870cca28d14a7
parent     95bc156c62a57cda1b225e03ae23eb2764ee1046 (diff)
parent     0030807c66f058230bcb20d2573bcaf28852e804 (diff)
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: revert to using a kthread for AIL pushing
  xfs: force the log if we encounter pinned buffers in .iop_pushbuf
  xfs: do not update xa_last_pushed_lsn for locked items
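The central change in this series is to drop the self-requeueing delayed work item and drive AIL pushing from a dedicated thread again: the thread sleeps for a timeout chosen by the push routine (or indefinitely when idle) and is woken directly when xfs_ail_push() publishes a new target, as the xfs_trans_ail.c hunks below show. As a rough, purely illustrative userspace analogy of that loop structure (not the kernel code itself; all names here are hypothetical), a sketch of the pattern might look like this:

```c
/* Userspace sketch of the "dedicated pusher thread" pattern: the thread
 * sleeps up to a timeout chosen by the push routine and can be woken early
 * when a new target is set. Names are illustrative, not kernel APIs. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool stop;
static long target;			/* analogue of ailp->xa_target */

/* Analogue of xfsaild_push(): do one pass, return ms until the next pass. */
static long push_once(void)
{
	printf("pushing toward target %ld\n", target);
	return target ? 20 : 0;		/* 0 means "idle until woken" */
}

static void *pusher(void *arg)
{
	long tout = 0;			/* milliseconds */

	pthread_mutex_lock(&lock);
	while (!stop) {
		if (tout) {
			struct timespec ts;

			clock_gettime(CLOCK_REALTIME, &ts);
			ts.tv_sec  += tout / 1000;
			ts.tv_nsec += (tout % 1000) * 1000000L;
			if (ts.tv_nsec >= 1000000000L) {
				ts.tv_sec++;
				ts.tv_nsec -= 1000000000L;
			}
			pthread_cond_timedwait(&wake, &lock, &ts);
		} else {
			pthread_cond_wait(&wake, &lock);	/* idle */
		}
		if (stop)
			break;
		tout = push_once();
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Analogue of xfs_ail_push(): publish a new target and wake the thread. */
static void set_target(long t)
{
	pthread_mutex_lock(&lock);
	target = t;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;
	struct timespec d = { 0, 100 * 1000000L };	/* let it run ~100ms */

	pthread_create(&tid, NULL, pusher, NULL);
	set_target(42);
	nanosleep(&d, NULL);

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}
```

The point of the pattern, as in the kernel change, is that wakeups and timeouts cooperate: a new target wakes the pusher immediately, while the push routine's return value decides how soon it retries on its own.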
-rw-r--r--  fs/xfs/xfs_buf_item.c    |  3
-rw-r--r--  fs/xfs/xfs_dquot_item.c  | 10
-rw-r--r--  fs/xfs/xfs_inode_item.c  | 10
-rw-r--r--  fs/xfs/xfs_linux.h       |  2
-rw-r--r--  fs/xfs/xfs_super.c       | 13
-rw-r--r--  fs/xfs/xfs_trans.h       |  2
-rw-r--r--  fs/xfs/xfs_trans_ail.c   | 83
-rw-r--r--  fs/xfs/xfs_trans_priv.h  |  8
8 files changed, 69 insertions(+), 62 deletions(-)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index cac2ecfa6746..ef43fce519a1 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -629,7 +629,7 @@ xfs_buf_item_push(
  * the xfsbufd to get this buffer written. We have to unlock the buffer
  * to allow the xfsbufd to write it, too.
  */
-STATIC void
+STATIC bool
 xfs_buf_item_pushbuf(
 	struct xfs_log_item	*lip)
 {
@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
 
 	xfs_buf_delwri_promote(bp);
 	xfs_buf_relse(bp);
+	return true;
 }
 
 STATIC void
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 9e0e2fa3f2c8..bb3f71d236d2 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
  * search the buffer cache can be a time consuming thing, and AIL lock is a
  * spinlock.
  */
-STATIC void
+STATIC bool
 xfs_qm_dquot_logitem_pushbuf(
 	struct xfs_log_item	*lip)
 {
 	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
 	struct xfs_dquot	*dqp = qlip->qli_dquot;
 	struct xfs_buf		*bp;
+	bool			ret = true;
 
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
 	if (completion_done(&dqp->q_flush) ||
 	    !(lip->li_flags & XFS_LI_IN_AIL)) {
 		xfs_dqunlock(dqp);
-		return;
+		return true;
 	}
 
 	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
 			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
 	xfs_dqunlock(dqp);
 	if (!bp)
-		return;
+		return true;
 	if (XFS_BUF_ISDELAYWRITE(bp))
 		xfs_buf_delwri_promote(bp);
+	if (xfs_buf_ispinned(bp))
+		ret = false;
 	xfs_buf_relse(bp);
+	return ret;
 }
 
 /*
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 588406dc6a35..836ad80d4f2b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -708,13 +708,14 @@ xfs_inode_item_committed(
  * marked delayed write. If that's the case, we'll promote it and that will
  * allow the caller to write the buffer by triggering the xfsbufd to run.
  */
-STATIC void
+STATIC bool
 xfs_inode_item_pushbuf(
 	struct xfs_log_item	*lip)
 {
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
 	struct xfs_buf		*bp;
+	bool			ret = true;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
 
@@ -725,7 +726,7 @@ xfs_inode_item_pushbuf(
 	if (completion_done(&ip->i_flush) ||
 	    !(lip->li_flags & XFS_LI_IN_AIL)) {
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-		return;
+		return true;
 	}
 
 	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
@@ -733,10 +734,13 @@ xfs_inode_item_pushbuf(
 
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 	if (!bp)
-		return;
+		return true;
 	if (XFS_BUF_ISDELAYWRITE(bp))
 		xfs_buf_delwri_promote(bp);
+	if (xfs_buf_ispinned(bp))
+		ret = false;
 	xfs_buf_relse(bp);
+	return ret;
 }
 
 /*
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 1e8a45e74c3e..828662f70d64 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -68,6 +68,8 @@
 #include <linux/ctype.h>
 #include <linux/writeback.h>
 #include <linux/capability.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <linux/list_sort.h>
 
 #include <asm/page.h>
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2366c54cc4fa..5cf06b85fd9d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1652,24 +1652,13 @@ xfs_init_workqueues(void)
 	 */
 	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
 	if (!xfs_syncd_wq)
-		goto out;
-
-	xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
-	if (!xfs_ail_wq)
-		goto out_destroy_syncd;
-
+		return -ENOMEM;
 	return 0;
-
-out_destroy_syncd:
-	destroy_workqueue(xfs_syncd_wq);
-out:
-	return -ENOMEM;
 }
 
 STATIC void
 xfs_destroy_workqueues(void)
 {
-	destroy_workqueue(xfs_ail_wq);
 	destroy_workqueue(xfs_syncd_wq);
 }
 
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 06a9759b6352..53597f4db9b5 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
 	void		(*iop_unlock)(xfs_log_item_t *);
 	xfs_lsn_t	(*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
 	void		(*iop_push)(xfs_log_item_t *);
-	void		(*iop_pushbuf)(xfs_log_item_t *);
+	bool		(*iop_pushbuf)(xfs_log_item_t *);
 	void		(*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 } xfs_item_ops_t;
 
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index c15aa29fa169..3a1e7ca54c2d 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,6 @@
28#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
29#include "xfs_error.h" 29#include "xfs_error.h"
30 30
31struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
32
33#ifdef DEBUG 31#ifdef DEBUG
34/* 32/*
35 * Check that the list is sorted as it should be. 33 * Check that the list is sorted as it should be.
@@ -356,16 +354,10 @@ xfs_ail_delete(
 	xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
-/*
- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
- * to run at a later time if there is more work to do to complete the push.
- */
-STATIC void
-xfs_ail_worker(
-	struct work_struct	*work)
+static long
+xfsaild_push(
+	struct xfs_ail		*ailp)
 {
-	struct xfs_ail		*ailp = container_of(to_delayed_work(work),
-					struct xfs_ail, xa_work);
 	xfs_mount_t		*mp = ailp->xa_mount;
 	struct xfs_ail_cursor	cur;
 	xfs_log_item_t		*lip;
@@ -427,8 +419,13 @@ xfs_ail_worker(
 
 	case XFS_ITEM_PUSHBUF:
 		XFS_STATS_INC(xs_push_ail_pushbuf);
-		IOP_PUSHBUF(lip);
-		ailp->xa_last_pushed_lsn = lsn;
+
+		if (!IOP_PUSHBUF(lip)) {
+			stuck++;
+			flush_log = 1;
+		} else {
+			ailp->xa_last_pushed_lsn = lsn;
+		}
 		push_xfsbufd = 1;
 		break;
 
@@ -440,7 +437,6 @@ xfs_ail_worker(
 
 	case XFS_ITEM_LOCKED:
 		XFS_STATS_INC(xs_push_ail_locked);
-		ailp->xa_last_pushed_lsn = lsn;
 		stuck++;
 		break;
 
@@ -501,20 +497,6 @@ out_done:
 		/* We're past our target or empty, so idle */
 		ailp->xa_last_pushed_lsn = 0;
 
-		/*
-		 * We clear the XFS_AIL_PUSHING_BIT first before checking
-		 * whether the target has changed. If the target has changed,
-		 * this pushes the requeue race directly onto the result of the
-		 * atomic test/set bit, so we are guaranteed that either the
-		 * the pusher that changed the target or ourselves will requeue
-		 * the work (but not both).
-		 */
-		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
-		smp_rmb();
-		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
-		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-			return;
-
 		tout = 50;
 	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
 		/*
@@ -537,9 +519,30 @@ out_done:
 		tout = 20;
 	}
 
-	/* There is more to do, requeue us. */
-	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
-					msecs_to_jiffies(tout));
+	return tout;
+}
+
+static int
+xfsaild(
+	void		*data)
+{
+	struct xfs_ail	*ailp = data;
+	long		tout = 0;	/* milliseconds */
+
+	while (!kthread_should_stop()) {
+		if (tout && tout <= 20)
+			__set_current_state(TASK_KILLABLE);
+		else
+			__set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(tout ?
+				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+		try_to_freeze();
+
+		tout = xfsaild_push(ailp);
+	}
+
+	return 0;
 }
 
 /*
@@ -574,8 +577,9 @@ xfs_ail_push(
 	 */
 	smp_wmb();
 	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
-	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
+	smp_wmb();
+
+	wake_up_process(ailp->xa_task);
 }
 
 /*
@@ -813,9 +817,18 @@ xfs_trans_ail_init(
 	INIT_LIST_HEAD(&ailp->xa_ail);
 	INIT_LIST_HEAD(&ailp->xa_cursors);
 	spin_lock_init(&ailp->xa_lock);
-	INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
+
+	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+			ailp->xa_mount->m_fsname);
+	if (IS_ERR(ailp->xa_task))
+		goto out_free_ailp;
+
 	mp->m_ail = ailp;
 	return 0;
+
+out_free_ailp:
+	kmem_free(ailp);
+	return ENOMEM;
 }
 
 void
@@ -824,6 +837,6 @@ xfs_trans_ail_destroy(
 {
 	struct xfs_ail	*ailp = mp->m_ail;
 
-	cancel_delayed_work_sync(&ailp->xa_work);
+	kthread_stop(ailp->xa_task);
 	kmem_free(ailp);
 }
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 212946b97239..22750b5e4a8f 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -64,23 +64,17 @@ struct xfs_ail_cursor {
  */
 struct xfs_ail {
 	struct xfs_mount	*xa_mount;
+	struct task_struct	*xa_task;
 	struct list_head	xa_ail;
 	xfs_lsn_t		xa_target;
 	struct list_head	xa_cursors;
 	spinlock_t		xa_lock;
-	struct delayed_work	xa_work;
 	xfs_lsn_t		xa_last_pushed_lsn;
-	unsigned long		xa_flags;
 };
 
-#define XFS_AIL_PUSHING_BIT	0
-
 /*
  * From xfs_trans_ail.c
  */
-
-extern struct workqueue_struct	*xfs_ail_wq;	/* AIL workqueue */
-
 void	xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
 				struct xfs_ail_cursor *cur,
 				struct xfs_log_item **log_items, int nr_items,