path: root/fs/xfs
author    Alex Elder <aelder@sgi.com>  2011-10-17 16:42:02 -0400
committer Alex Elder <aelder@sgi.com>  2011-10-17 16:42:02 -0400
commit    9508534c5f170ada5a745cde0df49732718440e9 (patch)
tree      2b0f1e19aa529e77720522d13541adedbc46564e /fs/xfs
parent    5a93a064d27b42e4af1772b0599b53e3241191ac (diff)
parent    a84a79e4d369a73c0130b5858199e949432da4c6 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Resolved conflicts:

  fs/xfs/xfs_trans_priv.h:
    - deleted struct xfs_ail field xa_flags
    - kept field xa_log_flush in struct xfs_ail
  fs/xfs/xfs_trans_ail.c:
    - in xfsaild_push(), in XFS_ITEM_PUSHBUF case, replaced
      "flush_log = 1" with "ailp->xa_log_flush++"

Signed-off-by: Alex Elder <aelder@sgi.com>
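As a reading aid, a minimal sketch of how the XFS_ITEM_PUSHBUF case in xfsaild_push() reads after the conflict resolution above, assuming only the stated substitution of "flush_log = 1" with "ailp->xa_log_flush++" applied to the hunk shown further down; the merged tree itself is authoritative:

        case XFS_ITEM_PUSHBUF:
                XFS_STATS_INC(xs_push_ail_pushbuf);

                if (!IOP_PUSHBUF(lip)) {
                        /* backing buffer could not be pushed: count the item
                         * as stuck and ask for a log flush instead */
                        stuck++;
                        ailp->xa_log_flush++;   /* was "flush_log = 1" before the merge */
                } else {
                        ailp->xa_last_pushed_lsn = lsn;
                }
                push_xfsbufd = 1;
                break;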
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_buf_item.c    |  3
-rw-r--r--  fs/xfs/xfs_dquot_item.c  | 10
-rw-r--r--  fs/xfs/xfs_inode_item.c  | 10
-rw-r--r--  fs/xfs/xfs_linux.h       |  2
-rw-r--r--  fs/xfs/xfs_super.c       | 13
-rw-r--r--  fs/xfs/xfs_trans.h       |  2
-rw-r--r--  fs/xfs/xfs_trans_ail.c   | 83
-rw-r--r--  fs/xfs/xfs_trans_priv.h  |  8
8 files changed, 69 insertions(+), 62 deletions(-)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 65d6f4432d28..1a3513881bce 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -629,7 +629,7 @@ xfs_buf_item_push(
  * the xfsbufd to get this buffer written. We have to unlock the buffer
  * to allow the xfsbufd to write it, too.
  */
-STATIC void
+STATIC bool
 xfs_buf_item_pushbuf(
         struct xfs_log_item *lip)
 {
@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
 
         xfs_buf_delwri_promote(bp);
         xfs_buf_relse(bp);
+        return true;
 }
 
 STATIC void
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 9e0e2fa3f2c8..bb3f71d236d2 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
  * search the buffer cache can be a time consuming thing, and AIL lock is a
  * spinlock.
  */
-STATIC void
+STATIC bool
 xfs_qm_dquot_logitem_pushbuf(
         struct xfs_log_item *lip)
 {
         struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
         struct xfs_dquot *dqp = qlip->qli_dquot;
         struct xfs_buf *bp;
+        bool ret = true;
 
         ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
         if (completion_done(&dqp->q_flush) ||
             !(lip->li_flags & XFS_LI_IN_AIL)) {
                 xfs_dqunlock(dqp);
-                return;
+                return true;
         }
 
         bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
                         dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
         xfs_dqunlock(dqp);
         if (!bp)
-                return;
+                return true;
         if (XFS_BUF_ISDELAYWRITE(bp))
                 xfs_buf_delwri_promote(bp);
+        if (xfs_buf_ispinned(bp))
+                ret = false;
         xfs_buf_relse(bp);
+        return ret;
 }
 
 /*
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 8704a99241d7..b7cf21ba240f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -706,13 +706,14 @@ xfs_inode_item_committed(
  * marked delayed write. If that's the case, we'll promote it and that will
  * allow the caller to write the buffer by triggering the xfsbufd to run.
  */
-STATIC void
+STATIC bool
 xfs_inode_item_pushbuf(
         struct xfs_log_item *lip)
 {
         struct xfs_inode_log_item *iip = INODE_ITEM(lip);
         struct xfs_inode *ip = iip->ili_inode;
         struct xfs_buf *bp;
+        bool ret = true;
 
         ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
 
@@ -723,7 +724,7 @@ xfs_inode_item_pushbuf(
         if (completion_done(&ip->i_flush) ||
             !(lip->li_flags & XFS_LI_IN_AIL)) {
                 xfs_iunlock(ip, XFS_ILOCK_SHARED);
-                return;
+                return true;
         }
 
         bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
@@ -731,10 +732,13 @@ xfs_inode_item_pushbuf(
 
         xfs_iunlock(ip, XFS_ILOCK_SHARED);
         if (!bp)
-                return;
+                return true;
         if (XFS_BUF_ISDELAYWRITE(bp))
                 xfs_buf_delwri_promote(bp);
+        if (xfs_buf_ispinned(bp))
+                ret = false;
         xfs_buf_relse(bp);
+        return ret;
 }
 
 /*
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 1e8a45e74c3e..828662f70d64 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -68,6 +68,8 @@
 #include <linux/ctype.h>
 #include <linux/writeback.h>
 #include <linux/capability.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <linux/list_sort.h>
 
 #include <asm/page.h>
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ba16248bcf24..3eca58f51ae9 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1648,24 +1648,13 @@ xfs_init_workqueues(void)
          */
         xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
         if (!xfs_syncd_wq)
-                goto out;
-
-        xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
-        if (!xfs_ail_wq)
-                goto out_destroy_syncd;
-
+                return -ENOMEM;
         return 0;
-
-out_destroy_syncd:
-        destroy_workqueue(xfs_syncd_wq);
-out:
-        return -ENOMEM;
 }
 
 STATIC void
 xfs_destroy_workqueues(void)
 {
-        destroy_workqueue(xfs_ail_wq);
         destroy_workqueue(xfs_syncd_wq);
 }
 
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index f5df16969f82..603f3eb52041 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
         void (*iop_unlock)(xfs_log_item_t *);
         xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
         void (*iop_push)(xfs_log_item_t *);
-        void (*iop_pushbuf)(xfs_log_item_t *);
+        bool (*iop_pushbuf)(xfs_log_item_t *);
         void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 } xfs_item_ops_t;
 
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 9df7f9f1b5ee..512ff646d01c 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,6 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-struct workqueue_struct *xfs_ail_wq;    /* AIL workqueue */
-
 #ifdef DEBUG
 /*
  * Check that the list is sorted as it should be.
@@ -356,16 +354,10 @@ xfs_ail_delete(
         xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
-/*
- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
- * to run at a later time if there is more work to do to complete the push.
- */
-STATIC void
-xfs_ail_worker(
-        struct work_struct *work)
+static long
+xfsaild_push(
+        struct xfs_ail *ailp)
 {
-        struct xfs_ail *ailp = container_of(to_delayed_work(work),
-                                        struct xfs_ail, xa_work);
         xfs_mount_t *mp = ailp->xa_mount;
         struct xfs_ail_cursor cur;
         xfs_log_item_t *lip;
@@ -439,8 +431,13 @@ xfs_ail_worker(
 
                 case XFS_ITEM_PUSHBUF:
                         XFS_STATS_INC(xs_push_ail_pushbuf);
-                        IOP_PUSHBUF(lip);
-                        ailp->xa_last_pushed_lsn = lsn;
+
+                        if (!IOP_PUSHBUF(lip)) {
+                                stuck++;
+                                flush_log = 1;
+                        } else {
+                                ailp->xa_last_pushed_lsn = lsn;
+                        }
                         push_xfsbufd = 1;
                         break;
 
@@ -452,7 +449,6 @@ xfs_ail_worker(
 
                 case XFS_ITEM_LOCKED:
                         XFS_STATS_INC(xs_push_ail_locked);
-                        ailp->xa_last_pushed_lsn = lsn;
                         stuck++;
                         break;
 
@@ -504,20 +500,6 @@ out_done:
                 ailp->xa_last_pushed_lsn = 0;
                 ailp->xa_log_flush = 0;
 
-                /*
-                 * We clear the XFS_AIL_PUSHING_BIT first before checking
-                 * whether the target has changed. If the target has changed,
-                 * this pushes the requeue race directly onto the result of the
-                 * atomic test/set bit, so we are guaranteed that either the
-                 * the pusher that changed the target or ourselves will requeue
-                 * the work (but not both).
-                 */
-                clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
-                smp_rmb();
-                if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
-                    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-                        return;
-
                 tout = 50;
         } else if (XFS_LSN_CMP(lsn, target) >= 0) {
                 /*
@@ -544,9 +526,30 @@ out_done:
                 ailp->xa_last_pushed_lsn = 0;
         }
 
-        /* There is more to do, requeue us.  */
-        queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
-                                        msecs_to_jiffies(tout));
+        return tout;
+}
+
+static int
+xfsaild(
+        void *data)
+{
+        struct xfs_ail *ailp = data;
+        long tout = 0;  /* milliseconds */
+
+        while (!kthread_should_stop()) {
+                if (tout && tout <= 20)
+                        __set_current_state(TASK_KILLABLE);
+                else
+                        __set_current_state(TASK_INTERRUPTIBLE);
+                schedule_timeout(tout ?
+                                 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+                try_to_freeze();
+
+                tout = xfsaild_push(ailp);
+        }
+
+        return 0;
 }
 
 /*
@@ -581,8 +584,9 @@ xfs_ail_push(
          */
         smp_wmb();
         xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
-        if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
-                queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
+        smp_wmb();
+
+        wake_up_process(ailp->xa_task);
 }
 
 /*
@@ -820,9 +824,18 @@ xfs_trans_ail_init(
         INIT_LIST_HEAD(&ailp->xa_ail);
         INIT_LIST_HEAD(&ailp->xa_cursors);
         spin_lock_init(&ailp->xa_lock);
-        INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
+
+        ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+                        ailp->xa_mount->m_fsname);
+        if (IS_ERR(ailp->xa_task))
+                goto out_free_ailp;
+
         mp->m_ail = ailp;
         return 0;
+
+out_free_ailp:
+        kmem_free(ailp);
+        return ENOMEM;
 }
 
 void
@@ -831,6 +844,6 @@ xfs_trans_ail_destroy(
 {
         struct xfs_ail *ailp = mp->m_ail;
 
-        cancel_delayed_work_sync(&ailp->xa_work);
+        kthread_stop(ailp->xa_task);
         kmem_free(ailp);
 }
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 0a6eec6d472a..44820b9fcb43 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -64,24 +64,18 @@ struct xfs_ail_cursor {
  */
 struct xfs_ail {
         struct xfs_mount *xa_mount;
+        struct task_struct *xa_task;
         struct list_head xa_ail;
         xfs_lsn_t xa_target;
         struct list_head xa_cursors;
         spinlock_t xa_lock;
-        struct delayed_work xa_work;
         xfs_lsn_t xa_last_pushed_lsn;
-        unsigned long xa_flags;
         int xa_log_flush;
 };
 
-#define XFS_AIL_PUSHING_BIT 0
-
 /*
  * From xfs_trans_ail.c
  */
-
-extern struct workqueue_struct *xfs_ail_wq;     /* AIL workqueue */
-
 void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
                 struct xfs_ail_cursor *cur,
                 struct xfs_log_item **log_items, int nr_items,