Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	| 142
1 file changed, 72 insertions(+), 70 deletions(-)
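A note for readers outside XFS: the STATIC and STATIC_INLINE annotations touched below are XFS-local macros from xfs_linux.h, not C keywords. In normal builds they expand to plain static / static inline; in debug builds they can be redefined so the symbols stay visible to debuggers. The sketch below shows the idea only and is not the literal kernel definition:

	/* Illustrative only -- the exact xfs_linux.h definitions may differ. */
	#ifdef DEBUG
	# define STATIC				/* keep symbol global for debugging */
	# define STATIC_INLINE	static		/* avoid forced inlining under debug */
	#else
	# define STATIC		static
	# define STATIC_INLINE	static inline
	#endif

The thrust of the declaration hunks is that file-scope data (xfs_buf_zone, as_free_head, the workqueues) gains nothing from debug visibility and so becomes plain static, while the old "STATIC inline" and "STATIC __inline__" spellings become STATIC_INLINE.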
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4fb01ffdfd1a..e2bea6a661f0 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -34,13 +34,13 @@
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
-STATIC kmem_zone_t *xfs_buf_zone;
-STATIC kmem_shaker_t xfs_buf_shake;
+static kmem_zone_t *xfs_buf_zone;
+static kmem_shaker_t xfs_buf_shake;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 
-STATIC struct workqueue_struct *xfslogd_workqueue;
+static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
 
 #ifdef XFS_BUF_TRACE
@@ -139,7 +139,7 @@ page_region_mask(
 	return mask;
 }
 
-STATIC inline void
+STATIC_INLINE void
 set_page_region(
 	struct page	*page,
 	size_t		offset,
@@ -151,7 +151,7 @@ set_page_region(
 	SetPageUptodate(page);
 }
 
-STATIC inline int
+STATIC_INLINE int
 test_page_region(
 	struct page	*page,
 	size_t		offset,
@@ -171,9 +171,9 @@ typedef struct a_list {
 	struct a_list	*next;
 } a_list_t;
 
-STATIC a_list_t	*as_free_head;
-STATIC int	as_list_len;
-STATIC DEFINE_SPINLOCK(as_lock);
+static a_list_t	*as_free_head;
+static int	as_list_len;
+static DEFINE_SPINLOCK(as_lock);
 
 /*
  * Try to batch vunmaps because they are costly.
@@ -1085,7 +1085,7 @@ xfs_buf_iostart(
 	return status;
 }
 
-STATIC __inline__ int
+STATIC_INLINE int
 _xfs_buf_iolocked(
 	xfs_buf_t		*bp)
 {
@@ -1095,7 +1095,7 @@ _xfs_buf_iolocked(
 	return 0;
 }
 
-STATIC __inline__ void
+STATIC_INLINE void
 _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
@@ -1426,8 +1426,8 @@ xfs_free_bufhash(
 /*
  * buftarg list for delwrite queue processing
  */
-STATIC LIST_HEAD(xfs_buftarg_list);
-STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
+LIST_HEAD(xfs_buftarg_list);
+static DEFINE_SPINLOCK(xfs_buftarg_lock);
 
 STATIC void
 xfs_register_buftarg(
@@ -1679,21 +1679,60 @@ xfsbufd_wakeup(
 	return 0;
 }
 
+/*
+ * Move as many buffers as specified to the supplied list,
+ * indicating if we skipped any buffers to prevent deadlocks.
+ */
+STATIC int
+xfs_buf_delwri_split(
+	xfs_buftarg_t	*target,
+	struct list_head *list,
+	unsigned long	age)
+{
+	xfs_buf_t	*bp, *n;
+	struct list_head *dwq = &target->bt_delwrite_queue;
+	spinlock_t	*dwlk = &target->bt_delwrite_lock;
+	int		skipped = 0;
+	int		force;
+
+	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
+	INIT_LIST_HEAD(list);
+	spin_lock(dwlk);
+	list_for_each_entry_safe(bp, n, dwq, b_list) {
+		XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
+		ASSERT(bp->b_flags & XBF_DELWRI);
+
+		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
+			if (!force &&
+			    time_before(jiffies, bp->b_queuetime + age)) {
+				xfs_buf_unlock(bp);
+				break;
+			}
+
+			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
+					 _XBF_RUN_QUEUES);
+			bp->b_flags |= XBF_WRITE;
+			list_move_tail(&bp->b_list, list);
+		} else
+			skipped++;
+	}
+	spin_unlock(dwlk);
+
+	return skipped;
+
+}
+
 STATIC int
 xfsbufd(
 	void		*data)
 {
 	struct list_head tmp;
-	unsigned long	age;
-	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
-	xfs_buf_t	*bp, *n;
-	struct list_head *dwq = &target->bt_delwrite_queue;
-	spinlock_t	*dwlk = &target->bt_delwrite_lock;
-	int		count;
+	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
+	int		count;
+	xfs_buf_t	*bp;
 
 	current->flags |= PF_MEMALLOC;
 
-	INIT_LIST_HEAD(&tmp);
 	do {
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1705,37 +1744,17 @@ xfsbufd(
 			schedule_timeout_interruptible(
 				xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
-		count = 0;
-		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
-		spin_lock(dwlk);
-		list_for_each_entry_safe(bp, n, dwq, b_list) {
-			XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
-			ASSERT(bp->b_flags & XBF_DELWRI);
-
-			if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
-				if (!test_bit(XBT_FORCE_FLUSH,
-						&target->bt_flags) &&
-				    time_before(jiffies,
-						bp->b_queuetime + age)) {
-					xfs_buf_unlock(bp);
-					break;
-				}
-
-				bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
-						 _XBF_RUN_QUEUES);
-				bp->b_flags |= XBF_WRITE;
-				list_move_tail(&bp->b_list, &tmp);
-				count++;
-			}
-		}
-		spin_unlock(dwlk);
+		xfs_buf_delwri_split(target, &tmp,
+				xfs_buf_age_centisecs * msecs_to_jiffies(10));
 
+		count = 0;
 		while (!list_empty(&tmp)) {
 			bp = list_entry(tmp.next, xfs_buf_t, b_list);
 			ASSERT(target == bp->b_target);
 
 			list_del_init(&bp->b_list);
 			xfs_buf_iostrategy(bp);
+			count++;
 		}
 
 		if (as_list_len > 0)
@@ -1743,7 +1762,6 @@ xfsbufd(
 		if (count)
 			blk_run_address_space(target->bt_mapping);
 
-		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
 	} while (!kthread_should_stop());
 
 	return 0;
@@ -1756,40 +1774,24 @@ xfsbufd(
  */
 int
 xfs_flush_buftarg(
 	xfs_buftarg_t	*target,
 	int		wait)
 {
 	struct list_head tmp;
 	xfs_buf_t	*bp, *n;
 	int		pincount = 0;
-	struct list_head *dwq = &target->bt_delwrite_queue;
-	spinlock_t	*dwlk = &target->bt_delwrite_lock;
 
 	xfs_buf_runall_queues(xfsdatad_workqueue);
 	xfs_buf_runall_queues(xfslogd_workqueue);
 
-	INIT_LIST_HEAD(&tmp);
-	spin_lock(dwlk);
-	list_for_each_entry_safe(bp, n, dwq, b_list) {
-		ASSERT(bp->b_target == target);
-		ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
-		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
-		if (xfs_buf_ispin(bp)) {
-			pincount++;
-			continue;
-		}
-
-		list_move_tail(&bp->b_list, &tmp);
-	}
-	spin_unlock(dwlk);
+	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
+	pincount = xfs_buf_delwri_split(target, &tmp, 0);
 
 	/*
 	 * Dropped the delayed write list lock, now walk the temporary list
 	 */
 	list_for_each_entry_safe(bp, n, &tmp, b_list) {
-		xfs_buf_lock(bp);
-		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
-		bp->b_flags |= XBF_WRITE;
+		ASSERT(target == bp->b_target);
 		if (wait)
 			bp->b_flags &= ~XBF_ASYNC;
 		else
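Taken together, the xfsbufd() and xfs_flush_buftarg() hunks converge on one helper: take the delwri lock once, move every eligible buffer onto a caller-private list, and only then do the actual work with the lock dropped. Below is a minimal, self-contained userspace sketch of that split-then-process pattern; it uses plain pthreads and a hand-rolled singly linked list, and none of its names or types are the real XFS ones:

	/* split_sketch.c -- illustrative analogue of xfs_buf_delwri_split(). */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct item {
		int		id;
		int		pinned;		/* stand-in for xfs_buf_ispin() */
		struct item	*next;
	};

	static struct item *queue_head;		/* shared delwri-style queue */
	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Move unpinned items onto *split; return how many were skipped. */
	static int split_queue(struct item **split)
	{
		struct item **pp, *it;
		int skipped = 0;

		*split = NULL;
		pthread_mutex_lock(&queue_lock);
		pp = &queue_head;
		while ((it = *pp) != NULL) {
			if (it->pinned) {	/* cannot take it right now */
				skipped++;
				pp = &it->next;
				continue;
			}
			*pp = it->next;		/* unlink from shared queue */
			it->next = *split;	/* push onto private list */
			*split = it;
		}
		pthread_mutex_unlock(&queue_lock);
		return skipped;
	}

	int main(void)
	{
		struct item *priv, *it;

		for (int i = 0; i < 4; i++) {
			it = malloc(sizeof(*it));
			it->id = i;
			it->pinned = (i == 2);	/* one "pinned" item stays queued */
			it->next = queue_head;
			queue_head = it;
		}

		int skipped = split_queue(&priv);

		/* Process the private list without holding queue_lock. */
		while ((it = priv) != NULL) {
			priv = it->next;
			printf("processing item %d\n", it->id);
			free(it);
		}
		printf("skipped %d pinned item(s)\n", skipped);
		return 0;
	}

Returning the skipped count, as xfs_buf_delwri_split() does for pinned or contended buffers, lets the caller decide whether to retry: xfs_flush_buftarg() uses it as its pincount, while xfsbufd() simply ignores it and picks the buffers up on the next timer tick.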