Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	51
1 files changed, 30 insertions, 21 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2af528dcfb04..9bbadafdcb00 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -318,8 +318,12 @@ xfs_buf_free(
 		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
 			free_address(bp->b_addr - bp->b_offset);
 
-		for (i = 0; i < bp->b_page_count; i++)
-			page_cache_release(bp->b_pages[i]);
+		for (i = 0; i < bp->b_page_count; i++) {
+			struct page	*page = bp->b_pages[i];
+
+			ASSERT(!PagePrivate(page));
+			page_cache_release(page);
+		}
 		_xfs_buf_free_pages(bp);
 	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
 		/*
@@ -400,6 +404,7 @@ _xfs_buf_lookup_pages(
 		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
 		size -= nbytes;
 
+		ASSERT(!PagePrivate(page));
 		if (!PageUptodate(page)) {
 			page_count--;
 			if (blocksize >= PAGE_CACHE_SIZE) {
@@ -768,7 +773,7 @@ xfs_buf_get_noaddr(
 	_xfs_buf_initialize(bp, target, 0, len, 0);
 
  try_again:
-	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
+	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
 	if (unlikely(data == NULL))
 		goto fail_free_buf;
 
@@ -1117,10 +1122,10 @@ xfs_buf_bio_end_io(
 	do {
 		struct page	*page = bvec->bv_page;
 
+		ASSERT(!PagePrivate(page));
 		if (unlikely(bp->b_error)) {
 			if (bp->b_flags & XBF_READ)
 				ClearPageUptodate(page);
-			SetPageError(page);
 		} else if (blocksize >= PAGE_CACHE_SIZE) {
 			SetPageUptodate(page);
 		} else if (!PagePrivate(page) &&
@@ -1156,16 +1161,16 @@ _xfs_buf_ioapply(
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
 
-	if (bp->b_flags & _XBF_RUN_QUEUES) {
-		bp->b_flags &= ~_XBF_RUN_QUEUES;
-		rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
-	} else {
-		rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
-	}
-
 	if (bp->b_flags & XBF_ORDERED) {
 		ASSERT(!(bp->b_flags & XBF_READ));
 		rw = WRITE_BARRIER;
+	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
+		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
+		bp->b_flags &= ~_XBF_RUN_QUEUES;
+		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
+	} else {
+		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
+		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
 	}
 
 	/* Special code path for reading a sub page size buffer in --
@@ -1681,6 +1686,7 @@ xfsbufd(
 	xfs_buf_t		*bp, *n;
 	struct list_head	*dwq = &target->bt_delwrite_queue;
 	spinlock_t		*dwlk = &target->bt_delwrite_lock;
+	int			count;
 
 	current->flags |= PF_MEMALLOC;
 
@@ -1696,6 +1702,7 @@ xfsbufd(
 		schedule_timeout_interruptible(
 			xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
+		count = 0;
 		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
 		spin_lock(dwlk);
 		list_for_each_entry_safe(bp, n, dwq, b_list) {
@@ -1711,9 +1718,11 @@ xfsbufd(
 				break;
 			}
 
-			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
+					 _XBF_RUN_QUEUES);
 			bp->b_flags |= XBF_WRITE;
-			list_move(&bp->b_list, &tmp);
+			list_move_tail(&bp->b_list, &tmp);
+			count++;
 		}
 	}
 	spin_unlock(dwlk);
@@ -1724,12 +1733,12 @@ xfsbufd(
 
 			list_del_init(&bp->b_list);
 			xfs_buf_iostrategy(bp);
-
-			blk_run_address_space(target->bt_mapping);
 		}
 
 		if (as_list_len > 0)
 			purge_addresses();
+		if (count)
+			blk_run_address_space(target->bt_mapping);
 
 		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
 	} while (!kthread_should_stop());
@@ -1767,7 +1776,7 @@ xfs_flush_buftarg(
 			continue;
 		}
 
-		list_move(&bp->b_list, &tmp);
+		list_move_tail(&bp->b_list, &tmp);
 	}
 	spin_unlock(dwlk);
 
@@ -1776,7 +1785,7 @@ xfs_flush_buftarg(
 	 */
 	list_for_each_entry_safe(bp, n, &tmp, b_list) {
 		xfs_buf_lock(bp);
-		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
 		bp->b_flags |= XBF_WRITE;
 		if (wait)
 			bp->b_flags &= ~XBF_ASYNC;
@@ -1786,6 +1795,9 @@ xfs_flush_buftarg(
 		xfs_buf_iostrategy(bp);
 	}
 
+	if (wait)
+		blk_run_address_space(target->bt_mapping);
+
 	/*
 	 * Remaining list items must be flushed before returning
 	 */
@@ -1797,9 +1809,6 @@ xfs_flush_buftarg(
 		xfs_buf_relse(bp);
 	}
 
-	if (wait)
-		blk_run_address_space(target->bt_mapping);
-
 	return pincount;
 }
 