author     Chris Mason <chris.mason@oracle.com>   2008-04-30 13:59:35 -0400
committer  Chris Mason <chris.mason@oracle.com>   2008-09-25 11:04:02 -0400
commit     d6bfde8765668c8687de72f7a40f52acdf4f2f19 (patch)
tree       2e05fc608da861dd0935dedf88d94627f13a18b3 /fs/btrfs/disk-io.c
parent     2fff734fafa742236aecbdcdc8b1ff4d221cbaca (diff)
Btrfs: Fixes for 2.6.18 enterprise kernels
2.6.18 seems to get caught in an infinite loop when
cancel_rearming_delayed_workqueue is called more than once, so this switches
to cancel_delayed_work, which is arguably more correct.

Also, balance_dirty_pages can run into problems with 2.6.18 based kernels
because it doesn't have the per-bdi dirty limits. This avoids calling
balance_dirty_pages on the btree inode unless there is actually something to
balance, which is a good optimization in general.

Finally there's a compile fix for ordered-data.h

Signed-off-by: Chris Mason <chris.mason@oracle.com>
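Note that the cancel_rearming_delayed_workqueue change described above lands
outside disk-io.c, so it is not visible in the hunks below. As a hedged sketch
of the API swap only (the cleaner_work item and stop_cleaner() helper are
hypothetical illustrations, not names taken from this patch), the pattern
looks roughly like this:

#include <linux/workqueue.h>

static struct delayed_work cleaner_work;	/* hypothetical example item */

static void stop_cleaner(struct workqueue_struct *wq)
{
	/*
	 * Old call, prone to looping forever on 2.6.18 when issued twice:
	 *	cancel_rearming_delayed_workqueue(wq, &cleaner_work);
	 *
	 * New call: just cancel any pending run.  Flushing the queue
	 * afterwards (an extra step shown here for safety, not taken from
	 * this patch) waits out an instance that is already executing.
	 */
	cancel_delayed_work(&cleaner_work);
	flush_workqueue(wq);
}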
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--  fs/btrfs/disk-io.c | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e35e70165b53..fabc31b334b6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1548,6 +1548,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_commit_transaction(trans, root);
 	ret = btrfs_write_and_wait_transaction(NULL, root);
 	BUG_ON(ret);
+
 	write_ctree_super(NULL, root);
 	mutex_unlock(&fs_info->fs_mutex);
 
@@ -1583,17 +1584,17 @@ int close_ctree(struct btrfs_root *root)
 	extent_io_tree_empty_lru(&fs_info->extent_ins);
 	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
 
-	flush_workqueue(end_io_workqueue);
 	flush_workqueue(async_submit_workqueue);
+	flush_workqueue(end_io_workqueue);
 
 	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
 
-	flush_workqueue(end_io_workqueue);
-	destroy_workqueue(end_io_workqueue);
-
 	flush_workqueue(async_submit_workqueue);
 	destroy_workqueue(async_submit_workqueue);
 
+	flush_workqueue(end_io_workqueue);
+	destroy_workqueue(end_io_workqueue);
+
 	iput(fs_info->btree_inode);
 #if 0
 	while(!list_empty(&fs_info->hashers)) {
@@ -1663,8 +1664,21 @@ void btrfs_throttle(struct btrfs_root *root)
 
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 {
-	balance_dirty_pages_ratelimited_nr(
+	struct extent_io_tree *tree;
+	u64 num_dirty;
+	u64 start = 0;
+	unsigned long thresh = 16 * 1024 * 1024;
+	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+
+	if (current_is_pdflush())
+		return;
+
+	num_dirty = count_range_bits(tree, &start, (u64)-1,
+				     thresh, EXTENT_DIRTY);
+	if (num_dirty > thresh) {
+		balance_dirty_pages_ratelimited_nr(
 				   root->fs_info->btree_inode->i_mapping, 1);
+	}
 }
 
 void btrfs_set_buffer_defrag(struct extent_buffer *buf)
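
Read as a whole, the btrfs_btree_balance_dirty() that results from the last
hunk (reconstructed from the diff above, with explanatory comments added) is
roughly:

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	struct extent_io_tree *tree;
	u64 num_dirty;
	u64 start = 0;
	unsigned long thresh = 16 * 1024 * 1024;
	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

	/* pdflush is already doing writeback; don't throttle it as well. */
	if (current_is_pdflush())
		return;

	/* Count dirty bytes in the btree inode's io_tree; the thresh
	 * argument caps how far the search needs to go. */
	num_dirty = count_range_bits(tree, &start, (u64)-1,
				     thresh, EXTENT_DIRTY);

	/* Only call into balance_dirty_pages when there is enough dirty
	 * metadata to make it worthwhile, which matters on 2.6.18 kernels
	 * that lack per-bdi dirty limits. */
	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				root->fs_info->btree_inode->i_mapping, 1);
	}
}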