author		Chris Mason <chris.mason@oracle.com>	2009-09-02 13:24:36 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-11 13:31:06 -0400
commit		1edbb734b4e010974c41d2859d22a43d04f5f1cf (patch)
tree		4f43aea677f7206707540dd8622fa4cac099057a /fs/btrfs/file.c
parent		e48c465bb366c0169f7908bfe62ae7080874ee7d (diff)
Btrfs: reduce CPU usage in the extent_state tree
Btrfs is currently mirroring some of the page state bits into its extent state tree. The goal behind this was to use it in supporting blocksizes other than the page size.

But we don't currently support that, and we're using quite a lot of CPU on the rb tree and its spin lock. This commit starts a series of cleanups to reduce the amount of work done in the extent state tree as part of each IO.

This commit:

* Adds the ability to lock an extent in the state tree and also set other bits. The idea is to do locking and delalloc in one call.

* Removes the EXTENT_WRITEBACK and EXTENT_DIRTY bits. Btrfs is using a combination of the page bits and the ordered write code for this instead.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
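As a rough illustration of the "lock and set other bits in one call" idea, the self-contained user-space sketch below reduces the state tree to a single range record; the struct, helper names, and flag values are invented for this example and are not the interfaces added by this patch. The point is that one update applies EXTENT_LOCKED together with any extra bits such as EXTENT_DELALLOC, instead of locking in one pass and marking delalloc in a second.

/*
 * Illustrative sketch only: a single-range stand-in for the extent_state
 * tree.  Names and flag values are invented for this example.
 */
#include <stdio.h>

#define EXTENT_LOCKED   (1u << 0)
#define EXTENT_DELALLOC (1u << 1)

struct range_state {
	unsigned long long start;
	unsigned long long end;
	unsigned int bits;
};

/* One update applies every requested bit in a single pass. */
static void set_range_bits(struct range_state *st, unsigned long long start,
			   unsigned long long end, unsigned int bits)
{
	st->start = start;
	st->end = end;
	st->bits |= bits;
}

/*
 * Hypothetical combined helper: lock the range and set extra bits
 * (e.g. delalloc) with one call instead of two separate passes.
 */
static void lock_range_bits(struct range_state *st, unsigned long long start,
			    unsigned long long end, unsigned int extra_bits)
{
	set_range_bits(st, start, end, EXTENT_LOCKED | extra_bits);
}

int main(void)
{
	struct range_state st = { 0, 0, 0 };

	/* Lock bytes [0, 4095] and mark them delalloc in one shot. */
	lock_range_bits(&st, 0, 4095, EXTENT_DELALLOC);
	printf("start=%llu end=%llu bits=0x%x\n", st.start, st.end, st.bits);
	return 0;
}

In the real tree, each set/clear of a bit is a walk of the rb tree under its spinlock, so folding the delalloc marking into the locking pass removes one of those walks from every buffered write.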
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--	fs/btrfs/file.c	19
1 files changed, 0 insertions, 19 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 8a9c76aecdf3..ef66c3d989b9 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -113,8 +113,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	int err = 0;
 	int i;
 	struct inode *inode = fdentry(file)->d_inode;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	u64 hint_byte;
 	u64 num_bytes;
 	u64 start_pos;
 	u64 end_of_last_block;
@@ -126,20 +124,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
 	end_of_last_block = start_pos + num_bytes - 1;
-
-	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-	trans = btrfs_join_transaction(root, 1);
-	if (!trans) {
-		err = -ENOMEM;
-		goto out_unlock;
-	}
-	btrfs_set_trans_block_group(trans, inode);
-	hint_byte = 0;
-
-	/* check for reserved extents on each page, we don't want
-	 * to reset the delalloc bit on things that already have
-	 * extents reserved.
-	 */
 	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = pages[i];
@@ -154,9 +138,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		 * at this time.
 		 */
 	}
-	err = btrfs_end_transaction(trans, root);
-out_unlock:
-	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
 	return err;
 }
 