aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-09-02 15:11:07 -0400
committerChris Mason <chris.mason@oracle.com>2009-09-11 13:31:06 -0400
commitd5550c6315fe0647b7ac21a6a736bf4a42620eac (patch)
treed1aeeee5ef7b04915dd6eb1c220b3e137ce4d9b3 /fs/btrfs
parent2c64c53d8d30d43d0670482503a3914dfd3d6d46 (diff)
Btrfs: don't lock bits in the extent tree during writepage
At writepage time, we have the page locked and we have the extent_map entry for this extent pinned in the extent_map tree. So, the page can't go away and its mapping can't change. There is no need for the extra extent_state lock bits during writepage. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/extent_io.c | 21
1 file changed, 0 insertions(+), 21 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c7a5e860fe21..04fafc3cffc0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2219,16 +2219,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			goto done_unlocked;
 		}
 	}
-	lock_extent_bits(tree, start, page_end, 0, &cached_state, GFP_NOFS);
-
-	unlock_start = start;
-
 	if (tree->ops && tree->ops->writepage_start_hook) {
 		ret = tree->ops->writepage_start_hook(page, start,
 						      page_end);
 		if (ret == -EAGAIN) {
-			unlock_extent_cached(tree, start, page_end,
-					     &cached_state, GFP_NOFS);
 			redirty_page_for_writepage(wbc, page);
 			update_nr_written(page, wbc, nr_written);
 			unlock_page(page);
@@ -2244,13 +2238,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	update_nr_written(page, wbc, nr_written + 1);

 	end = page_end;
-	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
-		printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
-
 	if (last_byte <= start) {
-		clear_extent_bit(tree, start, page_end,
-				 EXTENT_LOCKED | EXTENT_DIRTY,
-				 1, 0, NULL, GFP_NOFS);
 		if (tree->ops && tree->ops->writepage_end_io_hook)
 			tree->ops->writepage_end_io_hook(page, start,
 							 page_end, NULL, 1);
@@ -2262,8 +2250,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,

 	while (cur <= end) {
 		if (cur >= last_byte) {
-			unlock_extent_cached(tree, unlock_start, page_end,
-					     &cached_state, GFP_NOFS);
 			if (tree->ops && tree->ops->writepage_end_io_hook)
 				tree->ops->writepage_end_io_hook(page, cur,
 								 page_end, NULL, 1);
@@ -2295,10 +2281,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		 */
 		if (compressed || block_start == EXTENT_MAP_HOLE ||
 		    block_start == EXTENT_MAP_INLINE) {
-			unlock_extent_cached(tree, unlock_start,
-					     cur + iosize - 1, &cached_state,
-					     GFP_NOFS);
-
 			/*
 			 * end_io notification does not happen here for
 			 * compressed extents
@@ -2366,9 +2348,6 @@ done:
 		set_page_writeback(page);
 		end_page_writeback(page);
 	}
-	if (unlock_start <= page_end)
-		unlock_extent_cached(tree, unlock_start, page_end,
-				     &cached_state, GFP_NOFS);
 	unlock_page(page);

done_unlocked: