Diffstat (limited to 'fs/btrfs/extent_io.c')
 fs/btrfs/extent_io.c | 309 ++++++++++++++++++++++-----------------------------
 1 file changed, 126 insertions(+), 183 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7055d11c1ef..d418164a35f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
  *
  * This should be called with the tree lock held.
  */
-static int merge_state(struct extent_io_tree *tree,
+static void merge_state(struct extent_io_tree *tree,
 		       struct extent_state *state)
 {
 	struct extent_state *other;
 	struct rb_node *other_node;
 
 	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
-		return 0;
+		return;
 
 	other_node = rb_prev(&state->rb_node);
 	if (other_node) {
@@ -281,26 +281,19 @@ static int merge_state(struct extent_io_tree *tree,
 		if (other->start == state->end + 1 &&
 		    other->state == state->state) {
 			merge_cb(tree, state, other);
-			other->start = state->start;
-			state->tree = NULL;
-			rb_erase(&state->rb_node, &tree->state);
-			free_extent_state(state);
-			state = NULL;
+			state->end = other->end;
+			other->tree = NULL;
+			rb_erase(&other->rb_node, &tree->state);
+			free_extent_state(other);
 		}
 	}
-
-	return 0;
 }
 
-static int set_state_cb(struct extent_io_tree *tree,
+static void set_state_cb(struct extent_io_tree *tree,
 			 struct extent_state *state, int *bits)
 {
-	if (tree->ops && tree->ops->set_bit_hook) {
-		return tree->ops->set_bit_hook(tree->mapping->host,
-					       state, bits);
-	}
-
-	return 0;
+	if (tree->ops && tree->ops->set_bit_hook)
+		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
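
The merge above also changes direction: instead of growing the successor backward and freeing `state`, it grows `state` forward and frees `other`, so a caller still holding the `state` pointer (for caching, say) is never left with a freed node. A minimal standalone sketch of that pattern, using the field names from this file (illustrative only, not part of the patch):

	/*
	 * Forward-merge sketch: absorb the adjacent successor 'other'
	 * into 'state' so the caller's 'state' pointer stays valid.
	 * Assumes the tree lock is held, as merge_state() requires.
	 */
	static void merge_forward(struct extent_io_tree *tree,
				  struct extent_state *state,
				  struct extent_state *other)
	{
		state->end = other->end;	/* grow 'state' over 'other' */
		other->tree = NULL;
		rb_erase(&other->rb_node, &tree->state);
		free_extent_state(other);	/* 'state' is untouched */
	}
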
@@ -310,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree,
 		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 }
 
+static void set_state_bits(struct extent_io_tree *tree,
+			   struct extent_state *state, int *bits);
+
 /*
  * insert an extent_state struct into the tree. 'bits' are set on the
  * struct before it is inserted.
@@ -325,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree,
 			int *bits)
 {
 	struct rb_node *node;
-	int bits_to_set = *bits & ~EXTENT_CTLBITS;
-	int ret;
 
 	if (end < start) {
 		printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -336,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree,
 	}
 	state->start = start;
 	state->end = end;
-	ret = set_state_cb(tree, state, bits);
-	if (ret)
-		return ret;
 
-	if (bits_to_set & EXTENT_DIRTY)
-		tree->dirty_bytes += end - start + 1;
-	state->state |= bits_to_set;
+	set_state_bits(tree, state, bits);
+
 	node = tree_insert(&tree->state, end, &state->rb_node);
 	if (node) {
 		struct extent_state *found;
@@ -351,7 +341,6 @@ static int insert_state(struct extent_io_tree *tree,
351 "%llu %llu\n", (unsigned long long)found->start, 341 "%llu %llu\n", (unsigned long long)found->start,
352 (unsigned long long)found->end, 342 (unsigned long long)found->end,
353 (unsigned long long)start, (unsigned long long)end); 343 (unsigned long long)start, (unsigned long long)end);
354 free_extent_state(state);
355 return -EEXIST; 344 return -EEXIST;
356 } 345 }
357 state->tree = tree; 346 state->tree = tree;
@@ -359,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree,
 	return 0;
 }
 
-static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
+static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 		     u64 split)
 {
 	if (tree->ops && tree->ops->split_extent_hook)
-		return tree->ops->split_extent_hook(tree->mapping->host,
-					orig, split);
-	return 0;
 }
 
 /*
@@ -500,7 +487,8 @@ again:
 		cached_state = NULL;
 	}
 
-	if (cached && cached->tree && cached->start == start) {
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start) {
 		if (clear)
 			atomic_dec(&cached->refs);
 		state = cached;
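
The cached-state fast path above is relaxed from an exact-start match to a containment test. A hedged restatement of the new predicate as a hypothetical helper (for illustration only):

	/*
	 * Sketch: the cached extent may be reused whenever 'start' lies
	 * inside it, not only when it begins exactly at 'start'.
	 */
	static int cached_usable(struct extent_state *cached, u64 start)
	{
		return cached->tree && cached->start <= start &&
		       cached->end > start;
	}
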
@@ -660,34 +648,25 @@ again:
 		if (start > end)
 			break;
 
-		if (need_resched()) {
-			spin_unlock(&tree->lock);
-			cond_resched();
-			spin_lock(&tree->lock);
-		}
+		cond_resched_lock(&tree->lock);
 	}
 out:
 	spin_unlock(&tree->lock);
 	return 0;
 }
 
-static int set_state_bits(struct extent_io_tree *tree,
+static void set_state_bits(struct extent_io_tree *tree,
 			   struct extent_state *state,
 			   int *bits)
 {
-	int ret;
 	int bits_to_set = *bits & ~EXTENT_CTLBITS;
 
-	ret = set_state_cb(tree, state, bits);
-	if (ret)
-		return ret;
+	set_state_cb(tree, state, bits);
 	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 		u64 range = state->end - state->start + 1;
 		tree->dirty_bytes += range;
 	}
 	state->state |= bits_to_set;
-
-	return 0;
 }
 
 static void cache_state(struct extent_state *state,
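
cond_resched_lock() replaces the open-coded drop/resched/retake sequence above. Roughly, and as an approximation only (the real helper in kernel/sched also orders the checks slightly differently), the call expands to:

	/* Approximate expansion of cond_resched_lock(&tree->lock): */
	if (need_resched() || spin_needbreak(&tree->lock)) {
		spin_unlock(&tree->lock);
		cond_resched();
		spin_lock(&tree->lock);
	}
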
@@ -742,7 +721,8 @@ again:
 	spin_lock(&tree->lock);
 	if (cached_state && *cached_state) {
 		state = *cached_state;
-		if (state->start == start && state->tree) {
+		if (state->start <= start && state->end > start &&
+		    state->tree) {
 			node = &state->rb_node;
 			goto hit_next;
 		}
@@ -779,17 +759,15 @@ hit_next:
 			goto out;
 		}
 
-		err = set_state_bits(tree, state, &bits);
-		if (err)
-			goto out;
+		set_state_bits(tree, state, &bits);
 
-		next_node = rb_next(node);
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
 
 		start = last_end + 1;
+		next_node = rb_next(&state->rb_node);
 		if (next_node && start < end && prealloc && !need_resched()) {
 			state = rb_entry(next_node, struct extent_state,
 					 rb_node);
@@ -830,9 +808,7 @@ hit_next:
 		if (err)
 			goto out;
 		if (state->end <= end) {
-			err = set_state_bits(tree, state, &bits);
-			if (err)
-				goto out;
+			set_state_bits(tree, state, &bits);
 			cache_state(state, cached_state);
 			merge_state(tree, state);
 			if (last_end == (u64)-1)
@@ -862,7 +838,6 @@ hit_next:
 		 * Avoid to free 'prealloc' if it can be merged with
 		 * the later extent.
 		 */
-		atomic_inc(&prealloc->refs);
 		err = insert_state(tree, prealloc, start, this_end,
 				   &bits);
 		BUG_ON(err == -EEXIST);
@@ -872,7 +847,6 @@ hit_next:
 			goto out;
 		}
 		cache_state(prealloc, cached_state);
-		free_extent_state(prealloc);
 		prealloc = NULL;
 		start = this_end + 1;
 		goto search_again;
@@ -895,11 +869,7 @@ hit_next:
 		err = split_state(tree, state, prealloc, end + 1);
 		BUG_ON(err == -EEXIST);
 
-		err = set_state_bits(tree, prealloc, &bits);
-		if (err) {
-			prealloc = NULL;
-			goto out;
-		}
+		set_state_bits(tree, prealloc, &bits);
 		cache_state(prealloc, cached_state);
 		merge_state(tree, prealloc);
 		prealloc = NULL;
@@ -1061,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 	return 0;
 }
 
-/*
- * find the first offset in the io tree with 'bits' set. zero is
- * returned if we find something, and *start_ret and *end_ret are
- * set to reflect the state struct that was found.
- *
- * If nothing was found, 1 is returned, < 0 on error
- */
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-			  u64 *start_ret, u64 *end_ret, int bits)
-{
-	struct rb_node *node;
-	struct extent_state *state;
-	int ret = 1;
-
-	spin_lock(&tree->lock);
-	/*
-	 * this search will find all the extents that end after
-	 * our range starts.
-	 */
-	node = tree_search(tree, start);
-	if (!node)
-		goto out;
-
-	while (1) {
-		state = rb_entry(node, struct extent_state, rb_node);
-		if (state->end >= start && (state->state & bits)) {
-			*start_ret = state->start;
-			*end_ret = state->end;
-			ret = 0;
-			break;
-		}
-		node = rb_next(node);
-		if (!node)
-			break;
-	}
-out:
-	spin_unlock(&tree->lock);
-	return ret;
-}
-
 /* find the first state struct with 'bits' set after 'start', and
  * return it. tree->lock must be held. NULL will returned if
  * nothing was found after 'start'
@@ -1133,6 +1063,30 @@ out:
 }
 
 /*
+ * find the first offset in the io tree with 'bits' set. zero is
+ * returned if we find something, and *start_ret and *end_ret are
+ * set to reflect the state struct that was found.
+ *
+ * If nothing was found, 1 is returned, < 0 on error
+ */
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+			  u64 *start_ret, u64 *end_ret, int bits)
+{
+	struct extent_state *state;
+	int ret = 1;
+
+	spin_lock(&tree->lock);
+	state = find_first_extent_bit_state(tree, start, bits);
+	if (state) {
+		*start_ret = state->start;
+		*end_ret = state->end;
+		ret = 0;
+	}
+	spin_unlock(&tree->lock);
+	return ret;
+}
+
+/*
  * find a contiguous range of bytes in the file marked as delalloc, not
  * more than 'max_bytes'. start and end are used to return the range,
  *
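
find_first_extent_bit() keeps its external contract (0 on success, with *start_ret and *end_ret filled in) but is now a thin locked wrapper over find_first_extent_bit_state(), which code already holding tree->lock can call directly. A caller-side sketch of that contract (hypothetical usage, not taken from this patch):

	u64 found_start, found_end;

	if (find_first_extent_bit(tree, 0, &found_start, &found_end,
				  EXTENT_DIRTY) == 0) {
		/* [found_start, found_end] is the first extent with
		 * EXTENT_DIRTY set at or after offset 0. */
	}
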
@@ -1564,7 +1518,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int bitset = 0;
 
 	spin_lock(&tree->lock);
-	if (cached && cached->tree && cached->start == start)
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start)
 		node = &cached->rb_node;
 	else
 		node = tree_search(tree, start);
@@ -2432,6 +2387,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
 	int scanned = 0;
+	int tag;
 
 	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
@@ -2442,11 +2398,16 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
 		scanned = 1;
 	}
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag = PAGECACHE_TAG_TOWRITE;
+	else
+		tag = PAGECACHE_TAG_DIRTY;
 retry:
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag_pages_for_writeback(mapping, index, end);
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_DIRTY, min(end - index,
-			(pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 		unsigned i;
 
 		scanned = 1;
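
The two hunks above adopt the tag-and-sweep writeback pattern: for WB_SYNC_ALL, every page currently dirty in the range is first re-tagged PAGECACHE_TAG_TOWRITE, and the lookup loop then walks that tag. Pages dirtied after the sweep begins carry only PAGECACHE_TAG_DIRTY, so an integrity sync works through a fixed snapshot instead of chasing fresh dirtying and livelocking. Condensed to its skeleton (a simplified mirror of the code above, not a drop-in replacement):

	int tag;

	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;	/* walk a fixed snapshot */
	else
		tag = PAGECACHE_TAG_DIRTY;	/* best-effort background walk */

	if (wbc->sync_mode == WB_SYNC_ALL)	/* re-tag DIRTY -> TOWRITE */
		tag_pages_for_writeback(mapping, index, end);
	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
					      PAGEVEC_SIZE))) {
		/* ... lock and write each page found under 'tag' ... */
	}
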
@@ -2541,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			  struct writeback_control *wbc)
 {
 	int ret;
-	struct address_space *mapping = page->mapping;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = tree,
@@ -2549,18 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
-	struct writeback_control wbc_writepages = {
-		.sync_mode = wbc->sync_mode,
-		.older_than_this = NULL,
-		.nr_to_write = 64,
-		.range_start = page_offset(page) + PAGE_CACHE_SIZE,
-		.range_end = (loff_t)-1,
-	};
 
 	ret = __extent_writepage(page, wbc, &epd);
 
-	extent_write_cache_pages(tree, mapping, &wbc_writepages,
-				 __extent_writepage, &epd, flush_write_bio);
 	flush_epd_write_bio(&epd);
 	return ret;
 }
@@ -2584,7 +2535,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 	};
 	struct writeback_control wbc_writepages = {
 		.sync_mode = mode,
-		.older_than_this = NULL,
 		.nr_to_write = nr_pages * 2,
 		.range_start = start,
 		.range_end = end + 1,
@@ -3022,8 +2972,15 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 		return NULL;
 	eb->start = start;
 	eb->len = len;
-	spin_lock_init(&eb->lock);
-	init_waitqueue_head(&eb->lock_wq);
+	rwlock_init(&eb->lock);
+	atomic_set(&eb->write_locks, 0);
+	atomic_set(&eb->read_locks, 0);
+	atomic_set(&eb->blocking_readers, 0);
+	atomic_set(&eb->blocking_writers, 0);
+	atomic_set(&eb->spinning_readers, 0);
+	atomic_set(&eb->spinning_writers, 0);
+	init_waitqueue_head(&eb->write_lock_wq);
+	init_waitqueue_head(&eb->read_lock_wq);
 
 #if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
@@ -3119,7 +3076,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		i = 0;
 	}
 	for (; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
+		p = find_or_create_page(mapping, index, GFP_NOFS);
 		if (!p) {
 			WARN_ON(1);
 			goto free_eb;
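
Dropping __GFP_HIGHMEM here is what makes the later kmap_atomic()-to-page_address() conversions in this diff safe: a page allocated without it sits in the kernel's permanent direct mapping, so its virtual address can be taken at any time with no temporary mapping. An illustrative sketch of that assumption:

	struct page *p = find_or_create_page(mapping, index, GFP_NOFS);

	/*
	 * No __GFP_HIGHMEM: the page is in lowmem and permanently
	 * mapped, so page_address() is valid without kmap_atomic().
	 */
	char *kaddr = page_address(p);
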
@@ -3266,6 +3223,22 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 	return was_dirty;
 }
 
+static int __eb_straddles_pages(u64 start, u64 len)
+{
+	if (len < PAGE_CACHE_SIZE)
+		return 1;
+	if (start & (PAGE_CACHE_SIZE - 1))
+		return 1;
+	if ((start + len) & (PAGE_CACHE_SIZE - 1))
+		return 1;
+	return 0;
+}
+
+static int eb_straddles_pages(struct extent_buffer *eb)
+{
+	return __eb_straddles_pages(eb->start, eb->len);
+}
+
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 				 struct extent_buffer *eb,
 				 struct extent_state **cached_state)
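
The new predicates answer whether an extent buffer lines up exactly with whole pages: a buffer smaller than a page, or with an unaligned start or end, straddles page boundaries and still needs the per-range EXTENT_UPTODATE state in the io tree, while a fully page-aligned buffer can be tracked with per-page flags alone, which is what the following hunks exploit. For example, assuming PAGE_CACHE_SIZE is 4096:

	__eb_straddles_pages(0, 4096);	  /* 0: exactly one aligned page */
	__eb_straddles_pages(0, 8192);	  /* 0: two whole aligned pages  */
	__eb_straddles_pages(0, 2048);	  /* 1: shorter than a page      */
	__eb_straddles_pages(1024, 4096); /* 1: unaligned start and end  */
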
@@ -3277,8 +3250,10 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	num_pages = num_extent_pages(eb->start, eb->len);
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
-	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			      cached_state, GFP_NOFS);
+	if (eb_straddles_pages(eb)) {
+		clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+				      cached_state, GFP_NOFS);
+	}
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if (page)
@@ -3296,8 +3271,10 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 
 	num_pages = num_extent_pages(eb->start, eb->len);
 
-	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			    NULL, GFP_NOFS);
+	if (eb_straddles_pages(eb)) {
+		set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+				    NULL, GFP_NOFS);
+	}
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3320,9 +3297,12 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 	int uptodate;
 	unsigned long index;
 
-	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
-	if (ret)
-		return 1;
+	if (__eb_straddles_pages(start, end - start + 1)) {
+		ret = test_range_bit(tree, start, end,
+				     EXTENT_UPTODATE, 1, NULL);
+		if (ret)
+			return 1;
+	}
 	while (start <= end) {
 		index = start >> PAGE_CACHE_SHIFT;
 		page = find_get_page(tree->mapping, index);
@@ -3350,10 +3330,12 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 1;
 
-	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			     EXTENT_UPTODATE, 1, cached_state);
-	if (ret)
-		return ret;
+	if (eb_straddles_pages(eb)) {
+		ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+				     EXTENT_UPTODATE, 1, cached_state);
+		if (ret)
+			return ret;
+	}
 
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = 0; i < num_pages; i++) {
@@ -3386,9 +3368,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
-	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1, NULL)) {
-		return 0;
+	if (eb_straddles_pages(eb)) {
+		if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+				   EXTENT_UPTODATE, 1, NULL)) {
+			return 0;
+		}
 	}
 
 	if (start) {
@@ -3492,9 +3476,8 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 		page = extent_buffer_page(eb, i);
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
-		kaddr = kmap_atomic(page, KM_USER1);
+		kaddr = page_address(page);
 		memcpy(dst, kaddr + offset, cur);
-		kunmap_atomic(kaddr, KM_USER1);
 
 		dst += cur;
 		len -= cur;
@@ -3504,9 +3487,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 }
 
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
-			      unsigned long min_len, char **token, char **map,
+			      unsigned long min_len, char **map,
 			      unsigned long *map_start,
-			      unsigned long *map_len, int km)
+			      unsigned long *map_len)
 {
 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
 	char *kaddr;
@@ -3536,42 +3519,12 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	}
 
 	p = extent_buffer_page(eb, i);
-	kaddr = kmap_atomic(p, km);
-	*token = kaddr;
+	kaddr = page_address(p);
 	*map = kaddr + offset;
 	*map_len = PAGE_CACHE_SIZE - offset;
 	return 0;
 }
 
-int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
-		      unsigned long min_len,
-		      char **token, char **map,
-		      unsigned long *map_start,
-		      unsigned long *map_len, int km)
-{
-	int err;
-	int save = 0;
-	if (eb->map_token) {
-		unmap_extent_buffer(eb, eb->map_token, km);
-		eb->map_token = NULL;
-		save = 1;
-	}
-	err = map_private_extent_buffer(eb, start, min_len, token, map,
-					map_start, map_len, km);
-	if (!err && save) {
-		eb->map_token = *token;
-		eb->kaddr = *map;
-		eb->map_start = *map_start;
-		eb->map_len = *map_len;
-	}
-	return err;
-}
-
-void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
-{
-	kunmap_atomic(token, km);
-}
-
 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 			 unsigned long start,
 			 unsigned long len)
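
With permanently mapped pages there is no kmap token to juggle, so map_private_extent_buffer() loses its token and km parameters and map_extent_buffer()/unmap_extent_buffer() disappear outright. A hypothetical caller after this change (the length and comments are illustrative):

	char *kaddr;
	unsigned long map_start, map_len;
	int err;

	/* Map at least 32 contiguous bytes starting at 'start'. */
	err = map_private_extent_buffer(eb, start, 32, &kaddr,
					&map_start, &map_len);
	if (!err) {
		/* map_start/map_len bound the span usable through kaddr;
		 * with permanent mappings, no unmap call is needed. */
	}
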
@@ -3595,9 +3548,8 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
 
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		ret = memcmp(ptr, kaddr + offset, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 		if (ret)
 			break;
 
@@ -3630,9 +3582,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 		WARN_ON(!PageUptodate(page));
 
 		cur = min(len, PAGE_CACHE_SIZE - offset);
-		kaddr = kmap_atomic(page, KM_USER1);
+		kaddr = page_address(page);
 		memcpy(kaddr + offset, src, cur);
-		kunmap_atomic(kaddr, KM_USER1);
 
 		src += cur;
 		len -= cur;
@@ -3661,9 +3612,8 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 		WARN_ON(!PageUptodate(page));
 
 		cur = min(len, PAGE_CACHE_SIZE - offset);
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		memset(kaddr + offset, c, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 
 		len -= cur;
 		offset = 0;
@@ -3694,9 +3644,8 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 
 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
 
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 
 		src_offset += cur;
 		len -= cur;
@@ -3709,20 +3658,17 @@ static void move_pages(struct page *dst_page, struct page *src_page,
 		       unsigned long dst_off, unsigned long src_off,
 		       unsigned long len)
 {
-	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
 	if (dst_page == src_page) {
 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
 	} else {
-		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
+		char *src_kaddr = page_address(src_page);
 		char *p = dst_kaddr + dst_off + len;
 		char *s = src_kaddr + src_off + len;
 
 		while (len--)
 			*--p = *--s;
-
-		kunmap_atomic(src_kaddr, KM_USER1);
 	}
-	kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
@@ -3735,20 +3681,17 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
 		       unsigned long dst_off, unsigned long src_off,
 		       unsigned long len)
 {
-	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
 	char *src_kaddr;
 
 	if (dst_page != src_page) {
-		src_kaddr = kmap_atomic(src_page, KM_USER1);
+		src_kaddr = page_address(src_page);
 	} else {
 		src_kaddr = dst_kaddr;
 		BUG_ON(areas_overlap(src_off, dst_off, len));
 	}
 
 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
-	kunmap_atomic(dst_kaddr, KM_USER0);
-	if (dst_page != src_page)
-		kunmap_atomic(src_kaddr, KM_USER1);
 }
 
 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,