path: root/fs/btrfs/extent_io.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-27 19:43:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-27 19:43:52 -0400
commit	22712200e175e0df5c7f9edfe6c6bf5c94c23b83 (patch)
tree	a3e332aab7f5a953ff4f12e67af2a0e5f32f5be5 /fs/btrfs/extent_io.c
parent	597a67e0ba758e3d2239c81fbb648c6e69ec30a2 (diff)
parent	ff95acb6733d41a8d45feb0e18b96df25e610e78 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: make sure reserve_metadata_bytes doesn't leak out strange errors
  Btrfs: use the commit_root for reading free_space_inode crcs
  Btrfs: reduce extent_state lock contention for metadata
  Btrfs: remove lockdep magic from btrfs_next_leaf
  Btrfs: make a lockdep class for each root
  Btrfs: switch the btrfs tree locks to reader/writer
  Btrfs: fix deadlock when throttling transactions
  Btrfs: stop using highmem for extent_buffers
  Btrfs: fix BUG_ON() caused by ENOSPC when relocating space
  Btrfs: tag pages for writeback in sync
  Btrfs: fix enospc problems with delalloc
  Btrfs: don't flush delalloc arbitrarily
  Btrfs: use find_or_create_page instead of grab_cache_page
  Btrfs: use a worker thread to do caching
  Btrfs: fix how we merge extent states and deal with cached states
  Btrfs: use the normal checksumming infrastructure for free space cache
  Btrfs: serialize flushers in reserve_metadata_bytes
  Btrfs: do transaction space reservation before joining the transaction
  Btrfs: try to only do one btrfs_search_slot in do_setxattr
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	168
1 file changed, 83 insertions(+), 85 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 561262d35689..067b1747421b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -281,11 +281,10 @@ static int merge_state(struct extent_io_tree *tree,
 	if (other->start == state->end + 1 &&
 	    other->state == state->state) {
 		merge_cb(tree, state, other);
-		other->start = state->start;
-		state->tree = NULL;
-		rb_erase(&state->rb_node, &tree->state);
-		free_extent_state(state);
-		state = NULL;
+		state->end = other->end;
+		other->tree = NULL;
+		rb_erase(&other->rb_node, &tree->state);
+		free_extent_state(other);
 	}
 }
 
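The merge direction is the substance of this hunk: instead of growing the right-hand neighbor backward and freeing 'state', the new code extends 'state' forward and frees 'other', so a pointer to 'state' (for example a caller's cached state) stays valid across the merge. A minimal standalone sketch of the forward-merge pattern, using a toy singly linked range list as a stand-in for the kernel's rb-tree and extent_state:

#include <stdlib.h>

/* Toy range node; illustrative stand-in, not kernel API. */
struct range {
	unsigned long long start, end;	/* inclusive bounds */
	unsigned bits;			/* state bits that must match to merge */
	struct range *next;		/* sorted, non-overlapping list */
};

static void merge_forward(struct range *state)
{
	struct range *other = state->next;

	if (other && other->start == state->end + 1 &&
	    other->bits == state->bits) {
		state->end = other->end;	/* grow 'state' in place... */
		state->next = other->next;	/* ...unlink the neighbor... */
		free(other);			/* ...and free it; 'state' stays valid */
	}
}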
@@ -351,7 +350,6 @@ static int insert_state(struct extent_io_tree *tree,
351 "%llu %llu\n", (unsigned long long)found->start, 350 "%llu %llu\n", (unsigned long long)found->start,
352 (unsigned long long)found->end, 351 (unsigned long long)found->end,
353 (unsigned long long)start, (unsigned long long)end); 352 (unsigned long long)start, (unsigned long long)end);
354 free_extent_state(state);
355 return -EEXIST; 353 return -EEXIST;
356 } 354 }
357 state->tree = tree; 355 state->tree = tree;
@@ -500,7 +498,8 @@ again:
 		cached_state = NULL;
 	}
 
-	if (cached && cached->tree && cached->start == start) {
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start) {
 		if (clear)
 			atomic_dec(&cached->refs);
 		state = cached;
@@ -742,7 +741,8 @@ again:
 	spin_lock(&tree->lock);
 	if (cached_state && *cached_state) {
 		state = *cached_state;
-		if (state->start == start && state->tree) {
+		if (state->start <= start && state->end > start &&
+		    state->tree) {
 			node = &state->rb_node;
 			goto hit_next;
 		}
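This hunk and the one above relax the cached-state test in the same way: from an exact start match to a containment check, so a cached extent_state that merely covers the query offset can still short-circuit the tree search. With the old 'cached->start == start' test, a lookup at offset 4096 into a cached state spanning [0, 8191] missed the cache; with the containment test it hits. A standalone restatement of the predicate as the hunks write it:

/* Mirror of the relaxed cache-hit test: the cached node is usable when
 * it spans 'start', not only when it begins there. */
static int cached_covers(unsigned long long cached_start,
			 unsigned long long cached_end,
			 unsigned long long start)
{
	return cached_start <= start && cached_end > start;
}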
@@ -783,13 +783,13 @@ hit_next:
 		if (err)
 			goto out;
 
-		next_node = rb_next(node);
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
 
 		start = last_end + 1;
+		next_node = rb_next(&state->rb_node);
 		if (next_node && start < end && prealloc && !need_resched()) {
 			state = rb_entry(next_node, struct extent_state,
 					 rb_node);
@@ -862,7 +862,6 @@ hit_next:
 		 * Avoid to free 'prealloc' if it can be merged with
 		 * the later extent.
 		 */
-		atomic_inc(&prealloc->refs);
 		err = insert_state(tree, prealloc, start, this_end,
 				   &bits);
 		BUG_ON(err == -EEXIST);
@@ -872,7 +871,6 @@ hit_next:
 			goto out;
 		}
 		cache_state(prealloc, cached_state);
-		free_extent_state(prealloc);
 		prealloc = NULL;
 		start = this_end + 1;
 		goto search_again;
@@ -1564,7 +1562,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int bitset = 0;
 
 	spin_lock(&tree->lock);
-	if (cached && cached->tree && cached->start == start)
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start)
 		node = &cached->rb_node;
 	else
 		node = tree_search(tree, start);
@@ -2432,6 +2431,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
 	int scanned = 0;
+	int tag;
 
 	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
@@ -2442,11 +2442,16 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
 		scanned = 1;
 	}
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag = PAGECACHE_TAG_TOWRITE;
+	else
+		tag = PAGECACHE_TAG_DIRTY;
 retry:
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag_pages_for_writeback(mapping, index, end);
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_DIRTY, min(end - index,
-			(pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 		unsigned i;
 
 		scanned = 1;
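This adopts the snapshot-tag pattern used by write_cache_pages() in mm/page-writeback.c: for WB_SYNC_ALL, the dirty pages in the range are tagged PAGECACHE_TAG_TOWRITE once up front, and the walk then looks up pages by that tag. Pages re-dirtied while the sync runs get the DIRTY tag again but not TOWRITE, so the loop services a bounded set instead of livelocking behind a busy writer. A self-contained toy model of the idea (plain arrays, no kernel types):

#include <stdbool.h>
#include <stddef.h>

#define NPAGES 8

static bool dirty[NPAGES];	/* may be re-set concurrently by writers */
static bool towrite[NPAGES];	/* one-shot snapshot taken before the walk */

static void tag_for_writeback(void)
{
	for (size_t i = 0; i < NPAGES; i++)
		if (dirty[i])
			towrite[i] = true;
}

static void sync_walk(void)
{
	tag_for_writeback();
	for (size_t i = 0; i < NPAGES; i++) {
		if (!towrite[i])
			continue;
		towrite[i] = false;
		dirty[i] = false;	/* "write the page out" */
		/* a writer re-dirtying page i here sets dirty[i] only;
		 * towrite[i] stays clear, so this walk still terminates */
	}
}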
@@ -3020,8 +3025,15 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 		return NULL;
 	eb->start = start;
 	eb->len = len;
-	spin_lock_init(&eb->lock);
-	init_waitqueue_head(&eb->lock_wq);
+	rwlock_init(&eb->lock);
+	atomic_set(&eb->write_locks, 0);
+	atomic_set(&eb->read_locks, 0);
+	atomic_set(&eb->blocking_readers, 0);
+	atomic_set(&eb->blocking_writers, 0);
+	atomic_set(&eb->spinning_readers, 0);
+	atomic_set(&eb->spinning_writers, 0);
+	init_waitqueue_head(&eb->write_lock_wq);
+	init_waitqueue_head(&eb->read_lock_wq);
 
 #if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
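This is the hook for the "switch the btrfs tree locks to reader/writer" change in the merge: the single spinlock and waitqueue are replaced by an rwlock plus counters that track how many holders are readers versus writers, and whether each holder is spinning or blocking (blocking holders make waiters sleep on the new waitqueues instead of spinning). A loose standalone sketch of the counter bookkeeping only, using C11 atomics; the names mirror the fields above, but this is not the kernel's locking algorithm:

#include <stdatomic.h>

struct eb_lock_counts {
	atomic_int write_locks, read_locks;
	atomic_int blocking_readers, blocking_writers;
	atomic_int spinning_readers, spinning_writers;
};

/* A reader that just took the rwlock counts itself as spinning... */
static void note_read_locked(struct eb_lock_counts *c)
{
	atomic_fetch_add(&c->read_locks, 1);
	atomic_fetch_add(&c->spinning_readers, 1);
}

/* ...and converts to blocking before doing anything that can sleep,
 * so contending threads know to wait rather than spin. */
static void note_reader_blocking(struct eb_lock_counts *c)
{
	atomic_fetch_sub(&c->spinning_readers, 1);
	atomic_fetch_add(&c->blocking_readers, 1);
}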
@@ -3117,7 +3129,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		i = 0;
 	}
 	for (; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
+		p = find_or_create_page(mapping, index, GFP_NOFS);
 		if (!p) {
 			WARN_ON(1);
 			goto free_eb;
@@ -3264,6 +3276,22 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 	return was_dirty;
 }
 
+static int __eb_straddles_pages(u64 start, u64 len)
+{
+	if (len < PAGE_CACHE_SIZE)
+		return 1;
+	if (start & (PAGE_CACHE_SIZE - 1))
+		return 1;
+	if ((start + len) & (PAGE_CACHE_SIZE - 1))
+		return 1;
+	return 0;
+}
+
+static int eb_straddles_pages(struct extent_buffer *eb)
+{
+	return __eb_straddles_pages(eb->start, eb->len);
+}
+
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 				 struct extent_buffer *eb,
 				 struct extent_state **cached_state)
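The new predicate decides whether an extent buffer shares any page with another buffer: only such buffers need the per-range uptodate bits in the extent_io tree, which is the lock-contention reduction the hunks below build on. A standalone version of the test with a 4K page, plus the cases it distinguishes:

#include <assert.h>

#define PAGE_SIZE 4096ULL

static int straddles_pages(unsigned long long start, unsigned long long len)
{
	if (len < PAGE_SIZE)
		return 1;		/* sub-page buffer shares a page */
	if (start & (PAGE_SIZE - 1))
		return 1;		/* unaligned start crosses into a shared page */
	if ((start + len) & (PAGE_SIZE - 1))
		return 1;		/* unaligned end crosses into a shared page */
	return 0;
}

int main(void)
{
	assert(straddles_pages(8192, 4096) == 0);	/* exactly covers one page */
	assert(straddles_pages(6144, 4096) == 1);	/* crosses a page boundary */
	assert(straddles_pages(8192, 2048) == 1);	/* shares its page */
	return 0;
}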
@@ -3275,8 +3303,10 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	num_pages = num_extent_pages(eb->start, eb->len);
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
-	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			      cached_state, GFP_NOFS);
+	if (eb_straddles_pages(eb)) {
+		clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+				      cached_state, GFP_NOFS);
+	}
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if (page)
@@ -3294,8 +3324,10 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 
 	num_pages = num_extent_pages(eb->start, eb->len);
 
-	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			    NULL, GFP_NOFS);
+	if (eb_straddles_pages(eb)) {
+		set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+				    NULL, GFP_NOFS);
+	}
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3318,9 +3350,12 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 	int uptodate;
 	unsigned long index;
 
-	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
-	if (ret)
-		return 1;
+	if (__eb_straddles_pages(start, end - start + 1)) {
+		ret = test_range_bit(tree, start, end,
+				     EXTENT_UPTODATE, 1, NULL);
+		if (ret)
+			return 1;
+	}
 	while (start <= end) {
 		index = start >> PAGE_CACHE_SHIFT;
 		page = find_get_page(tree->mapping, index);
@@ -3348,10 +3383,12 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 1;
 
-	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			     EXTENT_UPTODATE, 1, cached_state);
-	if (ret)
-		return ret;
+	if (eb_straddles_pages(eb)) {
+		ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+				     EXTENT_UPTODATE, 1, cached_state);
+		if (ret)
+			return ret;
+	}
 
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = 0; i < num_pages; i++) {
@@ -3384,9 +3421,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
-	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1, NULL)) {
-		return 0;
+	if (eb_straddles_pages(eb)) {
+		if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+				   EXTENT_UPTODATE, 1, NULL)) {
+			return 0;
+		}
 	}
 
 	if (start) {
@@ -3490,9 +3529,8 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 		page = extent_buffer_page(eb, i);
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
-		kaddr = kmap_atomic(page, KM_USER1);
+		kaddr = page_address(page);
 		memcpy(dst, kaddr + offset, cur);
-		kunmap_atomic(kaddr, KM_USER1);
 
 		dst += cur;
 		len -= cur;
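All the kmap_atomic/kunmap_atomic pairs from here down collapse to a bare page_address() call for the same reason: with __GFP_HIGHMEM dropped from the extent buffer page allocation (the find_or_create_page hunk above), every extent buffer page lives in lowmem and has a permanent kernel mapping, so no temporary per-CPU mapping or unmap step is needed. The shape of the change, as an annotated illustrative fragment of the kernel API involved:

/*
 * Before (temporary per-CPU mapping; must be paired, atomic context):
 *	kaddr = kmap_atomic(page, KM_USER1);
 *	memcpy(dst, kaddr + offset, cur);
 *	kunmap_atomic(kaddr, KM_USER1);
 *
 * After (permanent lowmem mapping; nothing to undo):
 *	kaddr = page_address(page);
 *	memcpy(dst, kaddr + offset, cur);
 *
 * page_address() is safe here only because these pages are no longer
 * allocated with __GFP_HIGHMEM.
 */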
@@ -3502,9 +3540,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 }
 
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
-			      unsigned long min_len, char **token, char **map,
+			      unsigned long min_len, char **map,
 			      unsigned long *map_start,
-			      unsigned long *map_len, int km)
+			      unsigned long *map_len)
 {
 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
 	char *kaddr;
@@ -3534,42 +3572,12 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	}
 
 	p = extent_buffer_page(eb, i);
-	kaddr = kmap_atomic(p, km);
-	*token = kaddr;
+	kaddr = page_address(p);
 	*map = kaddr + offset;
 	*map_len = PAGE_CACHE_SIZE - offset;
 	return 0;
 }
 
-int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
-		      unsigned long min_len,
-		      char **token, char **map,
-		      unsigned long *map_start,
-		      unsigned long *map_len, int km)
-{
-	int err;
-	int save = 0;
-	if (eb->map_token) {
-		unmap_extent_buffer(eb, eb->map_token, km);
-		eb->map_token = NULL;
-		save = 1;
-	}
-	err = map_private_extent_buffer(eb, start, min_len, token, map,
-					map_start, map_len, km);
-	if (!err && save) {
-		eb->map_token = *token;
-		eb->kaddr = *map;
-		eb->map_start = *map_start;
-		eb->map_len = *map_len;
-	}
-	return err;
-}
-
-void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
-{
-	kunmap_atomic(token, km);
-}
-
 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 			 unsigned long start,
 			 unsigned long len)
@@ -3593,9 +3601,8 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
 
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		ret = memcmp(ptr, kaddr + offset, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 		if (ret)
 			break;
 
@@ -3628,9 +3635,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 		WARN_ON(!PageUptodate(page));
 
 		cur = min(len, PAGE_CACHE_SIZE - offset);
-		kaddr = kmap_atomic(page, KM_USER1);
+		kaddr = page_address(page);
 		memcpy(kaddr + offset, src, cur);
-		kunmap_atomic(kaddr, KM_USER1);
 
 		src += cur;
 		len -= cur;
@@ -3659,9 +3665,8 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 		WARN_ON(!PageUptodate(page));
 
 		cur = min(len, PAGE_CACHE_SIZE - offset);
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		memset(kaddr + offset, c, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 
 		len -= cur;
 		offset = 0;
@@ -3692,9 +3697,8 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 
 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
 
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 
 		src_offset += cur;
 		len -= cur;
@@ -3707,20 +3711,17 @@ static void move_pages(struct page *dst_page, struct page *src_page,
 		       unsigned long dst_off, unsigned long src_off,
 		       unsigned long len)
 {
-	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
 	if (dst_page == src_page) {
 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
 	} else {
-		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
+		char *src_kaddr = page_address(src_page);
 		char *p = dst_kaddr + dst_off + len;
 		char *s = src_kaddr + src_off + len;
 
 		while (len--)
 			*--p = *--s;
-
-		kunmap_atomic(src_kaddr, KM_USER1);
 	}
-	kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
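move_pages is the per-page helper for memmove_extent_buffer: when the logical source and destination ranges overlap and the move is processed from the high end, bytes must be copied high-to-low so source bytes are read before they are overwritten (the same-page case is handed to memmove; the open-coded loop covers the cross-page case). A standalone illustration of the backward copy:

#include <stdio.h>

static void move_bytes(char *base, unsigned long dst_off,
		       unsigned long src_off, unsigned long len)
{
	char *p = base + dst_off + len;
	char *s = base + src_off + len;

	while (len--)
		*--p = *--s;	/* high-to-low so an overlapping source isn't clobbered */
}

int main(void)
{
	char buf[16] = "abcdefgh";

	move_bytes(buf, 2, 0, 6);	/* overlapping shift right by 2 */
	printf("%.8s\n", buf);		/* prints "ababcdef" */
	return 0;
}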
@@ -3733,20 +3734,17 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
 		       unsigned long dst_off, unsigned long src_off,
 		       unsigned long len)
 {
-	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
 	char *src_kaddr;
 
 	if (dst_page != src_page) {
-		src_kaddr = kmap_atomic(src_page, KM_USER1);
+		src_kaddr = page_address(src_page);
 	} else {
 		src_kaddr = dst_kaddr;
 		BUG_ON(areas_overlap(src_off, dst_off, len));
 	}
 
 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
-	kunmap_atomic(dst_kaddr, KM_USER0);
-	if (dst_page != src_page)
-		kunmap_atomic(src_kaddr, KM_USER1);
 }
 
 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,