Diffstat (limited to 'fs/btrfs/extent_io.c')
 -rw-r--r--	fs/btrfs/extent_io.c	168
 1 file changed, 83 insertions(+), 85 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7055d11c1efd..5bbdb243bb6f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -281,11 +281,10 @@ static int merge_state(struct extent_io_tree *tree,
 		if (other->start == state->end + 1 &&
 		    other->state == state->state) {
 			merge_cb(tree, state, other);
-			other->start = state->start;
-			state->tree = NULL;
-			rb_erase(&state->rb_node, &tree->state);
-			free_extent_state(state);
-			state = NULL;
+			state->end = other->end;
+			other->tree = NULL;
+			rb_erase(&other->rb_node, &tree->state);
+			free_extent_state(other);
 		}
 	}
 
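Note: the merge now runs in the opposite direction: 'state' absorbs 'other', and it is 'other' that gets erased and freed, so a pointer to 'state' held by the caller (or stashed in *cached_state) stays valid across the merge. A minimal userspace sketch of the pointer-lifetime difference, using hypothetical interval types rather than kernel code:

	#include <stdio.h>
	#include <stdlib.h>

	struct interval { unsigned long start, end; };

	/* New direction: extend 'state' over 'other', free 'other'. */
	static void merge_forward(struct interval *state, struct interval *other)
	{
		state->end = other->end;
		free(other);		/* the caller's 'state' pointer survives */
	}

	int main(void)
	{
		struct interval *state = malloc(sizeof(*state));
		struct interval *other = malloc(sizeof(*other));

		state->start = 0; state->end = 4;
		other->start = 5; other->end = 9;	/* adjacent, same bits */
		merge_forward(state, other);
		printf("[%lu, %lu]\n", state->start, state->end);	/* [0, 9] */
		free(state);
		return 0;
	}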
@@ -351,7 +350,6 @@ static int insert_state(struct extent_io_tree *tree,
 		       "%llu %llu\n", (unsigned long long)found->start,
 		       (unsigned long long)found->end,
 		       (unsigned long long)start, (unsigned long long)end);
-		free_extent_state(state);
 		return -EEXIST;
 	}
 	state->tree = tree;
@@ -500,7 +498,8 @@ again:
 		cached_state = NULL;
 	}
 
-	if (cached && cached->tree && cached->start == start) {
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start) {
 		if (clear)
 			atomic_dec(&cached->refs);
 		state = cached;
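Note: this hunk, and the matching ones in set_extent_bit and test_range_bit below, relaxes the cached-state check from an exact start match to a containment test: any cached state whose range covers 'start' is usable, so the cache hits far more often. A small sketch of the predicate, with a hypothetical helper name:

	#include <stdio.h>

	typedef unsigned long long u64;

	/* state_covers() is a hypothetical name for the new test. */
	static int state_covers(u64 cached_start, u64 cached_end, u64 start)
	{
		return cached_start <= start && cached_end > start;
	}

	int main(void)
	{
		/* The old '== start' test would miss this hit: */
		printf("%d\n", state_covers(4096, 8191, 6000));	/* 1: usable */
		printf("%d\n", state_covers(4096, 8191, 9000));	/* 0: miss */
		return 0;
	}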
@@ -742,7 +741,8 @@ again:
 	spin_lock(&tree->lock);
 	if (cached_state && *cached_state) {
 		state = *cached_state;
-		if (state->start == start && state->tree) {
+		if (state->start <= start && state->end > start &&
+		    state->tree) {
 			node = &state->rb_node;
 			goto hit_next;
 		}
@@ -783,13 +783,13 @@ hit_next:
 		if (err)
 			goto out;
 
-		next_node = rb_next(node);
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
 
 		start = last_end + 1;
+		next_node = rb_next(&state->rb_node);
 		if (next_node && start < end && prealloc && !need_resched()) {
 			state = rb_entry(next_node, struct extent_state,
 					 rb_node);
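Note: next_node is now computed after merge_state(). With the new merge direction in the first hunk, merge_state() may rb_erase() and free the node to the right of 'state', so a next pointer taken before the merge could dangle. The safe ordering, in outline:

	cache_state(state, cached_state);
	merge_state(tree, state);	/* may erase and free the next node */
	if (last_end == (u64)-1)
		goto out;

	start = last_end + 1;
	next_node = rb_next(&state->rb_node);	/* only taken post-merge */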
@@ -862,7 +862,6 @@ hit_next:
 		 * Avoid to free 'prealloc' if it can be merged with
 		 * the later extent.
 		 */
-		atomic_inc(&prealloc->refs);
 		err = insert_state(tree, prealloc, start, this_end,
 				   &bits);
 		BUG_ON(err == -EEXIST);
@@ -872,7 +871,6 @@ hit_next:
 			goto out;
 		}
 		cache_state(prealloc, cached_state);
-		free_extent_state(prealloc);
 		prealloc = NULL;
 		start = this_end + 1;
 		goto search_again;
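Note: the dropped atomic_inc()/free_extent_state() pair compensated for insert_state() freeing its argument on the -EEXIST path; with that free removed in the insert_state hunk above, ownership of 'prealloc' stays with the caller throughout, and the refcount round-trip is dead weight. Sketched flow after this patch (error handling elided as in the hunks above):

	err = insert_state(tree, prealloc, start, this_end, &bits);
	BUG_ON(err == -EEXIST);		/* 'prealloc' not freed underneath us */
	cache_state(prealloc, cached_state);	/* takes its own reference */
	prealloc = NULL;			/* the tree now holds the state */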
@@ -1564,7 +1562,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int bitset = 0;
 
 	spin_lock(&tree->lock);
-	if (cached && cached->tree && cached->start == start)
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start)
 		node = &cached->rb_node;
 	else
 		node = tree_search(tree, start);
@@ -2432,6 +2431,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
 	int scanned = 0;
+	int tag;
 
 	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
@@ -2442,11 +2442,16 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
 		scanned = 1;
 	}
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag = PAGECACHE_TAG_TOWRITE;
+	else
+		tag = PAGECACHE_TAG_DIRTY;
 retry:
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		tag_pages_for_writeback(mapping, index, end);
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_DIRTY, min(end - index,
-				(pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 		unsigned i;
 
 		scanned = 1;
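Note: this adopts the writeback livelock avoidance scheme used by write_cache_pages(): for WB_SYNC_ALL, pages dirty at the start of the walk are first re-tagged PAGECACHE_TAG_TOWRITE, and the loop then looks pages up by that tag, so pages dirtied concurrently wait for the next pass instead of extending this one forever. A userspace model of the two-phase idea (hypothetical arrays standing in for the page-cache radix tree tags):

	#include <stdbool.h>
	#include <stdio.h>

	#define NPAGES 8
	static bool dirty[NPAGES];
	static bool towrite[NPAGES];

	/* Phase 1: snapshot which pages were dirty when writeback started. */
	static void tag_for_writeback(int start, int end)
	{
		for (int i = start; i <= end; i++)
			if (dirty[i])
				towrite[i] = true;
	}

	int main(void)
	{
		dirty[1] = dirty[3] = true;
		tag_for_writeback(0, NPAGES - 1);
		dirty[5] = true;		/* dirtied after the snapshot */

		/* Phase 2: walk the snapshot tag, not the live dirty bits. */
		for (int i = 0; i < NPAGES; i++)
			if (towrite[i])
				printf("writing page %d\n", i);	/* pages 1 and 3 */
		return 0;
	}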
@@ -3022,8 +3027,15 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 		return NULL;
 	eb->start = start;
 	eb->len = len;
-	spin_lock_init(&eb->lock);
-	init_waitqueue_head(&eb->lock_wq);
+	rwlock_init(&eb->lock);
+	atomic_set(&eb->write_locks, 0);
+	atomic_set(&eb->read_locks, 0);
+	atomic_set(&eb->blocking_readers, 0);
+	atomic_set(&eb->blocking_writers, 0);
+	atomic_set(&eb->spinning_readers, 0);
+	atomic_set(&eb->spinning_writers, 0);
+	init_waitqueue_head(&eb->write_lock_wq);
+	init_waitqueue_head(&eb->read_lock_wq);
 
 #if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
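Note: the extent buffer's plain spinlock plus single wait queue gives way to an rwlock and a set of counters and wait queues; these are what the btrfs blocking read/write lock scheme in fs/btrfs/locking.c uses to let a holder convert a spinning lock into a blocking one under contention. Grouping the fields this hunk initializes, as a hypothetical standalone struct purely for illustration (the real fields live inside struct extent_buffer in extent_io.h):

	struct eb_lock_state {		/* hypothetical grouping */
		rwlock_t lock;			/* spinning rwlock */
		atomic_t write_locks;		/* write locks held */
		atomic_t read_locks;		/* read locks held */
		atomic_t blocking_writers;	/* writers gone blocking */
		atomic_t blocking_readers;	/* readers gone blocking */
		atomic_t spinning_writers;	/* writers still spinning */
		atomic_t spinning_readers;	/* readers still spinning */
		wait_queue_head_t write_lock_wq;	/* writers wait here */
		wait_queue_head_t read_lock_wq;		/* readers wait here */
	};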
@@ -3119,7 +3131,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		i = 0;
 	}
 	for (; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
+		p = find_or_create_page(mapping, index, GFP_NOFS);
 		if (!p) {
 			WARN_ON(1);
 			goto free_eb;
@@ -3266,6 +3278,22 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 	return was_dirty;
 }
 
+static int __eb_straddles_pages(u64 start, u64 len)
+{
+	if (len < PAGE_CACHE_SIZE)
+		return 1;
+	if (start & (PAGE_CACHE_SIZE - 1))
+		return 1;
+	if ((start + len) & (PAGE_CACHE_SIZE - 1))
+		return 1;
+	return 0;
+}
+
+static int eb_straddles_pages(struct extent_buffer *eb)
+{
+	return __eb_straddles_pages(eb->start, eb->len);
+}
+
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 				 struct extent_buffer *eb,
 				 struct extent_state **cached_state)
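Note: eb_straddles_pages() returns 1 unless the buffer starts on a page boundary, ends on a page boundary, and spans at least one full page. For such page-aligned buffers the per-page Uptodate bits are enough, so the callers below can skip the extent-tree bit updates; only buffers that share pages with other data still need the range bits. A quick userspace check of the predicate (PAGE_CACHE_SIZE assumed to be 4096 here):

	#include <stdio.h>

	typedef unsigned long long u64;
	#define PAGE_CACHE_SIZE 4096ULL

	static int __eb_straddles_pages(u64 start, u64 len)
	{
		if (len < PAGE_CACHE_SIZE)
			return 1;	/* sub-page buffer shares a page */
		if (start & (PAGE_CACHE_SIZE - 1))
			return 1;	/* unaligned start */
		if ((start + len) & (PAGE_CACHE_SIZE - 1))
			return 1;	/* unaligned end */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", __eb_straddles_pages(8192, 4096));	/* 0: aligned */
		printf("%d\n", __eb_straddles_pages(8192, 2048));	/* 1: sub-page */
		printf("%d\n", __eb_straddles_pages(6144, 4096));	/* 1: unaligned */
		return 0;
	}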
@@ -3277,8 +3305,10 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	num_pages = num_extent_pages(eb->start, eb->len);
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
-	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			      cached_state, GFP_NOFS);
+	if (eb_straddles_pages(eb)) {
+		clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+				      cached_state, GFP_NOFS);
+	}
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if (page)
@@ -3296,8 +3326,10 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 
 	num_pages = num_extent_pages(eb->start, eb->len);
 
-	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			    NULL, GFP_NOFS);
+	if (eb_straddles_pages(eb)) {
+		set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+				    NULL, GFP_NOFS);
+	}
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3320,9 +3352,12 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 	int uptodate;
 	unsigned long index;
 
-	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
-	if (ret)
-		return 1;
+	if (__eb_straddles_pages(start, end - start + 1)) {
+		ret = test_range_bit(tree, start, end,
+				     EXTENT_UPTODATE, 1, NULL);
+		if (ret)
+			return 1;
+	}
 	while (start <= end) {
 		index = start >> PAGE_CACHE_SHIFT;
 		page = find_get_page(tree->mapping, index);
@@ -3350,10 +3385,12 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 1;
 
-	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1, cached_state);
-	if (ret)
-		return ret;
+	if (eb_straddles_pages(eb)) {
+		ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+				     EXTENT_UPTODATE, 1, cached_state);
+		if (ret)
+			return ret;
+	}
 
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = 0; i < num_pages; i++) {
@@ -3386,9 +3423,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
-	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1, NULL)) {
-		return 0;
+	if (eb_straddles_pages(eb)) {
+		if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+				   EXTENT_UPTODATE, 1, NULL)) {
+			return 0;
+		}
 	}
 
 	if (start) {
@@ -3492,9 +3531,8 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 		page = extent_buffer_page(eb, i);
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
-		kaddr = kmap_atomic(page, KM_USER1);
+		kaddr = page_address(page);
 		memcpy(dst, kaddr + offset, cur);
-		kunmap_atomic(kaddr, KM_USER1);
 
 		dst += cur;
 		len -= cur;
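Note: this and the remaining kmap_atomic() conversions below depend on the GFP change above: once extent buffer pages are no longer allocated with __GFP_HIGHMEM, they always have a permanent kernel mapping, so page_address() can replace the kmap_atomic()/kunmap_atomic() pairs along with the KM_USER0/KM_USER1 slot juggling. The two forms side by side, as a kernel fragment for illustration:

	/* Old: temporary per-CPU mapping, must be unmapped, needs a slot. */
	kaddr = kmap_atomic(page, KM_USER1);
	memcpy(dst, kaddr + offset, cur);
	kunmap_atomic(kaddr, KM_USER1);

	/* New: lowmem page, direct address, nothing to undo. */
	kaddr = page_address(page);
	memcpy(dst, kaddr + offset, cur);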
@@ -3504,9 +3542,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 }
 
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
-			      unsigned long min_len, char **token, char **map,
+			      unsigned long min_len, char **map,
 			      unsigned long *map_start,
-			      unsigned long *map_len, int km)
+			      unsigned long *map_len)
 {
 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
 	char *kaddr;
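Note: with permanent mappings there is no kmap token to hand back and no KM slot to pick, so the token/km parameters fall out of map_private_extent_buffer(), and map_extent_buffer()/unmap_extent_buffer(), deleted in the next hunk, lose their reason to exist. A hypothetical caller against the new signature:

	char *map;
	unsigned long map_start;
	unsigned long map_len;
	int err;

	err = map_private_extent_buffer(eb, start, sizeof(u64),
					&map, &map_start, &map_len);
	if (!err) {
		/* 'map' stays valid as long as the page does;
		 * there is no unmap step any more. */
	}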
@@ -3536,42 +3574,12 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	}
 
 	p = extent_buffer_page(eb, i);
-	kaddr = kmap_atomic(p, km);
-	*token = kaddr;
+	kaddr = page_address(p);
 	*map = kaddr + offset;
 	*map_len = PAGE_CACHE_SIZE - offset;
 	return 0;
 }
 
-int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
-		      unsigned long min_len,
-		      char **token, char **map,
-		      unsigned long *map_start,
-		      unsigned long *map_len, int km)
-{
-	int err;
-	int save = 0;
-	if (eb->map_token) {
-		unmap_extent_buffer(eb, eb->map_token, km);
-		eb->map_token = NULL;
-		save = 1;
-	}
-	err = map_private_extent_buffer(eb, start, min_len, token, map,
-					map_start, map_len, km);
-	if (!err && save) {
-		eb->map_token = *token;
-		eb->kaddr = *map;
-		eb->map_start = *map_start;
-		eb->map_len = *map_len;
-	}
-	return err;
-}
-
-void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
-{
-	kunmap_atomic(token, km);
-}
-
 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 			 unsigned long start,
 			 unsigned long len)
@@ -3595,9 +3603,8 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 
 		cur = min(len, (PAGE_CACHE_SIZE - offset));
 
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		ret = memcmp(ptr, kaddr + offset, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 		if (ret)
 			break;
 
@@ -3630,9 +3637,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 		WARN_ON(!PageUptodate(page));
 
 		cur = min(len, PAGE_CACHE_SIZE - offset);
-		kaddr = kmap_atomic(page, KM_USER1);
+		kaddr = page_address(page);
 		memcpy(kaddr + offset, src, cur);
-		kunmap_atomic(kaddr, KM_USER1);
 
 		src += cur;
 		len -= cur;
@@ -3661,9 +3667,8 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 		WARN_ON(!PageUptodate(page));
 
 		cur = min(len, PAGE_CACHE_SIZE - offset);
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		memset(kaddr + offset, c, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 
 		len -= cur;
 		offset = 0;
@@ -3694,9 +3699,8 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 
 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
 
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
-		kunmap_atomic(kaddr, KM_USER0);
 
 		src_offset += cur;
 		len -= cur;
@@ -3709,20 +3713,17 @@ static void move_pages(struct page *dst_page, struct page *src_page,
 			unsigned long dst_off, unsigned long src_off,
 			unsigned long len)
 {
-	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
 	if (dst_page == src_page) {
 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
 	} else {
-		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
+		char *src_kaddr = page_address(src_page);
 		char *p = dst_kaddr + dst_off + len;
 		char *s = src_kaddr + src_off + len;
 
 		while (len--)
 			*--p = *--s;
-
-		kunmap_atomic(src_kaddr, KM_USER1);
 	}
-	kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
@@ -3735,20 +3736,17 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
 			unsigned long dst_off, unsigned long src_off,
 			unsigned long len)
 {
-	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
 	char *src_kaddr;
 
 	if (dst_page != src_page) {
-		src_kaddr = kmap_atomic(src_page, KM_USER1);
+		src_kaddr = page_address(src_page);
 	} else {
 		src_kaddr = dst_kaddr;
 		BUG_ON(areas_overlap(src_off, dst_off, len));
 	}
 
 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
-	kunmap_atomic(dst_kaddr, KM_USER0);
-	if (dst_page != src_page)
-		kunmap_atomic(src_kaddr, KM_USER1);
 }
 
 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,