about summary refs log tree commit diff stats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2011-07-19 12:04:14 -0400
committerChris Mason <chris.mason@oracle.com>2011-07-27 12:46:45 -0400
commita65917156e345946dbde3d7effd28124c6d6a8c2 (patch)
treedc5478189be0f6a321bfc23ee0545f71de67763b /fs/btrfs/extent_io.c
parent199c36eaa95077a47ae1bc55532fc0fbeb80cc95 (diff)
Btrfs: stop using highmem for extent_buffers
The extent_buffers have a very complex interface where we use HIGHMEM for metadata and try to cache a kmap mapping to access the memory. The next commit adds reader/writer locks, and concurrent use of this kmap cache would make it even more complex. This commit drops the ability to use HIGHMEM with extent buffers, and rips out all of the related code.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--fs/btrfs/extent_io.c67
1 file changed, 13 insertions(+), 54 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 89bbde46bd83..76ecbb8ed0e0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3114,7 +3114,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3114 i = 0; 3114 i = 0;
3115 } 3115 }
3116 for (; i < num_pages; i++, index++) { 3116 for (; i < num_pages; i++, index++) {
3117 p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM); 3117 p = find_or_create_page(mapping, index, GFP_NOFS);
3118 if (!p) { 3118 if (!p) {
3119 WARN_ON(1); 3119 WARN_ON(1);
3120 goto free_eb; 3120 goto free_eb;
@@ -3487,9 +3487,8 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3487 page = extent_buffer_page(eb, i); 3487 page = extent_buffer_page(eb, i);
3488 3488
3489 cur = min(len, (PAGE_CACHE_SIZE - offset)); 3489 cur = min(len, (PAGE_CACHE_SIZE - offset));
3490 kaddr = kmap_atomic(page, KM_USER1); 3490 kaddr = page_address(page);
3491 memcpy(dst, kaddr + offset, cur); 3491 memcpy(dst, kaddr + offset, cur);
3492 kunmap_atomic(kaddr, KM_USER1);
3493 3492
3494 dst += cur; 3493 dst += cur;
3495 len -= cur; 3494 len -= cur;
@@ -3499,9 +3498,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3499} 3498}
3500 3499
3501int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, 3500int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3502 unsigned long min_len, char **token, char **map, 3501 unsigned long min_len, char **map,
3503 unsigned long *map_start, 3502 unsigned long *map_start,
3504 unsigned long *map_len, int km) 3503 unsigned long *map_len)
3505{ 3504{
3506 size_t offset = start & (PAGE_CACHE_SIZE - 1); 3505 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3507 char *kaddr; 3506 char *kaddr;
@@ -3531,42 +3530,12 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3531 } 3530 }
3532 3531
3533 p = extent_buffer_page(eb, i); 3532 p = extent_buffer_page(eb, i);
3534 kaddr = kmap_atomic(p, km); 3533 kaddr = page_address(p);
3535 *token = kaddr;
3536 *map = kaddr + offset; 3534 *map = kaddr + offset;
3537 *map_len = PAGE_CACHE_SIZE - offset; 3535 *map_len = PAGE_CACHE_SIZE - offset;
3538 return 0; 3536 return 0;
3539} 3537}
3540 3538
3541int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3542 unsigned long min_len,
3543 char **token, char **map,
3544 unsigned long *map_start,
3545 unsigned long *map_len, int km)
3546{
3547 int err;
3548 int save = 0;
3549 if (eb->map_token) {
3550 unmap_extent_buffer(eb, eb->map_token, km);
3551 eb->map_token = NULL;
3552 save = 1;
3553 }
3554 err = map_private_extent_buffer(eb, start, min_len, token, map,
3555 map_start, map_len, km);
3556 if (!err && save) {
3557 eb->map_token = *token;
3558 eb->kaddr = *map;
3559 eb->map_start = *map_start;
3560 eb->map_len = *map_len;
3561 }
3562 return err;
3563}
3564
3565void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3566{
3567 kunmap_atomic(token, km);
3568}
3569
3570int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, 3539int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3571 unsigned long start, 3540 unsigned long start,
3572 unsigned long len) 3541 unsigned long len)
@@ -3590,9 +3559,8 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3590 3559
3591 cur = min(len, (PAGE_CACHE_SIZE - offset)); 3560 cur = min(len, (PAGE_CACHE_SIZE - offset));
3592 3561
3593 kaddr = kmap_atomic(page, KM_USER0); 3562 kaddr = page_address(page);
3594 ret = memcmp(ptr, kaddr + offset, cur); 3563 ret = memcmp(ptr, kaddr + offset, cur);
3595 kunmap_atomic(kaddr, KM_USER0);
3596 if (ret) 3564 if (ret)
3597 break; 3565 break;
3598 3566
@@ -3625,9 +3593,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3625 WARN_ON(!PageUptodate(page)); 3593 WARN_ON(!PageUptodate(page));
3626 3594
3627 cur = min(len, PAGE_CACHE_SIZE - offset); 3595 cur = min(len, PAGE_CACHE_SIZE - offset);
3628 kaddr = kmap_atomic(page, KM_USER1); 3596 kaddr = page_address(page);
3629 memcpy(kaddr + offset, src, cur); 3597 memcpy(kaddr + offset, src, cur);
3630 kunmap_atomic(kaddr, KM_USER1);
3631 3598
3632 src += cur; 3599 src += cur;
3633 len -= cur; 3600 len -= cur;
@@ -3656,9 +3623,8 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
3656 WARN_ON(!PageUptodate(page)); 3623 WARN_ON(!PageUptodate(page));
3657 3624
3658 cur = min(len, PAGE_CACHE_SIZE - offset); 3625 cur = min(len, PAGE_CACHE_SIZE - offset);
3659 kaddr = kmap_atomic(page, KM_USER0); 3626 kaddr = page_address(page);
3660 memset(kaddr + offset, c, cur); 3627 memset(kaddr + offset, c, cur);
3661 kunmap_atomic(kaddr, KM_USER0);
3662 3628
3663 len -= cur; 3629 len -= cur;
3664 offset = 0; 3630 offset = 0;
@@ -3689,9 +3655,8 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3689 3655
3690 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); 3656 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3691 3657
3692 kaddr = kmap_atomic(page, KM_USER0); 3658 kaddr = page_address(page);
3693 read_extent_buffer(src, kaddr + offset, src_offset, cur); 3659 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3694 kunmap_atomic(kaddr, KM_USER0);
3695 3660
3696 src_offset += cur; 3661 src_offset += cur;
3697 len -= cur; 3662 len -= cur;
@@ -3704,20 +3669,17 @@ static void move_pages(struct page *dst_page, struct page *src_page,
3704 unsigned long dst_off, unsigned long src_off, 3669 unsigned long dst_off, unsigned long src_off,
3705 unsigned long len) 3670 unsigned long len)
3706{ 3671{
3707 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); 3672 char *dst_kaddr = page_address(dst_page);
3708 if (dst_page == src_page) { 3673 if (dst_page == src_page) {
3709 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len); 3674 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3710 } else { 3675 } else {
3711 char *src_kaddr = kmap_atomic(src_page, KM_USER1); 3676 char *src_kaddr = page_address(src_page);
3712 char *p = dst_kaddr + dst_off + len; 3677 char *p = dst_kaddr + dst_off + len;
3713 char *s = src_kaddr + src_off + len; 3678 char *s = src_kaddr + src_off + len;
3714 3679
3715 while (len--) 3680 while (len--)
3716 *--p = *--s; 3681 *--p = *--s;
3717
3718 kunmap_atomic(src_kaddr, KM_USER1);
3719 } 3682 }
3720 kunmap_atomic(dst_kaddr, KM_USER0);
3721} 3683}
3722 3684
3723static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) 3685static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
@@ -3730,20 +3692,17 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
3730 unsigned long dst_off, unsigned long src_off, 3692 unsigned long dst_off, unsigned long src_off,
3731 unsigned long len) 3693 unsigned long len)
3732{ 3694{
3733 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); 3695 char *dst_kaddr = page_address(dst_page);
3734 char *src_kaddr; 3696 char *src_kaddr;
3735 3697
3736 if (dst_page != src_page) { 3698 if (dst_page != src_page) {
3737 src_kaddr = kmap_atomic(src_page, KM_USER1); 3699 src_kaddr = page_address(src_page);
3738 } else { 3700 } else {
3739 src_kaddr = dst_kaddr; 3701 src_kaddr = dst_kaddr;
3740 BUG_ON(areas_overlap(src_off, dst_off, len)); 3702 BUG_ON(areas_overlap(src_off, dst_off, len));
3741 } 3703 }
3742 3704
3743 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 3705 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3744 kunmap_atomic(dst_kaddr, KM_USER0);
3745 if (dst_page != src_page)
3746 kunmap_atomic(src_kaddr, KM_USER1);
3747} 3706}
3748 3707
3749void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 3708void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,