author     Josef Bacik <josef@redhat.com>          2012-03-07 16:20:05 -0500
committer  Chris Mason <chris.mason@oracle.com>    2012-03-26 16:51:07 -0400
commit     4f2de97acee6532b36dd6e995b858343771ad126 (patch)
tree       240ee16e76f2f2e4850e6fe2e90174e7cd794a9b /fs
parent     727011e07cbdf87772fcc1999cccd15cc915eb62 (diff)
Btrfs: set page->private to the eb
We spend a lot of time looking up extent buffers from pages when we could just store the pointer to the eb the page is associated with in page->private. This patch does just that, and it makes things a little simpler and reduces a bit of CPU overhead involved with doing metadata IO. Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
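For readers skimming the diff below: the patch replaces the radix-tree lookup keyed by the length that used to be packed into page->private with a direct pointer stored there. A minimal sketch of the old and new lookup paths, assuming the pre-patch find_extent_buffer() helper; the eb_for_page_old()/eb_for_page_new() wrapper names are invented here for illustration and are not functions from the btrfs sources:

/* Before this patch: page->private encodes a length, and the eb is found
 * via a radix-tree lookup that also takes a reference. */
static struct extent_buffer *eb_for_page_old(struct extent_io_tree *tree,
					     struct page *page, u64 start)
{
	unsigned long len;

	if (!page->private || page->private == EXTENT_PAGE_PRIVATE)
		return NULL;		/* no eb association recorded */
	len = page->private >> 2;	/* length was stored shifted by 2 */
	return find_extent_buffer(tree, start, len);	/* lookup + ref */
}

/* After this patch: page->private is the eb pointer itself, so the caller
 * just casts it; no tree lookup and no extra reference to drop later. */
static struct extent_buffer *eb_for_page_new(struct page *page)
{
	return (struct extent_buffer *)page->private;
}

This is also why the hunks below can drop the free_extent_buffer() calls that paired with the old lookups.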
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/disk-io.c    91
-rw-r--r--  fs/btrfs/extent_io.c  92
-rw-r--r--  fs/btrfs/extent_io.h   1
3 files changed, 91 insertions(+), 93 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 68fc93e18db8..bc88649cffb7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -403,39 +403,28 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 	struct extent_io_tree *tree;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 found_start;
-	unsigned long len;
 	struct extent_buffer *eb;
 
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-	if (page->private == EXTENT_PAGE_PRIVATE)
-		goto out;
-	if (!page->private) {
-		WARN_ON(1);
-		goto out;
-	}
-	len = page->private >> 2;
-	WARN_ON(len == 0);
-
-	eb = find_extent_buffer(tree, start, len);
+	eb = (struct extent_buffer *)page->private;
+	if (page != eb->pages[0])
+		return 0;
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
 		WARN_ON(1);
-		goto err;
+		return 0;
 	}
 	if (eb->pages[0] != page) {
 		WARN_ON(1);
-		goto err;
+		return 0;
 	}
 	if (!PageUptodate(page)) {
 		WARN_ON(1);
-		goto err;
+		return 0;
 	}
 	csum_tree_block(root, eb, 0);
-err:
-	free_extent_buffer(eb);
-out:
 	return 0;
 }
 
@@ -566,7 +555,6 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	struct extent_io_tree *tree;
 	u64 found_start;
 	int found_level;
-	unsigned long len;
 	struct extent_buffer *eb;
 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 	int ret = 0;
@@ -576,13 +564,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 		goto out;
 
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	len = page->private >> 2;
+	eb = (struct extent_buffer *)page->private;
 
-	eb = find_eb_for_page(tree, page, max(root->leafsize, root->nodesize));
-	if (!eb) {
-		ret = -EIO;
-		goto out;
-	}
 	reads_done = atomic_dec_and_test(&eb->pages_reading);
 	if (!reads_done)
 		goto err;
@@ -631,7 +614,6 @@ err:
 
 	if (ret && eb)
 		clear_extent_buffer_uptodate(tree, eb, NULL);
-	free_extent_buffer(eb);
 out:
 	return ret;
 }
@@ -640,31 +622,17 @@ static int btree_io_failed_hook(struct bio *failed_bio,
 				 struct page *page, u64 start, u64 end,
 				 int mirror_num, struct extent_state *state)
 {
-	struct extent_io_tree *tree;
-	unsigned long len;
 	struct extent_buffer *eb;
 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	if (page->private == EXTENT_PAGE_PRIVATE)
-		goto out;
-	if (!page->private)
-		goto out;
-
-	len = page->private >> 2;
-	WARN_ON(len == 0);
-
-	eb = alloc_extent_buffer(tree, start, len);
-	if (eb == NULL)
-		goto out;
+	eb = (struct extent_buffer *)page->private;
+	if (page != eb->pages[0])
+		return -EIO;
 
 	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
 		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
 		btree_readahead_hook(root, eb, eb->start, -EIO);
 	}
-	free_extent_buffer(eb);
-
-out:
 	return -EIO;	/* we fixed nothing */
 }
 
@@ -955,10 +923,8 @@ static int btree_readpage(struct file *file, struct page *page)
 
 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 {
-	struct extent_io_tree *tree;
 	struct extent_map_tree *map;
-	struct extent_buffer *eb;
-	struct btrfs_root *root;
+	struct extent_io_tree *tree;
 	int ret;
 
 	if (PageWriteback(page) || PageDirty(page))
@@ -967,13 +933,6 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	map = &BTRFS_I(page->mapping->host)->extent_tree;
 
-	root = BTRFS_I(page->mapping->host)->root;
-	if (page->private == EXTENT_PAGE_PRIVATE) {
-		eb = find_eb_for_page(tree, page, max(root->leafsize, root->nodesize));
-		free_extent_buffer(eb);
-		if (eb)
-			return 0;
-	}
 	/*
 	 * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
 	 * slab allocation from alloc_extent_state down the callchain where
@@ -985,14 +944,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 	if (!ret)
 		return 0;
 
-	ret = try_release_extent_buffer(tree, page);
-	if (ret == 1) {
-		ClearPagePrivate(page);
-		set_page_private(page, 0);
-		page_cache_release(page);
-	}
-
-	return ret;
+	return try_release_extent_buffer(tree, page);
 }
 
 static void btree_invalidatepage(struct page *page, unsigned long offset)
@@ -3219,17 +3171,21 @@ static int btree_lock_page_hook(struct page *page, void *data,
 {
 	struct inode *inode = page->mapping->host;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_buffer *eb;
-	unsigned long len;
-	u64 bytenr = page_offset(page);
 
-	if (page->private == EXTENT_PAGE_PRIVATE)
+	/*
+	 * We culled this eb but the page is still hanging out on the mapping,
+	 * carry on.
+	 */
+	if (!PagePrivate(page))
 		goto out;
 
-	len = page->private >> 2;
-	eb = find_extent_buffer(io_tree, bytenr, len);
-	if (!eb)
+	eb = (struct extent_buffer *)page->private;
+	if (!eb) {
+		WARN_ON(1);
+		goto out;
+	}
+	if (page != eb->pages[0])
 		goto out;
 
 	if (!btrfs_try_tree_write_lock(eb)) {
@@ -3248,7 +3204,6 @@ static int btree_lock_page_hook(struct page *page, void *data,
 	}
 
 	btrfs_tree_unlock(eb);
-	free_extent_buffer(eb);
 out:
 	if (!trylock_page(page)) {
 		flush_fn(data);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c6c9ce463c86..0381b6007ae4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2473,19 +2473,24 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 	return ret;
 }
 
-void set_page_extent_mapped(struct page *page)
+void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
 		page_cache_get(page);
-		set_page_private(page, EXTENT_PAGE_PRIVATE);
+		set_page_private(page, (unsigned long)eb);
+	} else {
+		WARN_ON(page->private != (unsigned long)eb);
 	}
 }
 
-static void set_page_extent_head(struct page *page, unsigned long len)
+void set_page_extent_mapped(struct page *page)
 {
-	WARN_ON(!PagePrivate(page));
-	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
+	if (!PagePrivate(page)) {
+		SetPagePrivate(page);
+		page_cache_get(page);
+		set_page_private(page, EXTENT_PAGE_PRIVATE);
+	}
 }
 
 /*
@@ -3585,6 +3590,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 		return NULL;
 	eb->start = start;
 	eb->len = len;
+	eb->tree = tree;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->write_locks, 0);
 	atomic_set(&eb->read_locks, 0);
@@ -3637,8 +3643,31 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
 	do {
 		index--;
 		page = extent_buffer_page(eb, index);
-		if (page)
+		if (page) {
+			spin_lock(&page->mapping->private_lock);
+			/*
+			 * We do this since we'll remove the pages after we've
+			 * removed the eb from the radix tree, so we could race
+			 * and have this page now attached to the new eb. So
+			 * only clear page_private if it's still connected to
+			 * this eb.
+			 */
+			if (PagePrivate(page) &&
+			    page->private == (unsigned long)eb) {
+				/*
+				 * We need to make sure we haven't be attached
+				 * to a new eb.
+				 */
+				ClearPagePrivate(page);
+				set_page_private(page, 0);
+				/* One for the page private */
+				page_cache_release(page);
+			}
+			spin_unlock(&page->mapping->private_lock);
+
+			/* One for when we alloced the page */
 			page_cache_release(page);
+		}
 	} while (index != start_idx);
 }
 
@@ -3683,6 +3712,32 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 			WARN_ON(1);
 			goto free_eb;
 		}
+
+		spin_lock(&mapping->private_lock);
+		if (PagePrivate(p)) {
+			/*
+			 * We could have already allocated an eb for this page
+			 * and attached one so lets see if we can get a ref on
+			 * the existing eb, and if we can we know it's good and
+			 * we can just return that one, else we know we can just
+			 * overwrite page->private.
+			 */
+			exists = (struct extent_buffer *)p->private;
+			if (atomic_inc_not_zero(&exists->refs)) {
+				spin_unlock(&mapping->private_lock);
+				unlock_page(p);
+				goto free_eb;
+			}
+
+			/*
+			 * Do this so attach doesn't complain and we need to
+			 * drop the ref the old guy had.
+			 */
+			ClearPagePrivate(p);
+			page_cache_release(p);
+		}
+		attach_extent_buffer_page(eb, p);
+		spin_unlock(&mapping->private_lock);
 		mark_page_accessed(p);
 		eb->pages[i] = p;
 		if (!PageUptodate(p))
@@ -3705,7 +3760,6 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	if (ret == -EEXIST) {
 		exists = radix_tree_lookup(&tree->buffer,
 						start >> PAGE_CACHE_SHIFT);
-		/* add one reference for the caller */
 		atomic_inc(&exists->refs);
 		spin_unlock(&tree->buffer_lock);
 		radix_tree_preload_end();
@@ -3725,12 +3779,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	 * after the extent buffer is in the radix tree so
 	 * it doesn't get lost
 	 */
-	set_page_extent_mapped(eb->pages[0]);
-	set_page_extent_head(eb->pages[0], eb->len);
 	SetPageChecked(eb->pages[0]);
 	for (i = 1; i < num_pages; i++) {
 		p = extent_buffer_page(eb, i);
-		set_page_extent_mapped(p);
 		ClearPageChecked(p);
 		unlock_page(p);
 	}
@@ -3794,10 +3845,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 		lock_page(page);
 		WARN_ON(!PagePrivate(page));
 
-		set_page_extent_mapped(page);
-		if (i == 0)
-			set_page_extent_head(page, eb->len);
-
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
 		if (!PageDirty(page)) {
@@ -4010,9 +4057,6 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	atomic_set(&eb->pages_reading, num_reads);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		set_page_extent_mapped(page);
-		if (i == 0)
-			set_page_extent_head(page, eb->len);
 		if (!PageUptodate(page)) {
 			ClearPageError(page);
 			err = __extent_read_full_page(tree, page,
@@ -4395,22 +4439,19 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
 	struct extent_buffer *eb =
 			container_of(head, struct extent_buffer, rcu_head);
 
-	btrfs_release_extent_buffer(eb);
+	__free_extent_buffer(eb);
 }
 
 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = page_offset(page);
-	struct extent_buffer *eb;
+	struct extent_buffer *eb = (struct extent_buffer *)page->private;
 	int ret = 1;
 
-	spin_lock(&tree->buffer_lock);
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-	if (!eb) {
-		spin_unlock(&tree->buffer_lock);
-		return ret;
-	}
+	if (!PagePrivate(page) || !eb)
+		return 1;
 
+	spin_lock(&tree->buffer_lock);
 	if (atomic_read(&eb->refs) > 1 ||
 	    test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
 		ret = 0;
@@ -4426,6 +4467,7 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 		goto out;
 	}
 	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	btrfs_release_extent_buffer_page(eb, 0);
 out:
 	spin_unlock(&tree->buffer_lock);
 
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4e38a3d9631a..83e432da2e26 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -127,6 +127,7 @@ struct extent_buffer {
 	unsigned long map_start;
 	unsigned long map_len;
 	unsigned long bflags;
+	struct extent_io_tree *tree;
 	atomic_t refs;
 	atomic_t pages_reading;
 	struct list_head leak_list;