author		Chris Mason <chris.mason@oracle.com>	2009-03-13 11:00:37 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-03-24 16:14:28 -0400
commit		b9473439d3e84d9fc1a0a83faca69cc1b7566341 (patch)
tree		bef8321b80589026b617d61d0fabaf545d459269 /fs/btrfs/extent_io.c
parent		89573b9c516b24af8a3b9958dd5afca8fa874e3d (diff)
Btrfs: leave btree locks spinning more often
btrfs_mark_buffer_dirty would set dirty bits in the extent_io tree
for the buffers it was dirtying.  This may require a kmalloc and it
was not atomic, so anyone who called btrfs_mark_buffer_dirty had to
set any btree locks they were holding to blocking first.
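As a rough sketch (not taken from this patch), the old calling pattern
looked something like the following, assuming a held btrfs_path named
path and the existing btrfs_set_path_blocking() helper:

	/* dirtying the leaf may allocate, so drop to blocking locks first */
	btrfs_set_path_blocking(path);
	btrfs_mark_buffer_dirty(path->nodes[0]);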
This commit changes dirty tracking for extent buffers to just use a flag
in the extent buffer. Now that we have one and only one extent buffer
per page, this can be safely done without losing dirty bits along the way.
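In code terms, the new tracking boils down to an atomic bit operation on
the extent buffer itself; the line below mirrors the hunk in
set_extent_buffer_dirty() further down, needs no allocation, and is safe
to do while holding a spinning lock:

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);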
This also introduces a path->leave_spinning flag that callers of
btrfs_search_slot can use to indicate they will properly deal with a
path returned where all the locks are spinning instead of blocking.
Many of the btree search callers now expect spinning paths,
resulting in better btree concurrency overall.
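For illustration only (the surrounding transaction handle, root, and key
are assumed to exist in the caller), a search caller that opts in might
look roughly like this:

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* we will cope with a path whose locks are still spinning */
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);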
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	51
1 file changed, 9 insertions(+), 42 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ebe6b29e6069..08085af089e2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3124,20 +3124,15 @@ void free_extent_buffer(struct extent_buffer *eb)
 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			      struct extent_buffer *eb)
 {
-	int set;
 	unsigned long i;
 	unsigned long num_pages;
 	struct page *page;
 
-	u64 start = eb->start;
-	u64 end = start + eb->len - 1;
-
-	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
 	num_pages = num_extent_pages(eb->start, eb->len);
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		if (!set && !PageDirty(page))
+		if (!PageDirty(page))
 			continue;
 
 		lock_page(page);
@@ -3146,22 +3141,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 		else
 			set_page_private(page, EXTENT_PAGE_PRIVATE);
 
-		/*
-		 * if we're on the last page or the first page and the
-		 * block isn't aligned on a page boundary, do extra checks
-		 * to make sure we don't clean page that is partially dirty
-		 */
-		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
-		    ((i == num_pages - 1) &&
-		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
-			start = (u64)page->index << PAGE_CACHE_SHIFT;
-			end = start + PAGE_CACHE_SIZE - 1;
-			if (test_range_bit(tree, start, end,
-					   EXTENT_DIRTY, 0)) {
-				unlock_page(page);
-				continue;
-			}
-		}
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
 		if (!PageDirty(page)) {
@@ -3187,29 +3166,13 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 {
 	unsigned long i;
 	unsigned long num_pages;
+	int was_dirty = 0;
 
+	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 	num_pages = num_extent_pages(eb->start, eb->len);
-	for (i = 0; i < num_pages; i++) {
-		struct page *page = extent_buffer_page(eb, i);
-		/* writepage may need to do something special for the
-		 * first page, we have to make sure page->private is
-		 * properly set.  releasepage may drop page->private
-		 * on us if the page isn't already dirty.
-		 */
-		lock_page(page);
-		if (i == 0) {
-			set_page_extent_head(page, eb->len);
-		} else if (PagePrivate(page) &&
-			   page->private != EXTENT_PAGE_PRIVATE) {
-			set_page_extent_mapped(page);
-		}
+	for (i = 0; i < num_pages; i++)
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
-		set_extent_dirty(tree, page_offset(page),
-				 page_offset(page) + PAGE_CACHE_SIZE - 1,
-				 GFP_NOFS);
-		unlock_page(page);
-	}
-	return 0;
+	return was_dirty;
 }
 
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
@@ -3789,6 +3752,10 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 		ret = 0;
 		goto out;
 	}
+	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+		ret = 0;
+		goto out;
+	}
 	/* at this point we can safely release the extent buffer */
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = 0; i < num_pages; i++)