author     Chris Mason <chris.mason@oracle.com>    2011-02-10 12:35:00 -0500
committer  Chris Mason <chris.mason@oracle.com>    2011-02-14 13:03:52 -0500
commit     eb14ab8ed24a0405fd056068b28c33a1cd846024
tree       1451cad453a3ff66bc5369dc83daf7e3a50c4897    /fs/btrfs/extent_io.c
parent     3a90983dbdcb2f4f48c0d771d8e5b4d88f27fae6
Btrfs: fix page->private races
There is a race where btrfs_releasepage can drop the
page->private contents just as alloc_extent_buffer is setting
up pages for metadata. Because of how the Btrfs page flags work,
this results in us skipping the crc on the page during IO.
This patch solves the race by waiting until after the extent buffer
is inserted into the radix tree before setting page->private.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
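The ordering can be illustrated with a small model. The sketch below is a hypothetical, single-threaded userspace illustration (fake_page, fake_tree, fake_releasepage and the other names are invented for this example and are not btrfs APIs): releasepage clears the private bit whenever it cannot find the buffer in the tree, so setting the bit before the buffer is visible loses it, while publishing the buffer first preserves it. The real patch additionally keeps the first page locked across this window, which this model does not capture.

/* Hypothetical model only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_page { bool page_private; };  /* stands in for page->private   */
struct fake_tree { bool eb_inserted; };   /* stands in for the radix tree  */

/* Model of btrfs_releasepage: if the extent buffer is not found in the
 * tree, it assumes the page is reclaimable and clears the private bit. */
static void fake_releasepage(struct fake_tree *t, struct fake_page *p)
{
	if (!t->eb_inserted)
		p->page_private = false;
}

/* Racy order: set the private bit first, insert the buffer second.
 * A releasepage that runs in between wipes the bit we just set. */
static bool alloc_eb_racy(struct fake_tree *t, struct fake_page *p)
{
	p->page_private = true;
	fake_releasepage(t, p);     /* simulated concurrent call */
	t->eb_inserted = true;
	return p->page_private;     /* false: the crc would be skipped */
}

/* Fixed order (what the patch does): insert the buffer first, then set
 * the private bit, so a failed lookup can no longer clobber it. */
static bool alloc_eb_fixed(struct fake_tree *t, struct fake_page *p)
{
	t->eb_inserted = true;
	fake_releasepage(t, p);     /* simulated concurrent call */
	p->page_private = true;
	return p->page_private;     /* true: private contents survive */
}

int main(void)
{
	struct fake_tree t1 = { false }, t2 = { false };
	struct fake_page p1 = { false }, p2 = { false };

	printf("racy order keeps page private:  %d\n", alloc_eb_racy(&t1, &p1));
	printf("fixed order keeps page private: %d\n", alloc_eb_fixed(&t2, &p2));
	return 0;
}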
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--   fs/btrfs/extent_io.c | 38 +++++++++++++++++++++++++++++++++++---
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8862dda46ff6..0418bf2c9757 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1946,6 +1946,7 @@ void set_page_extent_mapped(struct page *page)
 
 static void set_page_extent_head(struct page *page, unsigned long len)
 {
+	WARN_ON(!PagePrivate(page));
 	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
 }
 
@@ -3195,7 +3196,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		}
 		if (!PageUptodate(p))
 			uptodate = 0;
-		unlock_page(p);
+
+		/*
+		 * see below about how we avoid a nasty race with release page
+		 * and why we unlock later
+		 */
+		if (i != 0)
+			unlock_page(p);
 	}
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3219,9 +3226,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	atomic_inc(&eb->refs);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
+
+	/*
+	 * there is a race where release page may have
+	 * tried to find this extent buffer in the radix
+	 * but failed.  It will tell the VM it is safe to
+	 * reclaim the page, and it will clear the page private bit.
+	 * We must make sure to set the page private bit properly
+	 * after the extent buffer is in the radix tree so
+	 * it doesn't get lost
+	 */
+	set_page_extent_mapped(eb->first_page);
+	set_page_extent_head(eb->first_page, eb->len);
+	if (!page0)
+		unlock_page(eb->first_page);
 	return eb;
 
 free_eb:
+	if (eb->first_page && !page0)
+		unlock_page(eb->first_page);
+
 	if (!atomic_dec_and_test(&eb->refs))
 		return exists;
 	btrfs_release_extent_buffer(eb);
@@ -3272,10 +3296,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			continue;
 
 		lock_page(page);
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
-		else
-			set_page_private(page, EXTENT_PAGE_PRIVATE);
 
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
@@ -3465,6 +3490,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
+		if (i == 0)
+			set_page_extent_head(page, eb->len);
+
 		if (inc_all_pages)
 			page_cache_get(page);
 		if (!PageUptodate(page)) {