Diffstat (limited to 'fs/btrfs/extent_io.c'):
 fs/btrfs/extent_io.c | 48
 1 file changed, 44 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 5e76a474cb7e..92ac5192c518 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1946,6 +1946,7 @@ void set_page_extent_mapped(struct page *page)
 
 static void set_page_extent_head(struct page *page, unsigned long len)
 {
+	WARN_ON(!PagePrivate(page));
 	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
 }
 
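
The WARN_ON() added above makes a precondition explicit: set_page_extent_head() only overwrites page->private with the head-page tag, so set_page_extent_mapped() must already have attached page->private. The later hunks add that call in front of every set_page_extent_head() caller. The sketch below is a standalone userspace model of how the private word is packed; the flag values are an assumption (EXTENT_PAGE_PRIVATE = 1 and EXTENT_PAGE_PRIVATE_FIRST_PAGE = 3, as in extent_io.h of this era), and only the "length shifted up by two bits over low flag bits" shape is the point.

#include <stdio.h>

/* assumed flag values, mirroring extent_io.h of this era */
#define EXTENT_PAGE_PRIVATE		1UL
#define EXTENT_PAGE_PRIVATE_FIRST_PAGE	3UL

int main(void)
{
	unsigned long len = 16384;	/* example extent buffer length */
	unsigned long priv = EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2;

	printf("private word : 0x%lx\n", priv);
	printf("decoded len  : %lu\n", priv >> 2);	/* length sits above the flag bits */
	printf("is head page : %d\n", (priv & 3) == EXTENT_PAGE_PRIVATE_FIRST_PAGE);
	return 0;
}
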
@@ -2821,9 +2822,17 @@ int try_release_extent_state(struct extent_map_tree *map,
 		 * at this point we can safely clear everything except the
 		 * locked bit and the nodatasum bit
 		 */
-		clear_extent_bit(tree, start, end,
+		ret = clear_extent_bit(tree, start, end,
 				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
 				 0, 0, NULL, mask);
+
+		/* if clear_extent_bit failed for enomem reasons,
+		 * we can't allow the release to continue.
+		 */
+		if (ret < 0)
+			ret = 0;
+		else
+			ret = 1;
 	}
 	return ret;
 }
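
try_release_extent_state() feeds the btrfs releasepage path, where a nonzero return tells the VM the extent state is gone and the page may be reclaimed. The hunk above makes a failed clear_extent_bit() (an -ENOMEM return) force the function to report 0, so a page whose state could not be cleared is never handed back. A minimal userspace sketch of just that return-value convention follows; try_release() and its caller are invented names for illustration, not kernel API.

#include <stdio.h>
#include <errno.h>

/* model of the convention: a negative errno from the clearing step means "keep the page" */
static int try_release(int clear_result)
{
	if (clear_result < 0)
		return 0;	/* clearing failed (e.g. -ENOMEM): VM must keep the page */
	return 1;		/* state cleared: safe to release */
}

int main(void)
{
	printf("clear ok      -> release=%d\n", try_release(0));
	printf("clear -ENOMEM -> release=%d\n", try_release(-ENOMEM));
	return 0;
}
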
@@ -3194,7 +3203,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		}
 		if (!PageUptodate(p))
 			uptodate = 0;
-		unlock_page(p);
+
+		/*
+		 * see below about how we avoid a nasty race with release page
+		 * and why we unlock later
+		 */
+		if (i != 0)
+			unlock_page(p);
 	}
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3218,9 +3233,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	atomic_inc(&eb->refs);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
+
+	/*
+	 * there is a race where release page may have
+	 * tried to find this extent buffer in the radix
+	 * but failed.  It will tell the VM it is safe to
+	 * reclaim the page, and it will clear the page private bit.
+	 * We must make sure to set the page private bit properly
+	 * after the extent buffer is in the radix tree so
+	 * it doesn't get lost
+	 */
+	set_page_extent_mapped(eb->first_page);
+	set_page_extent_head(eb->first_page, eb->len);
+	if (!page0)
+		unlock_page(eb->first_page);
 	return eb;
 
 free_eb:
+	if (eb->first_page && !page0)
+		unlock_page(eb->first_page);
+
 	if (!atomic_dec_and_test(&eb->refs))
 		return exists;
 	btrfs_release_extent_buffer(eb);
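
The two alloc_extent_buffer() hunks close a window where releasepage could look the buffer up in the radix tree, miss it because it has not been inserted yet, decide the page is unused, and clear the page private bit, losing the extent buffer pointer. The fix keeps the first page locked until the buffer is in the radix tree and page->private has been set again, and the free_eb error path now drops that lock. The sketch below models only the ordering in userspace, with a pthread mutex standing in for the page lock; every name in it (fake_page, publish_buffer, release_page) is invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	pthread_mutex_t lock;	/* stands in for the page lock */
	bool private_set;	/* stands in for PagePrivate */
	bool in_tree;		/* buffer published in the radix tree */
};

/* releasepage side: may only strip ->private while holding the lock */
static void release_page(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	if (!p->in_tree)		/* lookup failed: assume the page is unused */
		p->private_set = false;
	pthread_mutex_unlock(&p->lock);
}

/* allocation side: publish, set private, and only then unlock */
static void publish_buffer(struct fake_page *p)
{
	p->in_tree = true;		/* radix tree insert in the real code */
	p->private_set = true;		/* set_page_extent_mapped/_head */
	pthread_mutex_unlock(&p->lock);	/* the unlock_page() moved after publish */
}

int main(void)
{
	struct fake_page p = { PTHREAD_MUTEX_INITIALIZER, false, false };

	pthread_mutex_lock(&p.lock);	/* page stays locked during setup */
	publish_buffer(&p);
	release_page(&p);		/* now sees in_tree and keeps ->private */
	printf("private still set: %d\n", p.private_set);
	return 0;
}

Built with -pthread, the model prints that ->private survives, because release_page() can only run once the buffer is visibly published.
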
@@ -3271,10 +3303,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			continue;
 
 		lock_page(page);
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
-		else
-			set_page_private(page, EXTENT_PAGE_PRIVATE);
 
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
@@ -3464,6 +3497,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
+		if (i == 0)
+			set_page_extent_head(page, eb->len);
+
 		if (inc_all_pages)
 			page_cache_get(page);
 		if (!PageUptodate(page)) {
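
The clear_extent_buffer_dirty() and read_extent_buffer_pages() hunks replace the old habit of tagging tail pages by hand with the same idiom in both functions: warn if page->private is unexpectedly missing, let set_page_extent_mapped() attach it only when it is not already there, and re-tag the head page. The sketch below is a userspace model of that guard with invented types; the real helper also pins the page when it first attaches ->private, which the model only notes in a comment.

#include <stdbool.h>
#include <stdio.h>

/* invented stand-in for struct page: only the bits the idiom touches */
struct fake_page {
	bool has_private;		/* PagePrivate */
	unsigned long private_word;	/* page->private */
};

/* model of set_page_extent_mapped(): attach ->private only once */
static void set_extent_mapped(struct fake_page *p)
{
	if (!p->has_private) {
		p->has_private = true;
		p->private_word = 1;	/* EXTENT_PAGE_PRIVATE; real code also grabs a page ref */
	}
}

/* model of set_page_extent_head(): tag the head page with the length */
static void set_extent_head(struct fake_page *p, unsigned long len)
{
	if (!p->has_private)
		fprintf(stderr, "WARN: private missing\n");	/* the new WARN_ON */
	p->private_word = 3 | len << 2;	/* EXTENT_PAGE_PRIVATE_FIRST_PAGE */
}

int main(void)
{
	struct fake_page pages[2] = { { false, 0 }, { false, 0 } };
	unsigned long len = 8192;

	for (int i = 0; i < 2; i++) {
		set_extent_mapped(&pages[i]);
		if (i == 0)
			set_extent_head(&pages[i], len);
	}
	printf("head: 0x%lx  tail: 0x%lx\n",
	       pages[0].private_word, pages[1].private_word);
	return 0;
}
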