author      Chris Mason <chris.mason@oracle.com>    2009-09-11 19:07:25 -0400
committer   Chris Mason <chris.mason@oracle.com>    2009-09-11 19:07:25 -0400
commit      83ebade34bc1a90d0c3f77b87b940f336d075fda
tree        99b6366c52e6bec88119ae995399c985fc61e900 /fs/btrfs/inode.c
parent      74fca6a42863ffacaf7ba6f1936a9f228950f657
parent      93c82d575055f1bd0277acae6f966bebafd80dd5
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c | 112
1 file changed, 74 insertions(+), 38 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 59cba180fe83..941f1b71cd22 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -231,7 +231,8 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
         }
 
         ret = btrfs_drop_extents(trans, root, inode, start,
-                aligned_end, aligned_end, start, &hint_byte);
+                aligned_end, aligned_end, start,
+                &hint_byte, 1);
         BUG_ON(ret);
 
         if (isize > actual_end)
@@ -240,7 +241,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                 inline_len, compressed_size,
                 compressed_pages);
         BUG_ON(ret);
-        btrfs_drop_extent_cache(inode, start, aligned_end, 0);
+        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
         return 0;
 }
 
@@ -425,7 +426,7 @@ again:
                 extent_clear_unlock_delalloc(inode,
                         &BTRFS_I(inode)->io_tree,
                         start, end, NULL, 1, 0,
-                        0, 1, 1, 1);
+                        0, 1, 1, 1, 0);
                 ret = 0;
                 goto free_pages_out;
         }
@@ -611,9 +612,9 @@ static noinline int submit_compressed_extents(struct inode *inode,
                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 
                 while (1) {
-                        spin_lock(&em_tree->lock);
+                        write_lock(&em_tree->lock);
                         ret = add_extent_mapping(em_tree, em);
-                        spin_unlock(&em_tree->lock);
+                        write_unlock(&em_tree->lock);
                         if (ret != -EEXIST) {
                                 free_extent_map(em);
                                 break;
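The hunk above (and the matching hunks further down in cow_file_range, run_delalloc_nocow, btrfs_io_failed_hook and btrfs_get_extent) converts the extent map tree locking from spin_lock/spin_unlock to write_lock/write_unlock around insertions and read_lock/read_unlock around lookups, which suggests the tree is now protected by a reader/writer lock so that concurrent lookups no longer serialize against each other. A minimal user-space sketch of the same pattern using POSIX rwlocks (the struct and helper names here are illustrative stand-ins, not the kernel API):

#include <pthread.h>
#include <stdio.h>

/* hypothetical stand-in for the extent map tree: a lock plus some state */
struct em_tree {
        pthread_rwlock_t lock;
        int nr_extents;
};

/* writers (add_extent_mapping in the diff) take the lock exclusively */
static void add_mapping(struct em_tree *t)
{
        pthread_rwlock_wrlock(&t->lock);
        t->nr_extents++;
        pthread_rwlock_unlock(&t->lock);
}

/* readers (lookup_extent_mapping) may run concurrently with each other */
static int lookup_mapping(struct em_tree *t)
{
        int n;
        pthread_rwlock_rdlock(&t->lock);
        n = t->nr_extents;
        pthread_rwlock_unlock(&t->lock);
        return n;
}

int main(void)
{
        struct em_tree t = { .lock = PTHREAD_RWLOCK_INITIALIZER, .nr_extents = 0 };
        add_mapping(&t);
        printf("extents: %d\n", lookup_mapping(&t));
        return 0;
}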
@@ -640,7 +641,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
                         async_extent->start,
                         async_extent->start +
                         async_extent->ram_size - 1,
-                        NULL, 1, 1, 0, 1, 1, 0);
+                        NULL, 1, 1, 0, 1, 1, 0, 0);
 
                 ret = btrfs_submit_compressed_write(inode,
                         async_extent->start,
@@ -713,7 +714,7 @@ static noinline int cow_file_range(struct inode *inode,
                         extent_clear_unlock_delalloc(inode,
                                 &BTRFS_I(inode)->io_tree,
                                 start, end, NULL, 1, 1,
-                                1, 1, 1, 1);
+                                1, 1, 1, 1, 0);
                         *nr_written = *nr_written +
                                 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                         *page_started = 1;
@@ -747,9 +748,9 @@ static noinline int cow_file_range(struct inode *inode,
                 set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
                 while (1) {
-                        spin_lock(&em_tree->lock);
+                        write_lock(&em_tree->lock);
                         ret = add_extent_mapping(em_tree, em);
-                        spin_unlock(&em_tree->lock);
+                        write_unlock(&em_tree->lock);
                         if (ret != -EEXIST) {
                                 free_extent_map(em);
                                 break;
@@ -776,11 +777,14 @@ static noinline int cow_file_range(struct inode *inode,
                 /* we're not doing compressed IO, don't unlock the first
                  * page (which the caller expects to stay locked), don't
                  * clear any dirty bits and don't set any writeback bits
+                 *
+                 * Do set the Private2 bit so we know this page was properly
+                 * setup for writepage
                  */
                 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                         start, start + ram_size - 1,
                         locked_page, unlock, 1,
-                        1, 0, 0, 0);
+                        1, 0, 0, 0, 1);
                 disk_num_bytes -= cur_alloc_size;
                 num_bytes -= cur_alloc_size;
                 alloc_hint = ins.objectid + ins.offset;
@@ -853,7 +857,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
         int limit = 10 * 1024 * 1042;
 
         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
-                EXTENT_DELALLOC, 1, 0, GFP_NOFS);
+                EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
         while (start < end) {
                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                 async_cow->inode = inode;
@@ -1080,9 +1084,9 @@ out_check:
                         em->bdev = root->fs_info->fs_devices->latest_bdev;
                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
                         while (1) {
-                                spin_lock(&em_tree->lock);
+                                write_lock(&em_tree->lock);
                                 ret = add_extent_mapping(em_tree, em);
-                                spin_unlock(&em_tree->lock);
+                                write_unlock(&em_tree->lock);
                                 if (ret != -EEXIST) {
                                         free_extent_map(em);
                                         break;
@@ -1101,7 +1105,7 @@ out_check:
 
                 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                         cur_offset, cur_offset + num_bytes - 1,
-                        locked_page, 1, 1, 1, 0, 0, 0);
+                        locked_page, 1, 1, 1, 0, 0, 0, 1);
                 cur_offset = extent_end;
                 if (cur_offset > end)
                         break;
@@ -1374,10 +1378,8 @@ again:
         lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
 
         /* already ordered? We're done */
-        if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
-                        EXTENT_ORDERED, 0)) {
+        if (PagePrivate2(page))
                 goto out;
-        }
 
         ordered = btrfs_lookup_ordered_extent(inode, page_start);
         if (ordered) {
@@ -1413,11 +1415,9 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
         struct inode *inode = page->mapping->host;
         struct btrfs_writepage_fixup *fixup;
         struct btrfs_root *root = BTRFS_I(inode)->root;
-        int ret;
 
-        ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
-                        EXTENT_ORDERED, 0);
-        if (ret)
+        /* this page is properly in the ordered list */
+        if (TestClearPagePrivate2(page))
                 return 0;
 
         if (PageChecked(page))
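The two hunks above replace tests of the EXTENT_ORDERED bit in the io_tree with the page-level Private2 flag: PagePrivate2() checks it, and TestClearPagePrivate2() atomically tests and clears it, so exactly one caller wins responsibility for the ordered accounting on that page. A small user-space sketch of the test-and-clear idiom with C11 atomics (the flag name and struct are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PG_PRIVATE2 (1u << 0)   /* illustrative flag bit */

struct page_like {
        atomic_uint flags;
};

/* returns true only for the caller that actually cleared the bit,
 * mirroring how TestClearPagePrivate2() hands out responsibility once */
static bool test_clear_private2(struct page_like *p)
{
        unsigned old = atomic_fetch_and(&p->flags, ~PG_PRIVATE2);
        return old & PG_PRIVATE2;
}

int main(void)
{
        struct page_like p = { .flags = PG_PRIVATE2 };
        printf("first caller owns it: %d\n", test_clear_private2(&p));
        printf("second caller does not: %d\n", test_clear_private2(&p));
        return 0;
}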
@@ -1455,9 +1455,19 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
         BUG_ON(!path);
 
         path->leave_spinning = 1;
+
+        /*
+         * we may be replacing one extent in the tree with another.
+         * The new extent is pinned in the extent map, and we don't want
+         * to drop it from the cache until it is completely in the btree.
+         *
+         * So, tell btrfs_drop_extents to leave this extent in the cache.
+         * the caller is expected to unpin it and allow it to be merged
+         * with the others.
+         */
         ret = btrfs_drop_extents(trans, root, inode, file_pos,
                 file_pos + num_bytes, locked_end,
-                file_pos, &hint);
+                file_pos, &hint, 0);
         BUG_ON(ret);
 
         ins.objectid = inode->i_ino;
@@ -1485,7 +1495,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
         btrfs_mark_buffer_dirty(leaf);
 
         inode_add_bytes(inode, num_bytes);
-        btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
 
         ins.objectid = disk_bytenr;
         ins.offset = disk_num_bytes;
@@ -1596,6 +1605,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                         ordered_extent->len,
                         compressed, 0, 0,
                         BTRFS_FILE_EXTENT_REG);
+                unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+                        ordered_extent->file_offset,
+                        ordered_extent->len);
                 BUG_ON(ret);
         }
         unlock_extent(io_tree, ordered_extent->file_offset,
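Read together with the new comment in insert_reserved_file_extent() above, the added unpin_extent_cache() call suggests the intended ordering: btrfs_drop_extents() is now told (trailing 0) to leave the pinned extent mapping in the cache, and only after the file extent item is safely in the btree does btrfs_finish_ordered_io() unpin it so it can be merged with its neighbours. A toy sketch of that pin/insert/unpin lifecycle (all names here are hypothetical, not btrfs APIs):

#include <stdbool.h>
#include <stdio.h>

/* toy cached mapping: while pinned it must not be dropped or merged */
struct cached_extent {
        unsigned long start, len;
        bool pinned;
};

static void pin_extent(struct cached_extent *e)   { e->pinned = true; }
static void unpin_extent(struct cached_extent *e) { e->pinned = false; }

/* merging (or dropping) is only legal once the on-disk copy is complete */
static bool try_merge(struct cached_extent *e)
{
        if (e->pinned)
                return false;   /* still being written into the btree */
        return true;            /* safe to merge with adjacent extents */
}

int main(void)
{
        struct cached_extent e = { .start = 0, .len = 4096, .pinned = false };

        pin_extent(&e);                 /* roughly, set EXTENT_FLAG_PINNED */
        printf("merge while pinned: %d\n", try_merge(&e));
        /* ... insert the file extent item into the btree here ... */
        unpin_extent(&e);               /* the unpin_extent_cache() step */
        printf("merge after unpin: %d\n", try_merge(&e));
        return 0;
}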
@@ -1623,6 +1635,7 @@ nocow:
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
                 struct extent_state *state, int uptodate)
 {
+        ClearPagePrivate2(page);
         return btrfs_finish_ordered_io(page->mapping->host, start, end);
 }
 
@@ -1669,13 +1682,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
                 failrec->last_mirror = 0;
                 failrec->bio_flags = 0;
 
-                spin_lock(&em_tree->lock);
+                read_lock(&em_tree->lock);
                 em = lookup_extent_mapping(em_tree, start, failrec->len);
                 if (em->start > start || em->start + em->len < start) {
                         free_extent_map(em);
                         em = NULL;
                 }
-                spin_unlock(&em_tree->lock);
+                read_unlock(&em_tree->lock);
 
                 if (!em || IS_ERR(em)) {
                         kfree(failrec);
@@ -1794,7 +1807,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                 return 0;
 
         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
-            test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
+            test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
                         GFP_NOFS);
                 return 0;
@@ -2935,7 +2948,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
                                 cur_offset,
                                 cur_offset + hole_size,
                                 block_end,
-                                cur_offset, &hint_byte);
+                                cur_offset, &hint_byte, 1);
                         if (err)
                                 break;
                         err = btrfs_insert_file_extent(trans, root,
@@ -4064,11 +4077,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
         int compressed;
 
 again:
-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, start, len);
         if (em)
                 em->bdev = root->fs_info->fs_devices->latest_bdev;
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);
 
         if (em) {
                 if (em->start > start || em->start + em->len <= start)
@@ -4215,6 +4228,11 @@ again:
                         map = kmap(page);
                         read_extent_buffer(leaf, map + pg_offset, ptr,
                                 copy_size);
+                        if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+                                memset(map + pg_offset + copy_size, 0,
+                                        PAGE_CACHE_SIZE - pg_offset -
+                                        copy_size);
+                        }
                         kunmap(page);
                 }
                 flush_dcache_page(page);
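The added memset() zero-fills the remainder of the page past the inline extent data, so a reader of that page never sees stale bytes between copy_size and the end of the page. A minimal user-space illustration of the same tail-zeroing arithmetic (PAGE_SIZE_ stands in for PAGE_CACHE_SIZE; the helper name is hypothetical):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_ 4096u        /* stand-in for PAGE_CACHE_SIZE */

/* copy 'copy_size' bytes into the page at 'pg_offset' and zero the tail */
static void fill_page(unsigned char *page, unsigned pg_offset,
                      const unsigned char *src, unsigned copy_size)
{
        memcpy(page + pg_offset, src, copy_size);
        if (pg_offset + copy_size < PAGE_SIZE_)
                memset(page + pg_offset + copy_size, 0,
                       PAGE_SIZE_ - pg_offset - copy_size);
}

int main(void)
{
        unsigned char page[PAGE_SIZE_];
        const unsigned char data[] = "inline extent payload";

        memset(page, 0xff, sizeof(page));       /* simulate stale contents */
        fill_page(page, 100, data, sizeof(data));
        /* the byte just past the copied data is now zero, not 0xff */
        printf("byte after payload: %d\n", page[100 + (int)sizeof(data)]);
        return 0;
}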
@@ -4259,7 +4277,7 @@ insert:
         }
 
         err = 0;
-        spin_lock(&em_tree->lock);
+        write_lock(&em_tree->lock);
         ret = add_extent_mapping(em_tree, em);
         /* it is possible that someone inserted the extent into the tree
          * while we had the lock dropped. It is also possible that
@@ -4299,7 +4317,7 @@ insert:
                         err = 0;
                 }
         }
-        spin_unlock(&em_tree->lock);
+        write_unlock(&em_tree->lock);
 out:
         if (path)
                 btrfs_free_path(path);
@@ -4398,13 +4416,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
         u64 page_start = page_offset(page);
         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
+
+        /*
+         * we have the page locked, so new writeback can't start,
+         * and the dirty bit won't be cleared while we are here.
+         *
+         * Wait for IO on this page so that we can safely clear
+         * the PagePrivate2 bit and do ordered accounting
+         */
         wait_on_page_writeback(page);
+
         tree = &BTRFS_I(page->mapping->host)->io_tree;
         if (offset) {
                 btrfs_releasepage(page, GFP_NOFS);
                 return;
         }
-
         lock_extent(tree, page_start, page_end, GFP_NOFS);
         ordered = btrfs_lookup_ordered_extent(page->mapping->host,
                 page_offset(page));
@@ -4415,16 +4441,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
                  */
                 clear_extent_bit(tree, page_start, page_end,
                         EXTENT_DIRTY | EXTENT_DELALLOC |
-                        EXTENT_LOCKED, 1, 0, GFP_NOFS);
-                btrfs_finish_ordered_io(page->mapping->host,
-                        page_start, page_end);
+                        EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
+                /*
+                 * whoever cleared the private bit is responsible
+                 * for the finish_ordered_io
+                 */
+                if (TestClearPagePrivate2(page)) {
+                        btrfs_finish_ordered_io(page->mapping->host,
+                                page_start, page_end);
+                }
                 btrfs_put_ordered_extent(ordered);
                 lock_extent(tree, page_start, page_end, GFP_NOFS);
         }
         clear_extent_bit(tree, page_start, page_end,
-                EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-                EXTENT_ORDERED,
-                1, 1, GFP_NOFS);
+                EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
+                1, 1, NULL, GFP_NOFS);
         __btrfs_releasepage(page, GFP_NOFS);
 
         ClearPageChecked(page);
@@ -4521,11 +4552,14 @@ again:
         }
         ClearPageChecked(page);
         set_page_dirty(page);
+        SetPageUptodate(page);
 
         BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
 
 out_unlock:
+        if (!ret)
+                return VM_FAULT_LOCKED;
         unlock_page(page);
 out:
         return ret;
@@ -5058,6 +5092,8 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
                         0, 0, 0,
                         BTRFS_FILE_EXTENT_PREALLOC);
                 BUG_ON(ret);
+                btrfs_drop_extent_cache(inode, cur_offset,
+                        cur_offset + ins.offset - 1, 0);
                 num_bytes -= ins.offset;
                 cur_offset += ins.offset;
                 alloc_hint = ins.objectid + ins.offset;