Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--   fs/btrfs/extent_io.c   41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index bf3f424e0013..4ebabd237153 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -595,9 +595,14 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	clear = 1;
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
+		/*
+		 * Don't care for allocation failure here because we might end
+		 * up not needing the pre-allocated extent state at all, which
+		 * is the case if the tree only contains extent states that
+		 * cover our input range and don't cover any other range.
+		 * If we end up needing a new extent state we allocate it later.
+		 */
 		prealloc = alloc_extent_state(mask);
-		if (!prealloc)
-			return -ENOMEM;
 	}
 
 	spin_lock(&tree->lock);
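
The deleted -ENOMEM exit makes clear_extent_bit()'s up-front allocation purely opportunistic: on failure the function carries on, and an extent state is allocated later only if the tree walk proves one is needed. A runnable userspace model of that pattern follows; every name in it is illustrative, not a kernel API.

#include <errno.h>
#include <stdlib.h>

/* Stand-in for struct extent_state. */
struct state { int bits; };

static int clear_range(int needs_split)
{
	/* Opportunistic pre-allocation: failure is tolerated because the
	 * locked section below may not need a new record at all. */
	struct state *prealloc = malloc(sizeof(*prealloc));

	/* ... lock taken, tree walked ... */
	if (needs_split) {
		if (!prealloc)
			prealloc = malloc(sizeof(*prealloc)); /* allocate late */
		if (!prealloc)
			return -ENOMEM; /* only now is failure fatal */
		/* the real code would link prealloc into the tree here;
		 * the model just drops it */
		free(prealloc);
		prealloc = NULL;
	}

	free(prealloc); /* free(NULL) is a no-op */
	return 0;
}
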
@@ -796,17 +801,25 @@ static void set_state_bits(struct extent_io_tree *tree,
 	state->state |= bits_to_set;
 }
 
-static void cache_state(struct extent_state *state,
-			struct extent_state **cached_ptr)
+static void cache_state_if_flags(struct extent_state *state,
+				 struct extent_state **cached_ptr,
+				 const u64 flags)
 {
 	if (cached_ptr && !(*cached_ptr)) {
-		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
+		if (!flags || (state->state & flags)) {
 			*cached_ptr = state;
 			atomic_inc(&state->refs);
 		}
 	}
 }
 
+static void cache_state(struct extent_state *state,
+			struct extent_state **cached_ptr)
+{
+	return cache_state_if_flags(state, cached_ptr,
+				    EXTENT_IOBITS | EXTENT_BOUNDARY);
+}
+
 /*
  * set some bits on a range in the tree.  This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
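
cache_state() keeps its old contract, caching only states that carry the "interesting" EXTENT_IOBITS | EXTENT_BOUNDARY bits, while the new cache_state_if_flags() generalizes the test so that a zero flags argument means "cache whatever state was found"; the find_first_extent_bit() hunk below relies on exactly that. A compilable model of the predicate, with made-up bit values:

#include <stdint.h>
#include <stdio.h>

#define IOBITS   0x1ULL /* illustrative; not the real EXTENT_IOBITS */
#define BOUNDARY 0x2ULL /* illustrative; not the real EXTENT_BOUNDARY */

/* Mirrors the new condition: no filter at all, or any filtered bit set. */
static int should_cache(uint64_t state_bits, uint64_t flags)
{
	return !flags || (state_bits & flags);
}

int main(void)
{
	printf("%d\n", should_cache(0x4, IOBITS | BOUNDARY)); /* 0: bit not in filter */
	printf("%d\n", should_cache(0x4, 0));                 /* 1: flags == 0 caches anything */
	return 0;
}

Keeping cache_state() as a thin wrapper means none of its existing callers change behaviour.
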
@@ -1058,13 +1071,21 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int err = 0;
 	u64 last_start;
 	u64 last_end;
+	bool first_iteration = true;
 
 	btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
+		/*
+		 * Best effort, don't worry if extent state allocation fails
+		 * here for the first iteration. We might have a cached state
+		 * that matches exactly the target range, in which case no
+		 * extent state allocations are needed. We'll only know this
+		 * after locking the tree.
+		 */
 		prealloc = alloc_extent_state(mask);
-		if (!prealloc)
+		if (!prealloc && !first_iteration)
 			return -ENOMEM;
 	}
 
@@ -1234,6 +1255,7 @@ search_again:
 	spin_unlock(&tree->lock);
 	if (mask & __GFP_WAIT)
 		cond_resched();
+	first_iteration = false;
 	goto again;
 }
 
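
Together with the previous hunk, this makes convert_extent_bit() tolerate a failed pre-allocation only on its first pass: a cached state may match the target range exactly, so no allocation is needed at all, but once the function loops it knows more records are coming and a failed allocation must surface as -ENOMEM. A runnable model of that control flow, with illustrative names:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static int convert_range(bool can_block, int passes)
{
	void *prealloc = NULL;
	bool first_iteration = true;

again:
	if (!prealloc && can_block) {
		prealloc = malloc(64); /* may fail */
		if (!prealloc && !first_iteration)
			return -ENOMEM; /* only fatal on a retry; nothing leaks */
	}

	/* the locked tree walk would run here and may consume prealloc */

	if (--passes > 0) {
		first_iteration = false;
		goto again;
	}

	free(prealloc);
	return 0;
}
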
@@ -1482,7 +1504,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
 	state = find_first_extent_bit_state(tree, start, bits);
 got_it:
 	if (state) {
-		cache_state(state, cached_state);
+		cache_state_if_flags(state, cached_state, 0);
 		*start_ret = state->start;
 		*end_ret = state->end;
 		ret = 0;
@@ -1746,6 +1768,9 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 	if (page_ops == 0)
 		return 0;
 
+	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
+		mapping_set_error(inode->i_mapping, -EIO);
+
 	while (nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
 				     min_t(unsigned long,
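
Before any pages are touched, the failure is latched on the whole mapping: mapping_set_error() sets a sticky AS_EIO (or AS_ENOSPC) flag on the address_space, which is what later fsync()/msync() paths check, so userspace still sees the error even though writeback already gave up. A simplified, runnable model of that sticky-flag behaviour (the real helpers live in include/linux/pagemap.h and mm/filemap.c):

#include <errno.h>
#include <stdio.h>

enum { AS_EIO = 1 << 0, AS_ENOSPC = 1 << 1 };

struct mapping_model { unsigned long flags; };

/* Record a writeback error on the mapping; sticky until reported. */
static void mapping_set_error_model(struct mapping_model *m, int error)
{
	if (!error)
		return;
	if (error == -ENOSPC)
		m->flags |= AS_ENOSPC;
	else
		m->flags |= AS_EIO;
}

/* What an fsync()-style path does: test-and-clear, report once. */
static int check_errors_model(struct mapping_model *m)
{
	if (m->flags & AS_ENOSPC) {
		m->flags &= ~AS_ENOSPC;
		return -ENOSPC;
	}
	if (m->flags & AS_EIO) {
		m->flags &= ~AS_EIO;
		return -EIO;
	}
	return 0;
}

int main(void)
{
	struct mapping_model m = { 0 };

	mapping_set_error_model(&m, -EIO); /* delalloc range failed */
	printf("fsync reports: %d\n", check_errors_model(&m)); /* -5 */
	return 0;
}
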
@@ -1763,6 +1788,8 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 			clear_page_dirty_for_io(pages[i]);
 		if (page_ops & PAGE_SET_WRITEBACK)
 			set_page_writeback(pages[i]);
+		if (page_ops & PAGE_SET_ERROR)
+			SetPageError(pages[i]);
 		if (page_ops & PAGE_END_WRITEBACK)
 			end_page_writeback(pages[i]);
 		if (page_ops & PAGE_UNLOCK)
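
This is the per-page half of PAGE_SET_ERROR: each page in the delalloc range gets PG_error set in the same pass that handles its dirty, writeback, and lock state. A hedged sketch of how an error path might combine the ops in one call; the clear bits and op mix below are assumptions for illustration, not taken from this patch:

	/* Hypothetical error-path caller: unlock the range, run each page
	 * through a full writeback cycle, and flag it as failed. */
	extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);

Because the loop applies the ops in source order, passing both PAGE_SET_WRITEBACK and PAGE_END_WRITEBACK gives each page a complete set-then-end writeback cycle, which is one plausible reason error paths would combine them.
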