diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-27 19:37:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-27 19:37:36 -0400 |
commit | 559b6d90a0beb375c46dffe18133012bfa29f441 (patch) | |
tree | 9b4d9153bb8166a0792c30706e15a2e2ce88527c /fs | |
parent | aa00edc1287a693eadc7bc67a3d73555d969b35d (diff) | |
parent | 56244ef151c3cd11f505020ab0b3f45454363bcc (diff) |
Merge branch 'for-linus-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs cleanups and fixes from Chris Mason:
"We have another round of fixes and a few cleanups.
I have a fix for short returns from btrfs_copy_from_user, which
finally nails down a very hard to find regression we added in v4.6.
Dave is pushing around gfp parameters, mostly to cleanup internal apis
and make it a little more consistent.
The rest are smaller fixes, and one spelling fixup patch"
* 'for-linus-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (22 commits)
Btrfs: fix handling of faults from btrfs_copy_from_user
btrfs: fix string and comment grammatical issues and typos
btrfs: scrub: Set bbio to NULL before calling btrfs_map_block
Btrfs: fix unexpected return value of fiemap
Btrfs: free sys_array eb as soon as possible
btrfs: sink gfp parameter to convert_extent_bit
btrfs: make state preallocation more speculative in __set_extent_bit
btrfs: untangle gotos a bit in convert_extent_bit
btrfs: untangle gotos a bit in __clear_extent_bit
btrfs: untangle gotos a bit in __set_extent_bit
btrfs: sink gfp parameter to set_record_extent_bits
btrfs: sink gfp parameter to set_extent_new
btrfs: sink gfp parameter to set_extent_defrag
btrfs: sink gfp parameter to set_extent_delalloc
btrfs: sink gfp parameter to clear_extent_dirty
btrfs: sink gfp parameter to clear_record_extent_bits
btrfs: sink gfp parameter to clear_extent_bits
btrfs: sink gfp parameter to set_extent_bits
btrfs: make find_workspace warn if there are no workspaces
btrfs: make find_workspace always succeed
...
Diffstat (limited to 'fs')
36 files changed, 219 insertions, 210 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index d3090187fd76..8bb3509099e8 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, | |||
1939 | * from ipath->fspath->val[i]. | 1939 | * from ipath->fspath->val[i]. |
1940 | * when it returns, there are ipath->fspath->elem_cnt number of paths available | 1940 | * when it returns, there are ipath->fspath->elem_cnt number of paths available |
1941 | * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the | 1941 | * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the |
1942 | * number of missed paths in recored in ipath->fspath->elem_missed, otherwise, | 1942 | * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise, |
1943 | * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would | 1943 | * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would |
1944 | * have been needed to return all paths. | 1944 | * have been needed to return all paths. |
1945 | */ | 1945 | */ |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 1da5753d886d..4919aedb5fc1 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -313,7 +313,7 @@ struct btrfs_dio_private { | |||
313 | struct bio *dio_bio; | 313 | struct bio *dio_bio; |
314 | 314 | ||
315 | /* | 315 | /* |
316 | * The original bio may be splited to several sub-bios, this is | 316 | * The original bio may be split to several sub-bios, this is |
317 | * done during endio of sub-bios | 317 | * done during endio of sub-bios |
318 | */ | 318 | */ |
319 | int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int); | 319 | int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int); |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 516e19d1d202..b677a6ea6001 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
@@ -1939,7 +1939,7 @@ again: | |||
1939 | /* | 1939 | /* |
1940 | * Clear all references of this block. Do not free | 1940 | * Clear all references of this block. Do not free |
1941 | * the block itself even if is not referenced anymore | 1941 | * the block itself even if is not referenced anymore |
1942 | * because it still carries valueable information | 1942 | * because it still carries valuable information |
1943 | * like whether it was ever written and IO completed. | 1943 | * like whether it was ever written and IO completed. |
1944 | */ | 1944 | */ |
1945 | list_for_each_entry_safe(l, tmp, &block->ref_to_list, | 1945 | list_for_each_entry_safe(l, tmp, &block->ref_to_list, |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index decd0a3f5d61..427c36b430a6 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) | |||
156 | 156 | ||
157 | /* | 157 | /* |
158 | * RCU really hurts here, we could free up the root node because | 158 | * RCU really hurts here, we could free up the root node because |
159 | * it was cow'ed but we may not get the new root node yet so do | 159 | * it was COWed but we may not get the new root node yet so do |
160 | * the inc_not_zero dance and if it doesn't work then | 160 | * the inc_not_zero dance and if it doesn't work then |
161 | * synchronize_rcu and try again. | 161 | * synchronize_rcu and try again. |
162 | */ | 162 | */ |
@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root, | |||
955 | struct extent_buffer *buf) | 955 | struct extent_buffer *buf) |
956 | { | 956 | { |
957 | /* | 957 | /* |
958 | * Tree blocks not in refernece counted trees and tree roots | 958 | * Tree blocks not in reference counted trees and tree roots |
959 | * are never shared. If a block was allocated after the last | 959 | * are never shared. If a block was allocated after the last |
960 | * snapshot and the block was not allocated by tree relocation, | 960 | * snapshot and the block was not allocated by tree relocation, |
961 | * we know the block is not shared. | 961 | * we know the block is not shared. |
@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info, | |||
1270 | 1270 | ||
1271 | /* | 1271 | /* |
1272 | * tm is a pointer to the first operation to rewind within eb. then, all | 1272 | * tm is a pointer to the first operation to rewind within eb. then, all |
1273 | * previous operations will be rewinded (until we reach something older than | 1273 | * previous operations will be rewound (until we reach something older than |
1274 | * time_seq). | 1274 | * time_seq). |
1275 | */ | 1275 | */ |
1276 | static void | 1276 | static void |
@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, | |||
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | /* | 1347 | /* |
1348 | * Called with eb read locked. If the buffer cannot be rewinded, the same buffer | 1348 | * Called with eb read locked. If the buffer cannot be rewound, the same buffer |
1349 | * is returned. If rewind operations happen, a fresh buffer is returned. The | 1349 | * is returned. If rewind operations happen, a fresh buffer is returned. The |
1350 | * returned buffer is always read-locked. If the returned buffer is not the | 1350 | * returned buffer is always read-locked. If the returned buffer is not the |
1351 | * input buffer, the lock on the input buffer is released and the input buffer | 1351 | * input buffer, the lock on the input buffer is released and the input buffer |
@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans, | |||
1516 | * 3) the root is not forced COW. | 1516 | * 3) the root is not forced COW. |
1517 | * | 1517 | * |
1518 | * What is forced COW: | 1518 | * What is forced COW: |
1519 | * when we create snapshot during commiting the transaction, | 1519 | * when we create snapshot during committing the transaction, |
1520 | * after we've finished coping src root, we must COW the shared | 1520 | * after we've finished coping src root, we must COW the shared |
1521 | * block to ensure the metadata consistency. | 1521 | * block to ensure the metadata consistency. |
1522 | */ | 1522 | */ |
@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans, | |||
1531 | 1531 | ||
1532 | /* | 1532 | /* |
1533 | * cows a single block, see __btrfs_cow_block for the real work. | 1533 | * cows a single block, see __btrfs_cow_block for the real work. |
1534 | * This version of it has extra checks so that a block isn't cow'd more than | 1534 | * This version of it has extra checks so that a block isn't COWed more than |
1535 | * once per transaction, as long as it hasn't been written yet | 1535 | * once per transaction, as long as it hasn't been written yet |
1536 | */ | 1536 | */ |
1537 | noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, | 1537 | noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, |
@@ -2986,7 +2986,7 @@ again: | |||
2986 | btrfs_unlock_up_safe(p, level + 1); | 2986 | btrfs_unlock_up_safe(p, level + 1); |
2987 | 2987 | ||
2988 | /* | 2988 | /* |
2989 | * Since we can unwind eb's we want to do a real search every | 2989 | * Since we can unwind ebs we want to do a real search every |
2990 | * time. | 2990 | * time. |
2991 | */ | 2991 | */ |
2992 | prev_cmp = -1; | 2992 | prev_cmp = -1; |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ddcc58f03c79..101c3cfd3f7c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -89,7 +89,7 @@ static const int btrfs_csum_sizes[] = { 4 }; | |||
89 | /* four bytes for CRC32 */ | 89 | /* four bytes for CRC32 */ |
90 | #define BTRFS_EMPTY_DIR_SIZE 0 | 90 | #define BTRFS_EMPTY_DIR_SIZE 0 |
91 | 91 | ||
92 | /* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */ | 92 | /* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */ |
93 | #define REQ_GET_READ_MIRRORS (1 << 30) | 93 | #define REQ_GET_READ_MIRRORS (1 << 30) |
94 | 94 | ||
95 | /* ioprio of readahead is set to idle */ | 95 | /* ioprio of readahead is set to idle */ |
@@ -431,7 +431,7 @@ struct btrfs_space_info { | |||
431 | * bytes_pinned does not reflect the bytes that will be pinned once the | 431 | * bytes_pinned does not reflect the bytes that will be pinned once the |
432 | * delayed refs are flushed, so this counter is inc'ed every time we | 432 | * delayed refs are flushed, so this counter is inc'ed every time we |
433 | * call btrfs_free_extent so it is a realtime count of what will be | 433 | * call btrfs_free_extent so it is a realtime count of what will be |
434 | * freed once the transaction is committed. It will be zero'ed every | 434 | * freed once the transaction is committed. It will be zeroed every |
435 | * time the transaction commits. | 435 | * time the transaction commits. |
436 | */ | 436 | */ |
437 | struct percpu_counter total_bytes_pinned; | 437 | struct percpu_counter total_bytes_pinned; |
@@ -1401,7 +1401,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token) | |||
1401 | token->kaddr = NULL; | 1401 | token->kaddr = NULL; |
1402 | } | 1402 | } |
1403 | 1403 | ||
1404 | /* some macros to generate set/get funcs for the struct fields. This | 1404 | /* some macros to generate set/get functions for the struct fields. This |
1405 | * assumes there is a lefoo_to_cpu for every type, so lets make a simple | 1405 | * assumes there is a lefoo_to_cpu for every type, so lets make a simple |
1406 | * one for u8: | 1406 | * one for u8: |
1407 | */ | 1407 | */ |
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index c24b653c7343..5fca9534a271 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h | |||
@@ -188,7 +188,7 @@ struct btrfs_delayed_ref_root { | |||
188 | 188 | ||
189 | /* | 189 | /* |
190 | * To make qgroup to skip given root. | 190 | * To make qgroup to skip given root. |
191 | * This is for snapshot, as btrfs_qgroup_inherit() will manully | 191 | * This is for snapshot, as btrfs_qgroup_inherit() will manually |
192 | * modify counters for snapshot and its source, so we should skip | 192 | * modify counters for snapshot and its source, so we should skip |
193 | * the snapshot in new_root/old_roots or it will get calculated twice | 193 | * the snapshot in new_root/old_roots or it will get calculated twice |
194 | */ | 194 | */ |
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 85f12e6e28d2..63ef9cdf0144 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c | |||
@@ -450,7 +450,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root, | |||
450 | } | 450 | } |
451 | 451 | ||
452 | /* | 452 | /* |
453 | * blocked until all flighting bios are finished. | 453 | * blocked until all in-flight bios operations are finished. |
454 | */ | 454 | */ |
455 | static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info) | 455 | static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info) |
456 | { | 456 | { |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 91d123938cef..6628fca9f4ed 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -384,7 +384,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, | |||
384 | /* | 384 | /* |
385 | * Things reading via commit roots that don't have normal protection, | 385 | * Things reading via commit roots that don't have normal protection, |
386 | * like send, can have a really old block in cache that may point at a | 386 | * like send, can have a really old block in cache that may point at a |
387 | * block that has been free'd and re-allocated. So don't clear uptodate | 387 | * block that has been freed and re-allocated. So don't clear uptodate |
388 | * if we find an eb that is under IO (dirty/writeback) because we could | 388 | * if we find an eb that is under IO (dirty/writeback) because we could |
389 | * end up reading in the stale data and then writing it back out and | 389 | * end up reading in the stale data and then writing it back out and |
390 | * making everybody very sad. | 390 | * making everybody very sad. |
@@ -418,7 +418,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb) | |||
418 | /* | 418 | /* |
419 | * The super_block structure does not span the whole | 419 | * The super_block structure does not span the whole |
420 | * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space | 420 | * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space |
421 | * is filled with zeros and is included in the checkum. | 421 | * is filled with zeros and is included in the checksum. |
422 | */ | 422 | */ |
423 | crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, | 423 | crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, |
424 | crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); | 424 | crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); |
@@ -600,7 +600,7 @@ static noinline int check_leaf(struct btrfs_root *root, | |||
600 | 600 | ||
601 | /* | 601 | /* |
602 | * Check to make sure that we don't point outside of the leaf, | 602 | * Check to make sure that we don't point outside of the leaf, |
603 | * just incase all the items are consistent to eachother, but | 603 | * just in case all the items are consistent to each other, but |
604 | * all point outside of the leaf. | 604 | * all point outside of the leaf. |
605 | */ | 605 | */ |
606 | if (btrfs_item_end_nr(leaf, slot) > | 606 | if (btrfs_item_end_nr(leaf, slot) > |
@@ -3022,7 +3022,7 @@ retry_root_backup: | |||
3022 | } | 3022 | } |
3023 | 3023 | ||
3024 | /* | 3024 | /* |
3025 | * Mount does not set all options immediatelly, we can do it now and do | 3025 | * Mount does not set all options immediately, we can do it now and do |
3026 | * not have to wait for transaction commit | 3026 | * not have to wait for transaction commit |
3027 | */ | 3027 | */ |
3028 | btrfs_apply_pending_changes(fs_info); | 3028 | btrfs_apply_pending_changes(fs_info); |
@@ -3255,7 +3255,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | |||
3255 | btrfs_warn_rl_in_rcu(device->dev_root->fs_info, | 3255 | btrfs_warn_rl_in_rcu(device->dev_root->fs_info, |
3256 | "lost page write due to IO error on %s", | 3256 | "lost page write due to IO error on %s", |
3257 | rcu_str_deref(device->name)); | 3257 | rcu_str_deref(device->name)); |
3258 | /* note, we dont' set_buffer_write_io_error because we have | 3258 | /* note, we don't set_buffer_write_io_error because we have |
3259 | * our own ways of dealing with the IO errors | 3259 | * our own ways of dealing with the IO errors |
3260 | */ | 3260 | */ |
3261 | clear_buffer_uptodate(bh); | 3261 | clear_buffer_uptodate(bh); |
@@ -4367,7 +4367,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
4367 | if (ret) | 4367 | if (ret) |
4368 | break; | 4368 | break; |
4369 | 4369 | ||
4370 | clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); | 4370 | clear_extent_bits(dirty_pages, start, end, mark); |
4371 | while (start <= end) { | 4371 | while (start <= end) { |
4372 | eb = btrfs_find_tree_block(root->fs_info, start); | 4372 | eb = btrfs_find_tree_block(root->fs_info, start); |
4373 | start += root->nodesize; | 4373 | start += root->nodesize; |
@@ -4402,7 +4402,7 @@ again: | |||
4402 | if (ret) | 4402 | if (ret) |
4403 | break; | 4403 | break; |
4404 | 4404 | ||
4405 | clear_extent_dirty(unpin, start, end, GFP_NOFS); | 4405 | clear_extent_dirty(unpin, start, end); |
4406 | btrfs_error_unpin_extent_range(root, start, end); | 4406 | btrfs_error_unpin_extent_range(root, start, end); |
4407 | cond_resched(); | 4407 | cond_resched(); |
4408 | } | 4408 | } |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9424864fd01a..a400951e8678 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -231,9 +231,9 @@ static int add_excluded_extent(struct btrfs_root *root, | |||
231 | { | 231 | { |
232 | u64 end = start + num_bytes - 1; | 232 | u64 end = start + num_bytes - 1; |
233 | set_extent_bits(&root->fs_info->freed_extents[0], | 233 | set_extent_bits(&root->fs_info->freed_extents[0], |
234 | start, end, EXTENT_UPTODATE, GFP_NOFS); | 234 | start, end, EXTENT_UPTODATE); |
235 | set_extent_bits(&root->fs_info->freed_extents[1], | 235 | set_extent_bits(&root->fs_info->freed_extents[1], |
236 | start, end, EXTENT_UPTODATE, GFP_NOFS); | 236 | start, end, EXTENT_UPTODATE); |
237 | return 0; | 237 | return 0; |
238 | } | 238 | } |
239 | 239 | ||
@@ -246,9 +246,9 @@ static void free_excluded_extents(struct btrfs_root *root, | |||
246 | end = start + cache->key.offset - 1; | 246 | end = start + cache->key.offset - 1; |
247 | 247 | ||
248 | clear_extent_bits(&root->fs_info->freed_extents[0], | 248 | clear_extent_bits(&root->fs_info->freed_extents[0], |
249 | start, end, EXTENT_UPTODATE, GFP_NOFS); | 249 | start, end, EXTENT_UPTODATE); |
250 | clear_extent_bits(&root->fs_info->freed_extents[1], | 250 | clear_extent_bits(&root->fs_info->freed_extents[1], |
251 | start, end, EXTENT_UPTODATE, GFP_NOFS); | 251 | start, end, EXTENT_UPTODATE); |
252 | } | 252 | } |
253 | 253 | ||
254 | static int exclude_super_stripes(struct btrfs_root *root, | 254 | static int exclude_super_stripes(struct btrfs_root *root, |
@@ -980,7 +980,7 @@ out_free: | |||
980 | * event that tree block loses its owner tree's reference and do the | 980 | * event that tree block loses its owner tree's reference and do the |
981 | * back refs conversion. | 981 | * back refs conversion. |
982 | * | 982 | * |
983 | * When a tree block is COW'd through a tree, there are four cases: | 983 | * When a tree block is COWed through a tree, there are four cases: |
984 | * | 984 | * |
985 | * The reference count of the block is one and the tree is the block's | 985 | * The reference count of the block is one and the tree is the block's |
986 | * owner tree. Nothing to do in this case. | 986 | * owner tree. Nothing to do in this case. |
@@ -2595,7 +2595,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
2595 | } | 2595 | } |
2596 | 2596 | ||
2597 | /* | 2597 | /* |
2598 | * Need to drop our head ref lock and re-aqcuire the | 2598 | * Need to drop our head ref lock and re-acquire the |
2599 | * delayed ref lock and then re-check to make sure | 2599 | * delayed ref lock and then re-check to make sure |
2600 | * nobody got added. | 2600 | * nobody got added. |
2601 | */ | 2601 | */ |
@@ -2747,7 +2747,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads) | |||
2747 | 2747 | ||
2748 | /* | 2748 | /* |
2749 | * We don't ever fill up leaves all the way so multiply by 2 just to be | 2749 | * We don't ever fill up leaves all the way so multiply by 2 just to be |
2750 | * closer to what we're really going to want to ouse. | 2750 | * closer to what we're really going to want to use. |
2751 | */ | 2751 | */ |
2752 | return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root)); | 2752 | return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root)); |
2753 | } | 2753 | } |
@@ -2851,7 +2851,7 @@ static void delayed_ref_async_start(struct btrfs_work *work) | |||
2851 | } | 2851 | } |
2852 | 2852 | ||
2853 | /* | 2853 | /* |
2854 | * trans->sync means that when we call end_transaciton, we won't | 2854 | * trans->sync means that when we call end_transaction, we won't |
2855 | * wait on delayed refs | 2855 | * wait on delayed refs |
2856 | */ | 2856 | */ |
2857 | trans->sync = true; | 2857 | trans->sync = true; |
@@ -4296,7 +4296,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, | |||
4296 | * Called if we need to clear a data reservation for this inode | 4296 | * Called if we need to clear a data reservation for this inode |
4297 | * Normally in a error case. | 4297 | * Normally in a error case. |
4298 | * | 4298 | * |
4299 | * This one will handle the per-indoe data rsv map for accurate reserved | 4299 | * This one will handle the per-inode data rsv map for accurate reserved |
4300 | * space framework. | 4300 | * space framework. |
4301 | */ | 4301 | */ |
4302 | void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len) | 4302 | void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len) |
@@ -4967,7 +4967,7 @@ void btrfs_init_async_reclaim_work(struct work_struct *work) | |||
4967 | * @orig_bytes - the number of bytes we want | 4967 | * @orig_bytes - the number of bytes we want |
4968 | * @flush - whether or not we can flush to make our reservation | 4968 | * @flush - whether or not we can flush to make our reservation |
4969 | * | 4969 | * |
4970 | * This will reserve orgi_bytes number of bytes from the space info associated | 4970 | * This will reserve orig_bytes number of bytes from the space info associated |
4971 | * with the block_rsv. If there is not enough space it will make an attempt to | 4971 | * with the block_rsv. If there is not enough space it will make an attempt to |
4972 | * flush out space to make room. It will do this by flushing delalloc if | 4972 | * flush out space to make room. It will do this by flushing delalloc if |
4973 | * possible or committing the transaction. If flush is 0 then no attempts to | 4973 | * possible or committing the transaction. If flush is 0 then no attempts to |
@@ -5572,7 +5572,7 @@ void btrfs_orphan_release_metadata(struct inode *inode) | |||
5572 | * common file/directory operations, they change two fs/file trees | 5572 | * common file/directory operations, they change two fs/file trees |
5573 | * and root tree, the number of items that the qgroup reserves is | 5573 | * and root tree, the number of items that the qgroup reserves is |
5574 | * different with the free space reservation. So we can not use | 5574 | * different with the free space reservation. So we can not use |
5575 | * the space reseravtion mechanism in start_transaction(). | 5575 | * the space reservation mechanism in start_transaction(). |
5576 | */ | 5576 | */ |
5577 | int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, | 5577 | int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, |
5578 | struct btrfs_block_rsv *rsv, | 5578 | struct btrfs_block_rsv *rsv, |
@@ -5621,7 +5621,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root, | |||
5621 | /** | 5621 | /** |
5622 | * drop_outstanding_extent - drop an outstanding extent | 5622 | * drop_outstanding_extent - drop an outstanding extent |
5623 | * @inode: the inode we're dropping the extent for | 5623 | * @inode: the inode we're dropping the extent for |
5624 | * @num_bytes: the number of bytes we're relaseing. | 5624 | * @num_bytes: the number of bytes we're releasing. |
5625 | * | 5625 | * |
5626 | * This is called when we are freeing up an outstanding extent, either called | 5626 | * This is called when we are freeing up an outstanding extent, either called |
5627 | * after an error or after an extent is written. This will return the number of | 5627 | * after an error or after an extent is written. This will return the number of |
@@ -5647,7 +5647,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) | |||
5647 | drop_inode_space = 1; | 5647 | drop_inode_space = 1; |
5648 | 5648 | ||
5649 | /* | 5649 | /* |
5650 | * If we have more or the same amount of outsanding extents than we have | 5650 | * If we have more or the same amount of outstanding extents than we have |
5651 | * reserved then we need to leave the reserved extents count alone. | 5651 | * reserved then we need to leave the reserved extents count alone. |
5652 | */ | 5652 | */ |
5653 | if (BTRFS_I(inode)->outstanding_extents >= | 5653 | if (BTRFS_I(inode)->outstanding_extents >= |
@@ -5661,8 +5661,8 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) | |||
5661 | } | 5661 | } |
5662 | 5662 | ||
5663 | /** | 5663 | /** |
5664 | * calc_csum_metadata_size - return the amount of metada space that must be | 5664 | * calc_csum_metadata_size - return the amount of metadata space that must be |
5665 | * reserved/free'd for the given bytes. | 5665 | * reserved/freed for the given bytes. |
5666 | * @inode: the inode we're manipulating | 5666 | * @inode: the inode we're manipulating |
5667 | * @num_bytes: the number of bytes in question | 5667 | * @num_bytes: the number of bytes in question |
5668 | * @reserve: 1 if we are reserving space, 0 if we are freeing space | 5668 | * @reserve: 1 if we are reserving space, 0 if we are freeing space |
@@ -5814,7 +5814,7 @@ out_fail: | |||
5814 | 5814 | ||
5815 | /* | 5815 | /* |
5816 | * This is tricky, but first we need to figure out how much we | 5816 | * This is tricky, but first we need to figure out how much we |
5817 | * free'd from any free-ers that occurred during this | 5817 | * freed from any free-ers that occurred during this |
5818 | * reservation, so we reset ->csum_bytes to the csum_bytes | 5818 | * reservation, so we reset ->csum_bytes to the csum_bytes |
5819 | * before we dropped our lock, and then call the free for the | 5819 | * before we dropped our lock, and then call the free for the |
5820 | * number of bytes that were freed while we were trying our | 5820 | * number of bytes that were freed while we were trying our |
@@ -5836,7 +5836,7 @@ out_fail: | |||
5836 | 5836 | ||
5837 | /* | 5837 | /* |
5838 | * Now reset ->csum_bytes to what it should be. If bytes is | 5838 | * Now reset ->csum_bytes to what it should be. If bytes is |
5839 | * more than to_free then we would have free'd more space had we | 5839 | * more than to_free then we would have freed more space had we |
5840 | * not had an artificially high ->csum_bytes, so we need to free | 5840 | * not had an artificially high ->csum_bytes, so we need to free |
5841 | * the remainder. If bytes is the same or less then we don't | 5841 | * the remainder. If bytes is the same or less then we don't |
5842 | * need to do anything, the other free-ers did the correct | 5842 | * need to do anything, the other free-ers did the correct |
@@ -6515,7 +6515,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |||
6515 | ret = btrfs_discard_extent(root, start, | 6515 | ret = btrfs_discard_extent(root, start, |
6516 | end + 1 - start, NULL); | 6516 | end + 1 - start, NULL); |
6517 | 6517 | ||
6518 | clear_extent_dirty(unpin, start, end, GFP_NOFS); | 6518 | clear_extent_dirty(unpin, start, end); |
6519 | unpin_extent_range(root, start, end, true); | 6519 | unpin_extent_range(root, start, end, true); |
6520 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); | 6520 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
6521 | cond_resched(); | 6521 | cond_resched(); |
@@ -7578,7 +7578,7 @@ loop: | |||
7578 | if (loop == LOOP_CACHING_NOWAIT) { | 7578 | if (loop == LOOP_CACHING_NOWAIT) { |
7579 | /* | 7579 | /* |
7580 | * We want to skip the LOOP_CACHING_WAIT step if we | 7580 | * We want to skip the LOOP_CACHING_WAIT step if we |
7581 | * don't have any unached bgs and we've alrelady done a | 7581 | * don't have any uncached bgs and we've already done a |
7582 | * full search through. | 7582 | * full search through. |
7583 | */ | 7583 | */ |
7584 | if (orig_have_caching_bg || !full_search) | 7584 | if (orig_have_caching_bg || !full_search) |
@@ -7982,7 +7982,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, | |||
7982 | 7982 | ||
7983 | /* | 7983 | /* |
7984 | * Mixed block groups will exclude before processing the log so we only | 7984 | * Mixed block groups will exclude before processing the log so we only |
7985 | * need to do the exlude dance if this fs isn't mixed. | 7985 | * need to do the exclude dance if this fs isn't mixed. |
7986 | */ | 7986 | */ |
7987 | if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) { | 7987 | if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) { |
7988 | ret = __exclude_logged_extent(root, ins->objectid, ins->offset); | 7988 | ret = __exclude_logged_extent(root, ins->objectid, ins->offset); |
@@ -8032,7 +8032,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
8032 | buf->start + buf->len - 1, GFP_NOFS); | 8032 | buf->start + buf->len - 1, GFP_NOFS); |
8033 | else | 8033 | else |
8034 | set_extent_new(&root->dirty_log_pages, buf->start, | 8034 | set_extent_new(&root->dirty_log_pages, buf->start, |
8035 | buf->start + buf->len - 1, GFP_NOFS); | 8035 | buf->start + buf->len - 1); |
8036 | } else { | 8036 | } else { |
8037 | buf->log_index = -1; | 8037 | buf->log_index = -1; |
8038 | set_extent_dirty(&trans->transaction->dirty_pages, buf->start, | 8038 | set_extent_dirty(&trans->transaction->dirty_pages, buf->start, |
@@ -9426,7 +9426,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) | |||
9426 | u64 free_bytes = 0; | 9426 | u64 free_bytes = 0; |
9427 | int factor; | 9427 | int factor; |
9428 | 9428 | ||
9429 | /* It's df, we don't care if it's racey */ | 9429 | /* It's df, we don't care if it's racy */ |
9430 | if (list_empty(&sinfo->ro_bgs)) | 9430 | if (list_empty(&sinfo->ro_bgs)) |
9431 | return 0; | 9431 | return 0; |
9432 | 9432 | ||
@@ -10635,14 +10635,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) | |||
10635 | */ | 10635 | */ |
10636 | mutex_lock(&fs_info->unused_bg_unpin_mutex); | 10636 | mutex_lock(&fs_info->unused_bg_unpin_mutex); |
10637 | ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, | 10637 | ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, |
10638 | EXTENT_DIRTY, GFP_NOFS); | 10638 | EXTENT_DIRTY); |
10639 | if (ret) { | 10639 | if (ret) { |
10640 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); | 10640 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
10641 | btrfs_dec_block_group_ro(root, block_group); | 10641 | btrfs_dec_block_group_ro(root, block_group); |
10642 | goto end_trans; | 10642 | goto end_trans; |
10643 | } | 10643 | } |
10644 | ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, | 10644 | ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, |
10645 | EXTENT_DIRTY, GFP_NOFS); | 10645 | EXTENT_DIRTY); |
10646 | if (ret) { | 10646 | if (ret) { |
10647 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); | 10647 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
10648 | btrfs_dec_block_group_ro(root, block_group); | 10648 | btrfs_dec_block_group_ro(root, block_group); |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2f83448d34fe..3cd57825c75f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -726,14 +726,6 @@ next: | |||
726 | start = last_end + 1; | 726 | start = last_end + 1; |
727 | if (start <= end && state && !need_resched()) | 727 | if (start <= end && state && !need_resched()) |
728 | goto hit_next; | 728 | goto hit_next; |
729 | goto search_again; | ||
730 | |||
731 | out: | ||
732 | spin_unlock(&tree->lock); | ||
733 | if (prealloc) | ||
734 | free_extent_state(prealloc); | ||
735 | |||
736 | return 0; | ||
737 | 729 | ||
738 | search_again: | 730 | search_again: |
739 | if (start > end) | 731 | if (start > end) |
@@ -742,6 +734,14 @@ search_again: | |||
742 | if (gfpflags_allow_blocking(mask)) | 734 | if (gfpflags_allow_blocking(mask)) |
743 | cond_resched(); | 735 | cond_resched(); |
744 | goto again; | 736 | goto again; |
737 | |||
738 | out: | ||
739 | spin_unlock(&tree->lock); | ||
740 | if (prealloc) | ||
741 | free_extent_state(prealloc); | ||
742 | |||
743 | return 0; | ||
744 | |||
745 | } | 745 | } |
746 | 746 | ||
747 | static void wait_on_state(struct extent_io_tree *tree, | 747 | static void wait_on_state(struct extent_io_tree *tree, |
@@ -873,8 +873,14 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | |||
873 | bits |= EXTENT_FIRST_DELALLOC; | 873 | bits |= EXTENT_FIRST_DELALLOC; |
874 | again: | 874 | again: |
875 | if (!prealloc && gfpflags_allow_blocking(mask)) { | 875 | if (!prealloc && gfpflags_allow_blocking(mask)) { |
876 | /* | ||
877 | * Don't care for allocation failure here because we might end | ||
878 | * up not needing the pre-allocated extent state at all, which | ||
879 | * is the case if we only have in the tree extent states that | ||
880 | * cover our input range and don't cover too any other range. | ||
881 | * If we end up needing a new extent state we allocate it later. | ||
882 | */ | ||
876 | prealloc = alloc_extent_state(mask); | 883 | prealloc = alloc_extent_state(mask); |
877 | BUG_ON(!prealloc); | ||
878 | } | 884 | } |
879 | 885 | ||
880 | spin_lock(&tree->lock); | 886 | spin_lock(&tree->lock); |
@@ -1037,7 +1043,13 @@ hit_next: | |||
1037 | goto out; | 1043 | goto out; |
1038 | } | 1044 | } |
1039 | 1045 | ||
1040 | goto search_again; | 1046 | search_again: |
1047 | if (start > end) | ||
1048 | goto out; | ||
1049 | spin_unlock(&tree->lock); | ||
1050 | if (gfpflags_allow_blocking(mask)) | ||
1051 | cond_resched(); | ||
1052 | goto again; | ||
1041 | 1053 | ||
1042 | out: | 1054 | out: |
1043 | spin_unlock(&tree->lock); | 1055 | spin_unlock(&tree->lock); |
@@ -1046,13 +1058,6 @@ out: | |||
1046 | 1058 | ||
1047 | return err; | 1059 | return err; |
1048 | 1060 | ||
1049 | search_again: | ||
1050 | if (start > end) | ||
1051 | goto out; | ||
1052 | spin_unlock(&tree->lock); | ||
1053 | if (gfpflags_allow_blocking(mask)) | ||
1054 | cond_resched(); | ||
1055 | goto again; | ||
1056 | } | 1061 | } |
1057 | 1062 | ||
1058 | int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | 1063 | int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
@@ -1073,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | |||
1073 | * @bits: the bits to set in this range | 1078 | * @bits: the bits to set in this range |
1074 | * @clear_bits: the bits to clear in this range | 1079 | * @clear_bits: the bits to clear in this range |
1075 | * @cached_state: state that we're going to cache | 1080 | * @cached_state: state that we're going to cache |
1076 | * @mask: the allocation mask | ||
1077 | * | 1081 | * |
1078 | * This will go through and set bits for the given range. If any states exist | 1082 | * This will go through and set bits for the given range. If any states exist |
1079 | * already in this range they are set with the given bit and cleared of the | 1083 | * already in this range they are set with the given bit and cleared of the |
1080 | * clear_bits. This is only meant to be used by things that are mergeable, ie | 1084 | * clear_bits. This is only meant to be used by things that are mergeable, ie |
1081 | * converting from say DELALLOC to DIRTY. This is not meant to be used with | 1085 | * converting from say DELALLOC to DIRTY. This is not meant to be used with |
1082 | * boundary bits like LOCK. | 1086 | * boundary bits like LOCK. |
1087 | * | ||
1088 | * All allocations are done with GFP_NOFS. | ||
1083 | */ | 1089 | */ |
1084 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | 1090 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
1085 | unsigned bits, unsigned clear_bits, | 1091 | unsigned bits, unsigned clear_bits, |
1086 | struct extent_state **cached_state, gfp_t mask) | 1092 | struct extent_state **cached_state) |
1087 | { | 1093 | { |
1088 | struct extent_state *state; | 1094 | struct extent_state *state; |
1089 | struct extent_state *prealloc = NULL; | 1095 | struct extent_state *prealloc = NULL; |
@@ -1098,7 +1104,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | |||
1098 | btrfs_debug_check_extent_io_range(tree, start, end); | 1104 | btrfs_debug_check_extent_io_range(tree, start, end); |
1099 | 1105 | ||
1100 | again: | 1106 | again: |
1101 | if (!prealloc && gfpflags_allow_blocking(mask)) { | 1107 | if (!prealloc) { |
1102 | /* | 1108 | /* |
1103 | * Best effort, don't worry if extent state allocation fails | 1109 | * Best effort, don't worry if extent state allocation fails |
1104 | * here for the first iteration. We might have a cached state | 1110 | * here for the first iteration. We might have a cached state |
@@ -1106,7 +1112,7 @@ again: | |||
1106 | * extent state allocations are needed. We'll only know this | 1112 | * extent state allocations are needed. We'll only know this |
1107 | * after locking the tree. | 1113 | * after locking the tree. |
1108 | */ | 1114 | */ |
1109 | prealloc = alloc_extent_state(mask); | 1115 | prealloc = alloc_extent_state(GFP_NOFS); |
1110 | if (!prealloc && !first_iteration) | 1116 | if (!prealloc && !first_iteration) |
1111 | return -ENOMEM; | 1117 | return -ENOMEM; |
1112 | } | 1118 | } |
@@ -1263,7 +1269,13 @@ hit_next: | |||
1263 | goto out; | 1269 | goto out; |
1264 | } | 1270 | } |
1265 | 1271 | ||
1266 | goto search_again; | 1272 | search_again: |
1273 | if (start > end) | ||
1274 | goto out; | ||
1275 | spin_unlock(&tree->lock); | ||
1276 | cond_resched(); | ||
1277 | first_iteration = false; | ||
1278 | goto again; | ||
1267 | 1279 | ||
1268 | out: | 1280 | out: |
1269 | spin_unlock(&tree->lock); | 1281 | spin_unlock(&tree->lock); |
@@ -1271,21 +1283,11 @@ out: | |||
1271 | free_extent_state(prealloc); | 1283 | free_extent_state(prealloc); |
1272 | 1284 | ||
1273 | return err; | 1285 | return err; |
1274 | |||
1275 | search_again: | ||
1276 | if (start > end) | ||
1277 | goto out; | ||
1278 | spin_unlock(&tree->lock); | ||
1279 | if (gfpflags_allow_blocking(mask)) | ||
1280 | cond_resched(); | ||
1281 | first_iteration = false; | ||
1282 | goto again; | ||
1283 | } | 1286 | } |
1284 | 1287 | ||
1285 | /* wrappers around set/clear extent bit */ | 1288 | /* wrappers around set/clear extent bit */ |
1286 | int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, | 1289 | int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, |
1287 | unsigned bits, gfp_t mask, | 1290 | unsigned bits, struct extent_changeset *changeset) |
1288 | struct extent_changeset *changeset) | ||
1289 | { | 1291 | { |
1290 | /* | 1292 | /* |
1291 | * We don't support EXTENT_LOCKED yet, as current changeset will | 1293 | * We don't support EXTENT_LOCKED yet, as current changeset will |
@@ -1295,7 +1297,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, | |||
1295 | */ | 1297 | */ |
1296 | BUG_ON(bits & EXTENT_LOCKED); | 1298 | BUG_ON(bits & EXTENT_LOCKED); |
1297 | 1299 | ||
1298 | return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask, | 1300 | return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS, |
1299 | changeset); | 1301 | changeset); |
1300 | } | 1302 | } |
1301 | 1303 | ||
@@ -1308,8 +1310,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | |||
1308 | } | 1310 | } |
1309 | 1311 | ||
1310 | int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, | 1312 | int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, |
1311 | unsigned bits, gfp_t mask, | 1313 | unsigned bits, struct extent_changeset *changeset) |
1312 | struct extent_changeset *changeset) | ||
1313 | { | 1314 | { |
1314 | /* | 1315 | /* |
1315 | * Don't support EXTENT_LOCKED case, same reason as | 1316 | * Don't support EXTENT_LOCKED case, same reason as |
@@ -1317,7 +1318,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, | |||
1317 | */ | 1318 | */ |
1318 | BUG_ON(bits & EXTENT_LOCKED); | 1319 | BUG_ON(bits & EXTENT_LOCKED); |
1319 | 1320 | ||
1320 | return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask, | 1321 | return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS, |
1321 | changeset); | 1322 | changeset); |
1322 | } | 1323 | } |
1323 | 1324 | ||
@@ -1975,13 +1976,13 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec) | |||
1975 | set_state_failrec(failure_tree, rec->start, NULL); | 1976 | set_state_failrec(failure_tree, rec->start, NULL); |
1976 | ret = clear_extent_bits(failure_tree, rec->start, | 1977 | ret = clear_extent_bits(failure_tree, rec->start, |
1977 | rec->start + rec->len - 1, | 1978 | rec->start + rec->len - 1, |
1978 | EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); | 1979 | EXTENT_LOCKED | EXTENT_DIRTY); |
1979 | if (ret) | 1980 | if (ret) |
1980 | err = ret; | 1981 | err = ret; |
1981 | 1982 | ||
1982 | ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, | 1983 | ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, |
1983 | rec->start + rec->len - 1, | 1984 | rec->start + rec->len - 1, |
1984 | EXTENT_DAMAGED, GFP_NOFS); | 1985 | EXTENT_DAMAGED); |
1985 | if (ret && !err) | 1986 | if (ret && !err) |
1986 | err = ret; | 1987 | err = ret; |
1987 | 1988 | ||
@@ -2232,13 +2233,12 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, | |||
2232 | 2233 | ||
2233 | /* set the bits in the private failure tree */ | 2234 | /* set the bits in the private failure tree */ |
2234 | ret = set_extent_bits(failure_tree, start, end, | 2235 | ret = set_extent_bits(failure_tree, start, end, |
2235 | EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); | 2236 | EXTENT_LOCKED | EXTENT_DIRTY); |
2236 | if (ret >= 0) | 2237 | if (ret >= 0) |
2237 | ret = set_state_failrec(failure_tree, start, failrec); | 2238 | ret = set_state_failrec(failure_tree, start, failrec); |
2238 | /* set the bits in the inode's tree */ | 2239 | /* set the bits in the inode's tree */ |
2239 | if (ret >= 0) | 2240 | if (ret >= 0) |
2240 | ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, | 2241 | ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED); |
2241 | GFP_NOFS); | ||
2242 | if (ret < 0) { | 2242 | if (ret < 0) { |
2243 | kfree(failrec); | 2243 | kfree(failrec); |
2244 | return ret; | 2244 | return ret; |
@@ -4389,8 +4389,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
4389 | if (ret < 0) { | 4389 | if (ret < 0) { |
4390 | btrfs_free_path(path); | 4390 | btrfs_free_path(path); |
4391 | return ret; | 4391 | return ret; |
4392 | } else { | ||
4393 | WARN_ON(!ret); | ||
4394 | if (ret == 1) | ||
4395 | ret = 0; | ||
4392 | } | 4396 | } |
4393 | WARN_ON(!ret); | 4397 | |
4394 | path->slots[0]--; | 4398 | path->slots[0]--; |
4395 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); | 4399 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); |
4396 | found_type = found_key.type; | 4400 | found_type = found_key.type; |
@@ -4601,7 +4605,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb) | |||
4601 | if (mapped) | 4605 | if (mapped) |
4602 | spin_unlock(&page->mapping->private_lock); | 4606 | spin_unlock(&page->mapping->private_lock); |
4603 | 4607 | ||
4604 | /* One for when we alloced the page */ | 4608 | /* One for when we allocated the page */ |
4605 | put_page(page); | 4609 | put_page(page); |
4606 | } while (index != 0); | 4610 | } while (index != 0); |
4607 | } | 4611 | } |
@@ -5761,7 +5765,7 @@ int try_release_extent_buffer(struct page *page) | |||
5761 | struct extent_buffer *eb; | 5765 | struct extent_buffer *eb; |
5762 | 5766 | ||
5763 | /* | 5767 | /* |
5764 | * We need to make sure noboody is attaching this page to an eb right | 5768 | * We need to make sure nobody is attaching this page to an eb right |
5765 | * now. | 5769 | * now. |
5766 | */ | 5770 | */ |
5767 | spin_lock(&page->mapping->private_lock); | 5771 | spin_lock(&page->mapping->private_lock); |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 981f402bf754..1baf19c9b79d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -220,8 +220,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, | |||
220 | unsigned bits, int filled, | 220 | unsigned bits, int filled, |
221 | struct extent_state *cached_state); | 221 | struct extent_state *cached_state); |
222 | int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, | 222 | int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, |
223 | unsigned bits, gfp_t mask, | 223 | unsigned bits, struct extent_changeset *changeset); |
224 | struct extent_changeset *changeset); | ||
225 | int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | 224 | int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
226 | unsigned bits, int wake, int delete, | 225 | unsigned bits, int wake, int delete, |
227 | struct extent_state **cached, gfp_t mask); | 226 | struct extent_state **cached, gfp_t mask); |
@@ -240,27 +239,27 @@ static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start, | |||
240 | } | 239 | } |
241 | 240 | ||
242 | static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, | 241 | static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, |
243 | u64 end, unsigned bits, gfp_t mask) | 242 | u64 end, unsigned bits) |
244 | { | 243 | { |
245 | int wake = 0; | 244 | int wake = 0; |
246 | 245 | ||
247 | if (bits & EXTENT_LOCKED) | 246 | if (bits & EXTENT_LOCKED) |
248 | wake = 1; | 247 | wake = 1; |
249 | 248 | ||
250 | return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask); | 249 | return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, |
250 | GFP_NOFS); | ||
251 | } | 251 | } |
252 | 252 | ||
253 | int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, | 253 | int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, |
254 | unsigned bits, gfp_t mask, | 254 | unsigned bits, struct extent_changeset *changeset); |
255 | struct extent_changeset *changeset); | ||
256 | int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | 255 | int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
257 | unsigned bits, u64 *failed_start, | 256 | unsigned bits, u64 *failed_start, |
258 | struct extent_state **cached_state, gfp_t mask); | 257 | struct extent_state **cached_state, gfp_t mask); |
259 | 258 | ||
260 | static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, | 259 | static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, |
261 | u64 end, unsigned bits, gfp_t mask) | 260 | u64 end, unsigned bits) |
262 | { | 261 | { |
263 | return set_extent_bit(tree, start, end, bits, NULL, NULL, mask); | 262 | return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS); |
264 | } | 263 | } |
265 | 264 | ||
266 | static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, | 265 | static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, |
@@ -278,37 +277,38 @@ static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start, | |||
278 | } | 277 | } |
279 | 278 | ||
280 | static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, | 279 | static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, |
281 | u64 end, gfp_t mask) | 280 | u64 end) |
282 | { | 281 | { |
283 | return clear_extent_bit(tree, start, end, | 282 | return clear_extent_bit(tree, start, end, |
284 | EXTENT_DIRTY | EXTENT_DELALLOC | | 283 | EXTENT_DIRTY | EXTENT_DELALLOC | |
285 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask); | 284 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); |
286 | } | 285 | } |
287 | 286 | ||
288 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | 287 | int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
289 | unsigned bits, unsigned clear_bits, | 288 | unsigned bits, unsigned clear_bits, |
290 | struct extent_state **cached_state, gfp_t mask); | 289 | struct extent_state **cached_state); |
291 | 290 | ||
292 | static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, | 291 | static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, |
293 | u64 end, struct extent_state **cached_state, gfp_t mask) | 292 | u64 end, struct extent_state **cached_state) |
294 | { | 293 | { |
295 | return set_extent_bit(tree, start, end, | 294 | return set_extent_bit(tree, start, end, |
296 | EXTENT_DELALLOC | EXTENT_UPTODATE, | 295 | EXTENT_DELALLOC | EXTENT_UPTODATE, |
297 | NULL, cached_state, mask); | 296 | NULL, cached_state, GFP_NOFS); |
298 | } | 297 | } |
299 | 298 | ||
300 | static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, | 299 | static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, |
301 | u64 end, struct extent_state **cached_state, gfp_t mask) | 300 | u64 end, struct extent_state **cached_state) |
302 | { | 301 | { |
303 | return set_extent_bit(tree, start, end, | 302 | return set_extent_bit(tree, start, end, |
304 | EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, | 303 | EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, |
305 | NULL, cached_state, mask); | 304 | NULL, cached_state, GFP_NOFS); |
306 | } | 305 | } |
307 | 306 | ||
308 | static inline int set_extent_new(struct extent_io_tree *tree, u64 start, | 307 | static inline int set_extent_new(struct extent_io_tree *tree, u64 start, |
309 | u64 end, gfp_t mask) | 308 | u64 end) |
310 | { | 309 | { |
311 | return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask); | 310 | return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, |
311 | GFP_NOFS); | ||
312 | } | 312 | } |
313 | 313 | ||
314 | static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, | 314 | static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, |
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 318b048eb254..e0715fcfb11e 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c | |||
@@ -62,7 +62,7 @@ struct extent_map *alloc_extent_map(void) | |||
62 | 62 | ||
63 | /** | 63 | /** |
64 | * free_extent_map - drop reference count of an extent_map | 64 | * free_extent_map - drop reference count of an extent_map |
65 | * @em: extent map being releasead | 65 | * @em: extent map being released |
66 | * | 66 | * |
67 | * Drops the reference out on @em by one and free the structure | 67 | * Drops the reference out on @em by one and free the structure |
68 | * if the reference count hits zero. | 68 | * if the reference count hits zero. |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 7a7d6e253cfc..62a81ee13a5f 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
@@ -248,7 +248,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, | |||
248 | BTRFS_DATA_RELOC_TREE_OBJECTID) { | 248 | BTRFS_DATA_RELOC_TREE_OBJECTID) { |
249 | set_extent_bits(io_tree, offset, | 249 | set_extent_bits(io_tree, offset, |
250 | offset + root->sectorsize - 1, | 250 | offset + root->sectorsize - 1, |
251 | EXTENT_NODATASUM, GFP_NOFS); | 251 | EXTENT_NODATASUM); |
252 | } else { | 252 | } else { |
253 | btrfs_info(BTRFS_I(inode)->root->fs_info, | 253 | btrfs_info(BTRFS_I(inode)->root->fs_info, |
254 | "no csum found for inode %llu start %llu", | 254 | "no csum found for inode %llu start %llu", |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c98805c35bab..e0c9bd3fb02d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1596,6 +1596,13 @@ again: | |||
1596 | 1596 | ||
1597 | copied = btrfs_copy_from_user(pos, write_bytes, pages, i); | 1597 | copied = btrfs_copy_from_user(pos, write_bytes, pages, i); |
1598 | 1598 | ||
1599 | num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, | ||
1600 | reserve_bytes); | ||
1601 | dirty_sectors = round_up(copied + sector_offset, | ||
1602 | root->sectorsize); | ||
1603 | dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, | ||
1604 | dirty_sectors); | ||
1605 | |||
1599 | /* | 1606 | /* |
1600 | * if we have trouble faulting in the pages, fall | 1607 | * if we have trouble faulting in the pages, fall |
1601 | * back to one page at a time | 1608 | * back to one page at a time |
@@ -1605,6 +1612,7 @@ again: | |||
1605 | 1612 | ||
1606 | if (copied == 0) { | 1613 | if (copied == 0) { |
1607 | force_page_uptodate = true; | 1614 | force_page_uptodate = true; |
1615 | dirty_sectors = 0; | ||
1608 | dirty_pages = 0; | 1616 | dirty_pages = 0; |
1609 | } else { | 1617 | } else { |
1610 | force_page_uptodate = false; | 1618 | force_page_uptodate = false; |
@@ -1615,20 +1623,19 @@ again: | |||
1615 | /* | 1623 | /* |
1616 | * If we had a short copy we need to release the excess delaloc | 1624 | * If we had a short copy we need to release the excess delaloc |
1617 | * bytes we reserved. We need to increment outstanding_extents | 1625 | * bytes we reserved. We need to increment outstanding_extents |
1618 | * because btrfs_delalloc_release_space will decrement it, but | 1626 | * because btrfs_delalloc_release_space and |
1627 | * btrfs_delalloc_release_metadata will decrement it, but | ||
1619 | * we still have an outstanding extent for the chunk we actually | 1628 | * we still have an outstanding extent for the chunk we actually |
1620 | * managed to copy. | 1629 | * managed to copy. |
1621 | */ | 1630 | */ |
1622 | num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, | ||
1623 | reserve_bytes); | ||
1624 | dirty_sectors = round_up(copied + sector_offset, | ||
1625 | root->sectorsize); | ||
1626 | dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, | ||
1627 | dirty_sectors); | ||
1628 | |||
1629 | if (num_sectors > dirty_sectors) { | 1631 | if (num_sectors > dirty_sectors) { |
1630 | release_bytes = (write_bytes - copied) | 1632 | /* |
1631 | & ~((u64)root->sectorsize - 1); | 1633 | * we round down because we don't want to count |
1634 | * any partial blocks actually sent through the | ||
1635 | * IO machines | ||
1636 | */ | ||
1637 | release_bytes = round_down(release_bytes - copied, | ||
1638 | root->sectorsize); | ||
1632 | if (copied > 0) { | 1639 | if (copied > 0) { |
1633 | spin_lock(&BTRFS_I(inode)->lock); | 1640 | spin_lock(&BTRFS_I(inode)->lock); |
1634 | BTRFS_I(inode)->outstanding_extents++; | 1641 | BTRFS_I(inode)->outstanding_extents++; |
@@ -2022,7 +2029,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
2022 | BTRFS_I(inode)->last_trans | 2029 | BTRFS_I(inode)->last_trans |
2023 | <= root->fs_info->last_trans_committed)) { | 2030 | <= root->fs_info->last_trans_committed)) { |
2024 | /* | 2031 | /* |
2025 | * We'v had everything committed since the last time we were | 2032 | * We've had everything committed since the last time we were |
2026 | * modified so clear this flag in case it was set for whatever | 2033 | * modified so clear this flag in case it was set for whatever |
2027 | * reason, it's no longer relevant. | 2034 | * reason, it's no longer relevant. |
2028 | */ | 2035 | */ |
@@ -2370,7 +2377,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
2370 | 2377 | ||
2371 | /* Check the aligned pages after the first unaligned page, | 2378 | /* Check the aligned pages after the first unaligned page, |
2372 | * if offset != orig_start, which means the first unaligned page | 2379 | * if offset != orig_start, which means the first unaligned page |
2373 | * including serveral following pages are already in holes, | 2380 | * including several following pages are already in holes, |
2374 | * the extra check can be skipped */ | 2381 | * the extra check can be skipped */ |
2375 | if (offset == orig_start) { | 2382 | if (offset == orig_start) { |
2376 | /* after truncate page, check hole again */ | 2383 | /* after truncate page, check hole again */ |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 5e6062c26129..c6dc1183f542 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -1983,7 +1983,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl, | |||
1983 | /* | 1983 | /* |
1984 | * If this block group has some small extents we don't want to | 1984 | * If this block group has some small extents we don't want to |
1985 | * use up all of our free slots in the cache with them, we want | 1985 | * use up all of our free slots in the cache with them, we want |
1986 | * to reserve them to larger extents, however if we have plent | 1986 | * to reserve them to larger extents, however if we have plenty |
1987 | * of cache left then go ahead an dadd them, no sense in adding | 1987 | * of cache left then go ahead an dadd them, no sense in adding |
1988 | * the overhead of a bitmap if we don't have to. | 1988 | * the overhead of a bitmap if we don't have to. |
1989 | */ | 1989 | */ |
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 33178c490ace..3af651c2bbc7 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
@@ -123,7 +123,7 @@ int btrfs_return_cluster_to_free_space( | |||
123 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | 123 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, |
124 | u64 *trimmed, u64 start, u64 end, u64 minlen); | 124 | u64 *trimmed, u64 start, u64 end, u64 minlen); |
125 | 125 | ||
126 | /* Support functions for runnint our sanity tests */ | 126 | /* Support functions for running our sanity tests */ |
127 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 127 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
128 | int test_add_free_space_entry(struct btrfs_block_group_cache *cache, | 128 | int test_add_free_space_entry(struct btrfs_block_group_cache *cache, |
129 | u64 offset, u64 bytes, bool bitmap); | 129 | u64 offset, u64 bytes, bool bitmap); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 91419ef79b00..270499598ed4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -455,7 +455,7 @@ again: | |||
455 | 455 | ||
456 | /* | 456 | /* |
457 | * skip compression for a small file range(<=blocksize) that | 457 | * skip compression for a small file range(<=blocksize) that |
458 | * isn't an inline extent, since it dosen't save disk space at all. | 458 | * isn't an inline extent, since it doesn't save disk space at all. |
459 | */ | 459 | */ |
460 | if (total_compressed <= blocksize && | 460 | if (total_compressed <= blocksize && |
461 | (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) | 461 | (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) |
@@ -1978,7 +1978,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, | |||
1978 | { | 1978 | { |
1979 | WARN_ON((end & (PAGE_SIZE - 1)) == 0); | 1979 | WARN_ON((end & (PAGE_SIZE - 1)) == 0); |
1980 | return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, | 1980 | return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, |
1981 | cached_state, GFP_NOFS); | 1981 | cached_state); |
1982 | } | 1982 | } |
1983 | 1983 | ||
1984 | /* see btrfs_writepage_start_hook for details on why this is required */ | 1984 | /* see btrfs_writepage_start_hook for details on why this is required */ |
@@ -3119,8 +3119,7 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, | |||
3119 | 3119 | ||
3120 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && | 3120 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && |
3121 | test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { | 3121 | test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { |
3122 | clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, | 3122 | clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM); |
3123 | GFP_NOFS); | ||
3124 | return 0; | 3123 | return 0; |
3125 | } | 3124 | } |
3126 | 3125 | ||
@@ -3722,7 +3721,7 @@ cache_index: | |||
3722 | * and doesn't have an inode ref with the name "bar" anymore. | 3721 | * and doesn't have an inode ref with the name "bar" anymore. |
3723 | * | 3722 | * |
3724 | * Setting last_unlink_trans to last_trans is a pessimistic approach, | 3723 | * Setting last_unlink_trans to last_trans is a pessimistic approach, |
3725 | * but it guarantees correctness at the expense of ocassional full | 3724 | * but it guarantees correctness at the expense of occasional full |
3726 | * transaction commits on fsync if our inode is a directory, or if our | 3725 | * transaction commits on fsync if our inode is a directory, or if our |
3727 | * inode is not a directory, logging its parent unnecessarily. | 3726 | * inode is not a directory, logging its parent unnecessarily. |
3728 | */ | 3727 | */ |
@@ -4978,7 +4977,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) | |||
4978 | * be instantly completed which will give us extents that need | 4977 | * be instantly completed which will give us extents that need |
4979 | * to be truncated. If we fail to get an orphan inode down we | 4978 | * to be truncated. If we fail to get an orphan inode down we |
4980 | * could have left over extents that were never meant to live, | 4979 | * could have left over extents that were never meant to live, |
4981 | * so we need to garuntee from this point on that everything | 4980 | * so we need to guarantee from this point on that everything |
4982 | * will be consistent. | 4981 | * will be consistent. |
4983 | */ | 4982 | */ |
4984 | ret = btrfs_orphan_add(trans, inode); | 4983 | ret = btrfs_orphan_add(trans, inode); |
@@ -5248,7 +5247,7 @@ void btrfs_evict_inode(struct inode *inode) | |||
5248 | } | 5247 | } |
5249 | 5248 | ||
5250 | /* | 5249 | /* |
5251 | * We can't just steal from the global reserve, we need tomake | 5250 | * We can't just steal from the global reserve, we need to make |
5252 | * sure there is room to do it, if not we need to commit and try | 5251 | * sure there is room to do it, if not we need to commit and try |
5253 | * again. | 5252 | * again. |
5254 | */ | 5253 | */ |
@@ -7433,7 +7432,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, | |||
7433 | cached_state); | 7432 | cached_state); |
7434 | /* | 7433 | /* |
7435 | * We're concerned with the entire range that we're going to be | 7434 | * We're concerned with the entire range that we're going to be |
7436 | * doing DIO to, so we need to make sure theres no ordered | 7435 | * doing DIO to, so we need to make sure there's no ordered |
7437 | * extents in this range. | 7436 | * extents in this range. |
7438 | */ | 7437 | */ |
7439 | ordered = btrfs_lookup_ordered_range(inode, lockstart, | 7438 | ordered = btrfs_lookup_ordered_range(inode, lockstart, |
@@ -7595,7 +7594,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7595 | if (current->journal_info) { | 7594 | if (current->journal_info) { |
7596 | /* | 7595 | /* |
7597 | * Need to pull our outstanding extents and set journal_info to NULL so | 7596 | * Need to pull our outstanding extents and set journal_info to NULL so |
7598 | * that anything that needs to check if there's a transction doesn't get | 7597 | * that anything that needs to check if there's a transaction doesn't get |
7599 | * confused. | 7598 | * confused. |
7600 | */ | 7599 | */ |
7601 | dio_data = current->journal_info; | 7600 | dio_data = current->journal_info; |
@@ -7628,7 +7627,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7628 | * decompress it, so there will be buffering required no matter what we | 7627 | * decompress it, so there will be buffering required no matter what we |
7629 | * do, so go ahead and fallback to buffered. | 7628 | * do, so go ahead and fallback to buffered. |
7630 | * | 7629 | * |
7631 | * We return -ENOTBLK because thats what makes DIO go ahead and go back | 7630 | * We return -ENOTBLK because that's what makes DIO go ahead and go back |
7632 | * to buffered IO. Don't blame me, this is the price we pay for using | 7631 | * to buffered IO. Don't blame me, this is the price we pay for using |
7633 | * the generic code. | 7632 | * the generic code. |
7634 | */ | 7633 | */ |
@@ -9041,7 +9040,7 @@ static int btrfs_truncate(struct inode *inode) | |||
9041 | return ret; | 9040 | return ret; |
9042 | 9041 | ||
9043 | /* | 9042 | /* |
9044 | * Yes ladies and gentelment, this is indeed ugly. The fact is we have | 9043 | * Yes ladies and gentlemen, this is indeed ugly. The fact is we have |
9045 | * 3 things going on here | 9044 | * 3 things going on here |
9046 | * | 9045 | * |
9047 | * 1) We need to reserve space for our orphan item and the space to | 9046 | * 1) We need to reserve space for our orphan item and the space to |
@@ -9055,15 +9054,15 @@ static int btrfs_truncate(struct inode *inode) | |||
9055 | * space reserved in case it uses space during the truncate (thank you | 9054 | * space reserved in case it uses space during the truncate (thank you |
9056 | * very much snapshotting). | 9055 | * very much snapshotting). |
9057 | * | 9056 | * |
9058 | * And we need these to all be seperate. The fact is we can use alot of | 9057 | * And we need these to all be separate. The fact is we can use a lot of |
9059 | * space doing the truncate, and we have no earthly idea how much space | 9058 | * space doing the truncate, and we have no earthly idea how much space |
9060 | * we will use, so we need the truncate reservation to be seperate so it | 9059 | * we will use, so we need the truncate reservation to be separate so it |
9061 | * doesn't end up using space reserved for updating the inode or | 9060 | * doesn't end up using space reserved for updating the inode or |
9062 | * removing the orphan item. We also need to be able to stop the | 9061 | * removing the orphan item. We also need to be able to stop the |
9063 | * transaction and start a new one, which means we need to be able to | 9062 | * transaction and start a new one, which means we need to be able to |
9064 | * update the inode several times, and we have no idea of knowing how | 9063 | * update the inode several times, and we have no idea of knowing how |
9065 | * many times that will be, so we can't just reserve 1 item for the | 9064 | * many times that will be, so we can't just reserve 1 item for the |
9066 | * entirety of the opration, so that has to be done seperately as well. | 9065 | * entirety of the operation, so that has to be done separately as well. |
9067 | * Then there is the orphan item, which does indeed need to be held on | 9066 | * Then there is the orphan item, which does indeed need to be held on |
9068 | * to for the whole operation, and we need nobody to touch this reserved | 9067 | * to for the whole operation, and we need nobody to touch this reserved |
9069 | * space except the orphan code. | 9068 | * space except the orphan code. |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4e700694b741..001c111e5627 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -296,7 +296,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
296 | } | 296 | } |
297 | } else { | 297 | } else { |
298 | /* | 298 | /* |
299 | * Revert back under same assuptions as above | 299 | * Revert back under same assumptions as above |
300 | */ | 300 | */ |
301 | if (S_ISREG(mode)) { | 301 | if (S_ISREG(mode)) { |
302 | if (inode->i_size == 0) | 302 | if (inode->i_size == 0) |
@@ -465,7 +465,7 @@ static noinline int create_subvol(struct inode *dir, | |||
465 | 465 | ||
466 | /* | 466 | /* |
467 | * Don't create subvolume whose level is not zero. Or qgroup will be | 467 | * Don't create subvolume whose level is not zero. Or qgroup will be |
468 | * screwed up since it assume subvolme qgroup's level to be 0. | 468 | * screwed up since it assumes subvolume qgroup's level to be 0. |
469 | */ | 469 | */ |
470 | if (btrfs_qgroup_level(objectid)) { | 470 | if (btrfs_qgroup_level(objectid)) { |
471 | ret = -ENOSPC; | 471 | ret = -ENOSPC; |
@@ -780,7 +780,7 @@ free_pending: | |||
780 | * a. be owner of dir, or | 780 | * a. be owner of dir, or |
781 | * b. be owner of victim, or | 781 | * b. be owner of victim, or |
782 | * c. have CAP_FOWNER capability | 782 | * c. have CAP_FOWNER capability |
783 | * 6. If the victim is append-only or immutable we can't do antyhing with | 783 | * 6. If the victim is append-only or immutable we can't do anything with |
784 | * links pointing to it. | 784 | * links pointing to it. |
785 | * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. | 785 | * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. |
786 | * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. | 786 | * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. |
@@ -1239,7 +1239,7 @@ again: | |||
1239 | 1239 | ||
1240 | 1240 | ||
1241 | set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, | 1241 | set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, |
1242 | &cached_state, GFP_NOFS); | 1242 | &cached_state); |
1243 | 1243 | ||
1244 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | 1244 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, |
1245 | page_start, page_end - 1, &cached_state, | 1245 | page_start, page_end - 1, &cached_state, |
@@ -4654,7 +4654,7 @@ again: | |||
4654 | } | 4654 | } |
4655 | 4655 | ||
4656 | /* | 4656 | /* |
4657 | * mut. excl. ops lock is locked. Three possibilites: | 4657 | * mut. excl. ops lock is locked. Three possibilities: |
4658 | * (1) some other op is running | 4658 | * (1) some other op is running |
4659 | * (2) balance is running | 4659 | * (2) balance is running |
4660 | * (3) balance is paused -- special case (think resume) | 4660 | * (3) balance is paused -- special case (think resume) |
@@ -5571,7 +5571,7 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
5571 | ret = btrfs_sync_fs(file_inode(file)->i_sb, 1); | 5571 | ret = btrfs_sync_fs(file_inode(file)->i_sb, 1); |
5572 | /* | 5572 | /* |
5573 | * The transaction thread may want to do more work, | 5573 | * The transaction thread may want to do more work, |
5574 | * namely it pokes the cleaner ktread that will start | 5574 | * namely it pokes the cleaner kthread that will start |
5575 | * processing uncleaned subvols. | 5575 | * processing uncleaned subvols. |
5576 | */ | 5576 | */ |
5577 | wake_up_process(root->fs_info->transaction_kthread); | 5577 | wake_up_process(root->fs_info->transaction_kthread); |
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 8ef12623d65c..2049c9be85ee 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h | |||
@@ -58,7 +58,7 @@ struct btrfs_ordered_sum { | |||
58 | 58 | ||
59 | #define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */ | 59 | #define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */ |
60 | 60 | ||
61 | #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ | 61 | #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */ |
62 | 62 | ||
63 | #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */ | 63 | #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */ |
64 | 64 | ||
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 9e119552ed32..9d4c05b14f6e 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -85,7 +85,7 @@ struct btrfs_qgroup { | |||
85 | 85 | ||
86 | /* | 86 | /* |
87 | * temp variables for accounting operations | 87 | * temp variables for accounting operations |
88 | * Refer to qgroup_shared_accouting() for details. | 88 | * Refer to qgroup_shared_accounting() for details. |
89 | */ | 89 | */ |
90 | u64 old_refcnt; | 90 | u64 old_refcnt; |
91 | u64 new_refcnt; | 91 | u64 new_refcnt; |
@@ -499,7 +499,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) | |||
499 | } | 499 | } |
500 | /* | 500 | /* |
501 | * we call btrfs_free_qgroup_config() when umounting | 501 | * we call btrfs_free_qgroup_config() when umounting |
502 | * filesystem and disabling quota, so we set qgroup_ulit | 502 | * filesystem and disabling quota, so we set qgroup_ulist |
503 | * to be null here to avoid double free. | 503 | * to be null here to avoid double free. |
504 | */ | 504 | */ |
505 | ulist_free(fs_info->qgroup_ulist); | 505 | ulist_free(fs_info->qgroup_ulist); |
@@ -1036,7 +1036,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info, | |||
1036 | 1036 | ||
1037 | /* | 1037 | /* |
1038 | * The easy accounting, if we are adding/removing the only ref for an extent | 1038 | * The easy accounting, if we are adding/removing the only ref for an extent |
1039 | * then this qgroup and all of the parent qgroups get their refrence and | 1039 | * then this qgroup and all of the parent qgroups get their reference and |
1040 | * exclusive counts adjusted. | 1040 | * exclusive counts adjusted. |
1041 | * | 1041 | * |
1042 | * Caller should hold fs_info->qgroup_lock. | 1042 | * Caller should hold fs_info->qgroup_lock. |
@@ -1436,7 +1436,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans, | |||
1436 | 1436 | ||
1437 | /* | 1437 | /* |
1438 | * No need to do lock, since this function will only be called in | 1438 | * No need to do lock, since this function will only be called in |
1439 | * btrfs_commmit_transaction(). | 1439 | * btrfs_commit_transaction(). |
1440 | */ | 1440 | */ |
1441 | node = rb_first(&delayed_refs->dirty_extent_root); | 1441 | node = rb_first(&delayed_refs->dirty_extent_root); |
1442 | while (node) { | 1442 | while (node) { |
@@ -1557,7 +1557,7 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info, | |||
1557 | * A: cur_old_roots < nr_old_roots (not exclusive before) | 1557 | * A: cur_old_roots < nr_old_roots (not exclusive before) |
1558 | * !A: cur_old_roots == nr_old_roots (possible exclusive before) | 1558 | * !A: cur_old_roots == nr_old_roots (possible exclusive before) |
1559 | * B: cur_new_roots < nr_new_roots (not exclusive now) | 1559 | * B: cur_new_roots < nr_new_roots (not exclusive now) |
1560 | * !B: cur_new_roots == nr_new_roots (possible exclsuive now) | 1560 | * !B: cur_new_roots == nr_new_roots (possible exclusive now) |
1561 | * | 1561 | * |
1562 | * Results: | 1562 | * Results: |
1563 | * +: Possible sharing -> exclusive -: Possible exclusive -> sharing | 1563 | * +: Possible sharing -> exclusive -: Possible exclusive -> sharing |
@@ -1851,7 +1851,7 @@ out: | |||
1851 | } | 1851 | } |
1852 | 1852 | ||
1853 | /* | 1853 | /* |
1854 | * Copy the acounting information between qgroups. This is necessary | 1854 | * Copy the accounting information between qgroups. This is necessary |
1855 | * when a snapshot or a subvolume is created. Throwing an error will | 1855 | * when a snapshot or a subvolume is created. Throwing an error will |
1856 | * cause a transaction abort so we take extra care here to only error | 1856 | * cause a transaction abort so we take extra care here to only error |
1857 | * when a readonly fs is a reasonable outcome. | 1857 | * when a readonly fs is a reasonable outcome. |
@@ -2340,7 +2340,7 @@ out: | |||
2340 | mutex_unlock(&fs_info->qgroup_rescan_lock); | 2340 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
2341 | 2341 | ||
2342 | /* | 2342 | /* |
2343 | * only update status, since the previous part has alreay updated the | 2343 | * only update status, since the previous part has already updated the |
2344 | * qgroup info. | 2344 | * qgroup info. |
2345 | */ | 2345 | */ |
2346 | trans = btrfs_start_transaction(fs_info->quota_root, 1); | 2346 | trans = btrfs_start_transaction(fs_info->quota_root, 1); |
@@ -2542,8 +2542,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len) | |||
2542 | changeset.bytes_changed = 0; | 2542 | changeset.bytes_changed = 0; |
2543 | changeset.range_changed = ulist_alloc(GFP_NOFS); | 2543 | changeset.range_changed = ulist_alloc(GFP_NOFS); |
2544 | ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, | 2544 | ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, |
2545 | start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS, | 2545 | start + len -1, EXTENT_QGROUP_RESERVED, &changeset); |
2546 | &changeset); | ||
2547 | trace_btrfs_qgroup_reserve_data(inode, start, len, | 2546 | trace_btrfs_qgroup_reserve_data(inode, start, len, |
2548 | changeset.bytes_changed, | 2547 | changeset.bytes_changed, |
2549 | QGROUP_RESERVE); | 2548 | QGROUP_RESERVE); |
@@ -2580,8 +2579,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len, | |||
2580 | return -ENOMEM; | 2579 | return -ENOMEM; |
2581 | 2580 | ||
2582 | ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, | 2581 | ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, |
2583 | start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS, | 2582 | start + len -1, EXTENT_QGROUP_RESERVED, &changeset); |
2584 | &changeset); | ||
2585 | if (ret < 0) | 2583 | if (ret < 0) |
2586 | goto out; | 2584 | goto out; |
2587 | 2585 | ||
@@ -2672,7 +2670,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) | |||
2672 | } | 2670 | } |
2673 | 2671 | ||
2674 | /* | 2672 | /* |
2675 | * Check qgroup reserved space leaking, normally at destory inode | 2673 | * Check qgroup reserved space leaking, normally at destroy inode |
2676 | * time | 2674 | * time |
2677 | */ | 2675 | */ |
2678 | void btrfs_qgroup_check_reserved_leak(struct inode *inode) | 2676 | void btrfs_qgroup_check_reserved_leak(struct inode *inode) |
@@ -2688,7 +2686,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) | |||
2688 | return; | 2686 | return; |
2689 | 2687 | ||
2690 | ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | 2688 | ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, |
2691 | EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset); | 2689 | EXTENT_QGROUP_RESERVED, &changeset); |
2692 | 2690 | ||
2693 | WARN_ON(ret < 0); | 2691 | WARN_ON(ret < 0); |
2694 | if (WARN_ON(changeset.bytes_changed)) { | 2692 | if (WARN_ON(changeset.bytes_changed)) { |
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 0b7792e02dd5..f8b6d411a034 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
@@ -576,7 +576,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last, | |||
576 | * we can't merge with cached rbios, since the | 576 | * we can't merge with cached rbios, since the |
577 | * idea is that when we merge the destination | 577 | * idea is that when we merge the destination |
578 | * rbio is going to run our IO for us. We can | 578 | * rbio is going to run our IO for us. We can |
579 | * steal from cached rbio's though, other functions | 579 | * steal from cached rbios though, other functions |
580 | * handle that. | 580 | * handle that. |
581 | */ | 581 | */ |
582 | if (test_bit(RBIO_CACHE_BIT, &last->flags) || | 582 | if (test_bit(RBIO_CACHE_BIT, &last->flags) || |
@@ -2368,7 +2368,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, | |||
2368 | run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); | 2368 | run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); |
2369 | } | 2369 | } |
2370 | 2370 | ||
2371 | /* Check scrubbing pairty and repair it */ | 2371 | /* Check scrubbing parity and repair it */ |
2372 | p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); | 2372 | p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); |
2373 | parity = kmap(p); | 2373 | parity = kmap(p); |
2374 | if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) | 2374 | if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) |
@@ -2493,7 +2493,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) | |||
2493 | /* | 2493 | /* |
2494 | * Here means we got one corrupted data stripe and one | 2494 | * Here means we got one corrupted data stripe and one |
2495 | * corrupted parity on RAID6, if the corrupted parity | 2495 | * corrupted parity on RAID6, if the corrupted parity |
2496 | * is scrubbing parity, luckly, use the other one to repair | 2496 | * is scrubbing parity, luckily, use the other one to repair |
2497 | * the data, or we can not repair the data stripe. | 2497 | * the data, or we can not repair the data stripe. |
2498 | */ | 2498 | */ |
2499 | if (failp != rbio->scrubp) | 2499 | if (failp != rbio->scrubp) |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1cfd35cfac76..0477dca154ed 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -668,8 +668,8 @@ int find_inline_backref(struct extent_buffer *leaf, int slot, | |||
668 | * roots of b-trees that reference the tree block. | 668 | * roots of b-trees that reference the tree block. |
669 | * | 669 | * |
670 | * the basic idea of this function is check backrefs of a given block | 670 | * the basic idea of this function is check backrefs of a given block |
671 | * to find upper level blocks that refernece the block, and then check | 671 | * to find upper level blocks that reference the block, and then check |
672 | * bakcrefs of these upper level blocks recursively. the recursion stop | 672 | * backrefs of these upper level blocks recursively. the recursion stop |
673 | * when tree root is reached or backrefs for the block is cached. | 673 | * when tree root is reached or backrefs for the block is cached. |
674 | * | 674 | * |
675 | * NOTE: if we find backrefs for a block are cached, we know backrefs | 675 | * NOTE: if we find backrefs for a block are cached, we know backrefs |
@@ -1160,7 +1160,7 @@ out: | |||
1160 | if (!RB_EMPTY_NODE(&upper->rb_node)) | 1160 | if (!RB_EMPTY_NODE(&upper->rb_node)) |
1161 | continue; | 1161 | continue; |
1162 | 1162 | ||
1163 | /* Add this guy's upper edges to the list to proces */ | 1163 | /* Add this guy's upper edges to the list to process */ |
1164 | list_for_each_entry(edge, &upper->upper, list[LOWER]) | 1164 | list_for_each_entry(edge, &upper->upper, list[LOWER]) |
1165 | list_add_tail(&edge->list[UPPER], &list); | 1165 | list_add_tail(&edge->list[UPPER], &list); |
1166 | if (list_empty(&upper->upper)) | 1166 | if (list_empty(&upper->upper)) |
@@ -2396,7 +2396,7 @@ again: | |||
2396 | } | 2396 | } |
2397 | 2397 | ||
2398 | /* | 2398 | /* |
2399 | * we keep the old last snapshod transid in rtranid when we | 2399 | * we keep the old last snapshot transid in rtranid when we |
2400 | * created the relocation tree. | 2400 | * created the relocation tree. |
2401 | */ | 2401 | */ |
2402 | last_snap = btrfs_root_rtransid(&reloc_root->root_item); | 2402 | last_snap = btrfs_root_rtransid(&reloc_root->root_item); |
@@ -2616,7 +2616,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans, | |||
2616 | * only one thread can access block_rsv at this point, | 2616 | * only one thread can access block_rsv at this point, |
2617 | * so we don't need hold lock to protect block_rsv. | 2617 | * so we don't need hold lock to protect block_rsv. |
2618 | * we expand more reservation size here to allow enough | 2618 | * we expand more reservation size here to allow enough |
2619 | * space for relocation and we will return eailer in | 2619 | * space for relocation and we will return earlier in |
2620 | * enospc case. | 2620 | * enospc case. |
2621 | */ | 2621 | */ |
2622 | rc->block_rsv->size = tmp + rc->extent_root->nodesize * | 2622 | rc->block_rsv->size = tmp + rc->extent_root->nodesize * |
@@ -2814,7 +2814,7 @@ static void mark_block_processed(struct reloc_control *rc, | |||
2814 | u64 bytenr, u32 blocksize) | 2814 | u64 bytenr, u32 blocksize) |
2815 | { | 2815 | { |
2816 | set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, | 2816 | set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, |
2817 | EXTENT_DIRTY, GFP_NOFS); | 2817 | EXTENT_DIRTY); |
2818 | } | 2818 | } |
2819 | 2819 | ||
2820 | static void __mark_block_processed(struct reloc_control *rc, | 2820 | static void __mark_block_processed(struct reloc_control *rc, |
@@ -3182,7 +3182,7 @@ static int relocate_file_extent_cluster(struct inode *inode, | |||
3182 | page_start + offset == cluster->boundary[nr]) { | 3182 | page_start + offset == cluster->boundary[nr]) { |
3183 | set_extent_bits(&BTRFS_I(inode)->io_tree, | 3183 | set_extent_bits(&BTRFS_I(inode)->io_tree, |
3184 | page_start, page_end, | 3184 | page_start, page_end, |
3185 | EXTENT_BOUNDARY, GFP_NOFS); | 3185 | EXTENT_BOUNDARY); |
3186 | nr++; | 3186 | nr++; |
3187 | } | 3187 | } |
3188 | 3188 | ||
@@ -4059,8 +4059,7 @@ restart: | |||
4059 | } | 4059 | } |
4060 | 4060 | ||
4061 | btrfs_release_path(path); | 4061 | btrfs_release_path(path); |
4062 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, | 4062 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); |
4063 | GFP_NOFS); | ||
4064 | 4063 | ||
4065 | if (trans) { | 4064 | if (trans) { |
4066 | btrfs_end_transaction_throttle(trans, rc->extent_root); | 4065 | btrfs_end_transaction_throttle(trans, rc->extent_root); |
@@ -4591,7 +4590,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, | |||
4591 | 4590 | ||
4592 | /* | 4591 | /* |
4593 | * called before creating snapshot. it calculates metadata reservation | 4592 | * called before creating snapshot. it calculates metadata reservation |
4594 | * requried for relocating tree blocks in the snapshot | 4593 | * required for relocating tree blocks in the snapshot |
4595 | */ | 4594 | */ |
4596 | void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, | 4595 | void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, |
4597 | u64 *bytes_to_reserve) | 4596 | u64 *bytes_to_reserve) |
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index b2b14e7115f1..f1c30861d062 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c | |||
@@ -71,9 +71,9 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot, | |||
71 | * search_key: the key to search | 71 | * search_key: the key to search |
72 | * path: the path we search | 72 | * path: the path we search |
73 | * root_item: the root item of the tree we look for | 73 | * root_item: the root item of the tree we look for |
74 | * root_key: the reak key of the tree we look for | 74 | * root_key: the root key of the tree we look for |
75 | * | 75 | * |
76 | * If ->offset of 'seach_key' is -1ULL, it means we are not sure the offset | 76 | * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset |
77 | * of the search key, just lookup the root with the highest offset for a | 77 | * of the search key, just lookup the root with the highest offset for a |
78 | * given objectid. | 78 | * given objectid. |
79 | * | 79 | * |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index fa35cdc46494..46d847f66e4b 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -745,7 +745,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) | |||
745 | * sure we read the bad mirror. | 745 | * sure we read the bad mirror. |
746 | */ | 746 | */ |
747 | ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, | 747 | ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, |
748 | EXTENT_DAMAGED, GFP_NOFS); | 748 | EXTENT_DAMAGED); |
749 | if (ret) { | 749 | if (ret) { |
750 | /* set_extent_bits should give proper error */ | 750 | /* set_extent_bits should give proper error */ |
751 | WARN_ON(ret > 0); | 751 | WARN_ON(ret > 0); |
@@ -763,7 +763,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) | |||
763 | end, EXTENT_DAMAGED, 0, NULL); | 763 | end, EXTENT_DAMAGED, 0, NULL); |
764 | if (!corrected) | 764 | if (!corrected) |
765 | clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, | 765 | clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, |
766 | EXTENT_DAMAGED, GFP_NOFS); | 766 | EXTENT_DAMAGED); |
767 | } | 767 | } |
768 | 768 | ||
769 | out: | 769 | out: |
@@ -1044,7 +1044,7 @@ nodatasum_case: | |||
1044 | 1044 | ||
1045 | /* | 1045 | /* |
1046 | * !is_metadata and !have_csum, this means that the data | 1046 | * !is_metadata and !have_csum, this means that the data |
1047 | * might not be COW'ed, that it might be modified | 1047 | * might not be COWed, that it might be modified |
1048 | * concurrently. The general strategy to work on the | 1048 | * concurrently. The general strategy to work on the |
1049 | * commit root does not help in the case when COW is not | 1049 | * commit root does not help in the case when COW is not |
1050 | * used. | 1050 | * used. |
@@ -1125,7 +1125,7 @@ nodatasum_case: | |||
1125 | * the 2nd page of mirror #1 faces I/O errors, and the 2nd page | 1125 | * the 2nd page of mirror #1 faces I/O errors, and the 2nd page |
1126 | * of mirror #2 is readable but the final checksum test fails, | 1126 | * of mirror #2 is readable but the final checksum test fails, |
1127 | * then the 2nd page of mirror #3 could be tried, whether now | 1127 | * then the 2nd page of mirror #3 could be tried, whether now |
1128 | * the final checksum succeedes. But this would be a rare | 1128 | * the final checksum succeeds. But this would be a rare |
1129 | * exception and is therefore not implemented. At least it is | 1129 | * exception and is therefore not implemented. At least it is |
1130 | * avoided that the good copy is overwritten. | 1130 | * avoided that the good copy is overwritten. |
1131 | * A more useful improvement would be to pick the sectors | 1131 | * A more useful improvement would be to pick the sectors |
@@ -2181,7 +2181,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock) | |||
2181 | struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; | 2181 | struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; |
2182 | u64 length = sblock->page_count * PAGE_SIZE; | 2182 | u64 length = sblock->page_count * PAGE_SIZE; |
2183 | u64 logical = sblock->pagev[0]->logical; | 2183 | u64 logical = sblock->pagev[0]->logical; |
2184 | struct btrfs_bio *bbio; | 2184 | struct btrfs_bio *bbio = NULL; |
2185 | struct bio *bio; | 2185 | struct bio *bio; |
2186 | struct btrfs_raid_bio *rbio; | 2186 | struct btrfs_raid_bio *rbio; |
2187 | int ret; | 2187 | int ret; |
@@ -2982,6 +2982,7 @@ again: | |||
2982 | extent_len); | 2982 | extent_len); |
2983 | 2983 | ||
2984 | mapped_length = extent_len; | 2984 | mapped_length = extent_len; |
2985 | bbio = NULL; | ||
2985 | ret = btrfs_map_block(fs_info, READ, extent_logical, | 2986 | ret = btrfs_map_block(fs_info, READ, extent_logical, |
2986 | &mapped_length, &bbio, 0); | 2987 | &mapped_length, &bbio, 0); |
2987 | if (!ret) { | 2988 | if (!ret) { |
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 6a8c86074aa4..b71dd298385c 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -1831,7 +1831,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, | |||
1831 | 1831 | ||
1832 | /* | 1832 | /* |
1833 | * If we have a parent root we need to verify that the parent dir was | 1833 | * If we have a parent root we need to verify that the parent dir was |
1834 | * not delted and then re-created, if it was then we have no overwrite | 1834 | * not deleted and then re-created, if it was then we have no overwrite |
1835 | * and we can just unlink this entry. | 1835 | * and we can just unlink this entry. |
1836 | */ | 1836 | */ |
1837 | if (sctx->parent_root) { | 1837 | if (sctx->parent_root) { |
@@ -4192,9 +4192,9 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, | |||
4192 | return -ENOMEM; | 4192 | return -ENOMEM; |
4193 | 4193 | ||
4194 | /* | 4194 | /* |
4195 | * This hack is needed because empty acl's are stored as zero byte | 4195 | * This hack is needed because empty acls are stored as zero byte |
4196 | * data in xattrs. Problem with that is, that receiving these zero byte | 4196 | * data in xattrs. Problem with that is, that receiving these zero byte |
4197 | * acl's will fail later. To fix this, we send a dummy acl list that | 4197 | * acls will fail later. To fix this, we send a dummy acl list that |
4198 | * only contains the version number and no entries. | 4198 | * only contains the version number and no entries. |
4199 | */ | 4199 | */ |
4200 | if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || | 4200 | if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || |
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index e05619f241be..875c757e73e2 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c | |||
@@ -36,7 +36,7 @@ static inline void put_unaligned_le8(u8 val, void *p) | |||
36 | * | 36 | * |
37 | * The end result is that anyone who #includes ctree.h gets a | 37 | * The end result is that anyone who #includes ctree.h gets a |
38 | * declaration for the btrfs_set_foo functions and btrfs_foo functions, | 38 | * declaration for the btrfs_set_foo functions and btrfs_foo functions, |
39 | * which are wappers of btrfs_set_token_#bits functions and | 39 | * which are wrappers of btrfs_set_token_#bits functions and |
40 | * btrfs_get_token_#bits functions, which are defined in this file. | 40 | * btrfs_get_token_#bits functions, which are defined in this file. |
41 | * | 41 | * |
42 | * These setget functions do all the extent_buffer related mapping | 42 | * These setget functions do all the extent_buffer related mapping |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index bf71071ab6f6..4e59a91a11e0 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -112,7 +112,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info) | |||
112 | * Note that a running device replace operation is not | 112 | * Note that a running device replace operation is not |
113 | * canceled here although there is no way to update | 113 | * canceled here although there is no way to update |
114 | * the progress. It would add the risk of a deadlock, | 114 | * the progress. It would add the risk of a deadlock, |
115 | * therefore the canceling is ommited. The only penalty | 115 | * therefore the canceling is omitted. The only penalty |
116 | * is that some I/O remains active until the procedure | 116 | * is that some I/O remains active until the procedure |
117 | * completes. The next time when the filesystem is | 117 | * completes. The next time when the filesystem is |
118 | * mounted writeable again, the device replace | 118 | * mounted writeable again, the device replace |
@@ -1877,7 +1877,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1877 | int ret; | 1877 | int ret; |
1878 | 1878 | ||
1879 | /* | 1879 | /* |
1880 | * We aren't under the device list lock, so this is racey-ish, but good | 1880 | * We aren't under the device list lock, so this is racy-ish, but good |
1881 | * enough for our purposes. | 1881 | * enough for our purposes. |
1882 | */ | 1882 | */ |
1883 | nr_devices = fs_info->fs_devices->open_devices; | 1883 | nr_devices = fs_info->fs_devices->open_devices; |
@@ -1896,7 +1896,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1896 | if (!devices_info) | 1896 | if (!devices_info) |
1897 | return -ENOMEM; | 1897 | return -ENOMEM; |
1898 | 1898 | ||
1899 | /* calc min stripe number for data space alloction */ | 1899 | /* calc min stripe number for data space allocation */ |
1900 | type = btrfs_get_alloc_profile(root, 1); | 1900 | type = btrfs_get_alloc_profile(root, 1); |
1901 | if (type & BTRFS_BLOCK_GROUP_RAID0) { | 1901 | if (type & BTRFS_BLOCK_GROUP_RAID0) { |
1902 | min_stripes = 2; | 1902 | min_stripes = 2; |
@@ -1932,7 +1932,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1932 | avail_space *= BTRFS_STRIPE_LEN; | 1932 | avail_space *= BTRFS_STRIPE_LEN; |
1933 | 1933 | ||
1934 | /* | 1934 | /* |
1935 | * In order to avoid overwritting the superblock on the drive, | 1935 | * In order to avoid overwriting the superblock on the drive, |
1936 | * btrfs starts at an offset of at least 1MB when doing chunk | 1936 | * btrfs starts at an offset of at least 1MB when doing chunk |
1937 | * allocation. | 1937 | * allocation. |
1938 | */ | 1938 | */ |
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 70948b13bc81..55724607f79b 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c | |||
@@ -113,7 +113,7 @@ static int test_find_delalloc(void) | |||
113 | * |--- delalloc ---| | 113 | * |--- delalloc ---| |
114 | * |--- search ---| | 114 | * |--- search ---| |
115 | */ | 115 | */ |
116 | set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL); | 116 | set_extent_delalloc(&tmp, 0, 4095, NULL); |
117 | start = 0; | 117 | start = 0; |
118 | end = 0; | 118 | end = 0; |
119 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 119 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
@@ -144,7 +144,7 @@ static int test_find_delalloc(void) | |||
144 | test_msg("Couldn't find the locked page\n"); | 144 | test_msg("Couldn't find the locked page\n"); |
145 | goto out_bits; | 145 | goto out_bits; |
146 | } | 146 | } |
147 | set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL); | 147 | set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL); |
148 | start = test_start; | 148 | start = test_start; |
149 | end = 0; | 149 | end = 0; |
150 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 150 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
@@ -176,7 +176,7 @@ static int test_find_delalloc(void) | |||
176 | locked_page = find_lock_page(inode->i_mapping, test_start >> | 176 | locked_page = find_lock_page(inode->i_mapping, test_start >> |
177 | PAGE_SHIFT); | 177 | PAGE_SHIFT); |
178 | if (!locked_page) { | 178 | if (!locked_page) { |
179 | test_msg("Could'nt find the locked page\n"); | 179 | test_msg("Couldn't find the locked page\n"); |
180 | goto out_bits; | 180 | goto out_bits; |
181 | } | 181 | } |
182 | start = test_start; | 182 | start = test_start; |
@@ -199,7 +199,7 @@ static int test_find_delalloc(void) | |||
199 | * | 199 | * |
200 | * We are re-using our test_start from above since it works out well. | 200 | * We are re-using our test_start from above since it works out well. |
201 | */ | 201 | */ |
202 | set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL); | 202 | set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL); |
203 | start = test_start; | 203 | start = test_start; |
204 | end = 0; | 204 | end = 0; |
205 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 205 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
@@ -262,7 +262,7 @@ static int test_find_delalloc(void) | |||
262 | } | 262 | } |
263 | ret = 0; | 263 | ret = 0; |
264 | out_bits: | 264 | out_bits: |
265 | clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL); | 265 | clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1); |
266 | out: | 266 | out: |
267 | if (locked_page) | 267 | if (locked_page) |
268 | put_page(locked_page); | 268 | put_page(locked_page); |
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index 514247515312..0eeb8f3d6b67 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #define BITS_PER_BITMAP (PAGE_SIZE * 8) | 25 | #define BITS_PER_BITMAP (PAGE_SIZE * 8) |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * This test just does basic sanity checking, making sure we can add an exten | 28 | * This test just does basic sanity checking, making sure we can add an extent |
29 | * entry and remove space from either end and the middle, and make sure we can | 29 | * entry and remove space from either end and the middle, and make sure we can |
30 | * remove space that covers adjacent extent entries. | 30 | * remove space that covers adjacent extent entries. |
31 | */ | 31 | */ |
@@ -396,8 +396,9 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache) | |||
396 | * wasn't optimal as they could be spread all over the block group while under | 396 | * wasn't optimal as they could be spread all over the block group while under |
397 | * concurrency (extra overhead and fragmentation). | 397 | * concurrency (extra overhead and fragmentation). |
398 | * | 398 | * |
399 | * This stealing approach is benefical, since we always prefer to allocate from | 399 | * This stealing approach is beneficial, since we always prefer to allocate |
400 | * extent entries, both for clustered and non-clustered allocation requests. | 400 | * from extent entries, both for clustered and non-clustered allocation |
401 | * requests. | ||
401 | */ | 402 | */ |
402 | static int | 403 | static int |
403 | test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | 404 | test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) |
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 863a6a3af1f8..8a25fe8b7c45 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c | |||
@@ -264,7 +264,7 @@ static noinline int test_btrfs_get_extent(void) | |||
264 | 264 | ||
265 | /* | 265 | /* |
266 | * We will just free a dummy node if it's ref count is 2 so we need an | 266 | * We will just free a dummy node if it's ref count is 2 so we need an |
267 | * extra ref so our searches don't accidently release our page. | 267 | * extra ref so our searches don't accidentally release our page. |
268 | */ | 268 | */ |
269 | extent_buffer_get(root->node); | 269 | extent_buffer_get(root->node); |
270 | btrfs_set_header_nritems(root->node, 0); | 270 | btrfs_set_header_nritems(root->node, 0); |
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 8ea5d34bc5a2..8aa4ded31326 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c | |||
@@ -234,7 +234,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
234 | } | 234 | } |
235 | 235 | ||
236 | /* | 236 | /* |
237 | * Since the test trans doesn't havee the complicated delayed refs, | 237 | * Since the test trans doesn't have the complicated delayed refs, |
238 | * we can only call btrfs_qgroup_account_extent() directly to test | 238 | * we can only call btrfs_qgroup_account_extent() directly to test |
239 | * quota. | 239 | * quota. |
240 | */ | 240 | */ |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 5b0b758a3f79..f6e24cb423ae 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -944,7 +944,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root, | |||
944 | 944 | ||
945 | err = convert_extent_bit(dirty_pages, start, end, | 945 | err = convert_extent_bit(dirty_pages, start, end, |
946 | EXTENT_NEED_WAIT, | 946 | EXTENT_NEED_WAIT, |
947 | mark, &cached_state, GFP_NOFS); | 947 | mark, &cached_state); |
948 | /* | 948 | /* |
949 | * convert_extent_bit can return -ENOMEM, which is most of the | 949 | * convert_extent_bit can return -ENOMEM, which is most of the |
950 | * time a temporary error. So when it happens, ignore the error | 950 | * time a temporary error. So when it happens, ignore the error |
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 72be51f7ca2f..9fe0ec2bf0fe 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
@@ -144,7 +144,7 @@ struct btrfs_pending_snapshot { | |||
144 | /* block reservation for the operation */ | 144 | /* block reservation for the operation */ |
145 | struct btrfs_block_rsv block_rsv; | 145 | struct btrfs_block_rsv block_rsv; |
146 | u64 qgroup_reserved; | 146 | u64 qgroup_reserved; |
147 | /* extra metadata reseration for relocation */ | 147 | /* extra metadata reservation for relocation */ |
148 | int error; | 148 | int error; |
149 | bool readonly; | 149 | bool readonly; |
150 | struct list_head list; | 150 | struct list_head list; |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 8aaca5c6af94..b7665af471d8 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -2330,7 +2330,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, | |||
2330 | break; | 2330 | break; |
2331 | 2331 | ||
2332 | /* for regular files, make sure corresponding | 2332 | /* for regular files, make sure corresponding |
2333 | * orhpan item exist. extents past the new EOF | 2333 | * orphan item exist. extents past the new EOF |
2334 | * will be truncated later by orphan cleanup. | 2334 | * will be truncated later by orphan cleanup. |
2335 | */ | 2335 | */ |
2336 | if (S_ISREG(mode)) { | 2336 | if (S_ISREG(mode)) { |
@@ -3001,7 +3001,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans, | |||
3001 | break; | 3001 | break; |
3002 | 3002 | ||
3003 | clear_extent_bits(&log->dirty_log_pages, start, end, | 3003 | clear_extent_bits(&log->dirty_log_pages, start, end, |
3004 | EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); | 3004 | EXTENT_DIRTY | EXTENT_NEW); |
3005 | } | 3005 | } |
3006 | 3006 | ||
3007 | /* | 3007 | /* |
@@ -4914,7 +4914,7 @@ out_unlock: | |||
4914 | * the actual unlink operation, so if we do this check before a concurrent task | 4914 | * the actual unlink operation, so if we do this check before a concurrent task |
4915 | * sets last_unlink_trans it means we've logged a consistent version/state of | 4915 | * sets last_unlink_trans it means we've logged a consistent version/state of |
4916 | * all the inode items, otherwise we are not sure and must do a transaction | 4916 | * all the inode items, otherwise we are not sure and must do a transaction |
4917 | * commit (the concurrent task migth have only updated last_unlink_trans before | 4917 | * commit (the concurrent task might have only updated last_unlink_trans before |
4918 | * we logged the inode or it might have also done the unlink). | 4918 | * we logged the inode or it might have also done the unlink). |
4919 | */ | 4919 | */ |
4920 | static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, | 4920 | static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, |
@@ -4973,7 +4973,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, | |||
4973 | while (1) { | 4973 | while (1) { |
4974 | /* | 4974 | /* |
4975 | * If we are logging a directory then we start with our inode, | 4975 | * If we are logging a directory then we start with our inode, |
4976 | * not our parents inode, so we need to skipp setting the | 4976 | * not our parent's inode, so we need to skip setting the |
4977 | * logged_trans so that further down in the log code we don't | 4977 | * logged_trans so that further down in the log code we don't |
4978 | * think this inode has already been logged. | 4978 | * think this inode has already been logged. |
4979 | */ | 4979 | */ |
@@ -5357,7 +5357,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, | |||
5357 | log_dentries = true; | 5357 | log_dentries = true; |
5358 | 5358 | ||
5359 | /* | 5359 | /* |
5360 | * On unlink we must make sure all our current and old parent directores | 5360 | * On unlink we must make sure all our current and old parent directory |
5361 | * inodes are fully logged. This is to prevent leaving dangling | 5361 | * inodes are fully logged. This is to prevent leaving dangling |
5362 | * directory index entries in directories that were our parents but are | 5362 | * directory index entries in directories that were our parents but are |
5363 | * not anymore. Not doing this results in old parent directory being | 5363 | * not anymore. Not doing this results in old parent directory being |
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c index 91feb2bdefee..b1434bb57e36 100644 --- a/fs/btrfs/ulist.c +++ b/fs/btrfs/ulist.c | |||
@@ -28,7 +28,7 @@ | |||
28 | * } | 28 | * } |
29 | * ulist_free(ulist); | 29 | * ulist_free(ulist); |
30 | * | 30 | * |
31 | * This assumes the graph nodes are adressable by u64. This stems from the | 31 | * This assumes the graph nodes are addressable by u64. This stems from the |
32 | * usage for tree enumeration in btrfs, where the logical addresses are | 32 | * usage for tree enumeration in btrfs, where the logical addresses are |
33 | * 64 bit. | 33 | * 64 bit. |
34 | * | 34 | * |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2b88127bba5b..bdc62561ede8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -2190,7 +2190,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root) | |||
2190 | } | 2190 | } |
2191 | 2191 | ||
2192 | /* | 2192 | /* |
2193 | * strore the expected generation for seed devices in device items. | 2193 | * Store the expected generation for seed devices in device items. |
2194 | */ | 2194 | */ |
2195 | static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, | 2195 | static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, |
2196 | struct btrfs_root *root) | 2196 | struct btrfs_root *root) |
@@ -3387,7 +3387,7 @@ static int should_balance_chunk(struct btrfs_root *root, | |||
3387 | } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { | 3387 | } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { |
3388 | /* | 3388 | /* |
3389 | * Same logic as the 'limit' filter; the minimum cannot be | 3389 | * Same logic as the 'limit' filter; the minimum cannot be |
3390 | * determined here because we do not have the global informatoin | 3390 | * determined here because we do not have the global information |
3391 | * about the count of all chunks that satisfy the filters. | 3391 | * about the count of all chunks that satisfy the filters. |
3392 | */ | 3392 | */ |
3393 | if (bargs->limit_max == 0) | 3393 | if (bargs->limit_max == 0) |
@@ -6076,7 +6076,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) | |||
6076 | { | 6076 | { |
6077 | atomic_inc(&bbio->error); | 6077 | atomic_inc(&bbio->error); |
6078 | if (atomic_dec_and_test(&bbio->stripes_pending)) { | 6078 | if (atomic_dec_and_test(&bbio->stripes_pending)) { |
6079 | /* Shoud be the original bio. */ | 6079 | /* Should be the original bio. */ |
6080 | WARN_ON(bio != bbio->orig_bio); | 6080 | WARN_ON(bio != bbio->orig_bio); |
6081 | 6081 | ||
6082 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; | 6082 | btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; |
@@ -6560,7 +6560,7 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
6560 | set_extent_buffer_uptodate(sb); | 6560 | set_extent_buffer_uptodate(sb); |
6561 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); | 6561 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); |
6562 | /* | 6562 | /* |
6563 | * The sb extent buffer is artifical and just used to read the system array. | 6563 | * The sb extent buffer is artificial and just used to read the system array. |
6564 | * set_extent_buffer_uptodate() call does not properly mark all it's | 6564 | * set_extent_buffer_uptodate() call does not properly mark all it's |
6565 | * pages up-to-date when the page is larger: extent does not cover the | 6565 | * pages up-to-date when the page is larger: extent does not cover the |
6566 | * whole page and consequently check_page_uptodate does not find all | 6566 | * whole page and consequently check_page_uptodate does not find all |
@@ -6630,13 +6630,13 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
6630 | sb_array_offset += len; | 6630 | sb_array_offset += len; |
6631 | cur_offset += len; | 6631 | cur_offset += len; |
6632 | } | 6632 | } |
6633 | free_extent_buffer(sb); | 6633 | free_extent_buffer_stale(sb); |
6634 | return ret; | 6634 | return ret; |
6635 | 6635 | ||
6636 | out_short_read: | 6636 | out_short_read: |
6637 | printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", | 6637 | printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", |
6638 | len, cur_offset); | 6638 | len, cur_offset); |
6639 | free_extent_buffer(sb); | 6639 | free_extent_buffer_stale(sb); |
6640 | return -EIO; | 6640 | return -EIO; |
6641 | } | 6641 | } |
6642 | 6642 | ||