-rw-r--r--  fs/btrfs/backref.c                    2
-rw-r--r--  fs/btrfs/btrfs_inode.h                2
-rw-r--r--  fs/btrfs/check-integrity.c            2
-rw-r--r--  fs/btrfs/ctree.c                     14
-rw-r--r--  fs/btrfs/ctree.h                      6
-rw-r--r--  fs/btrfs/delayed-ref.h                2
-rw-r--r--  fs/btrfs/dev-replace.c                2
-rw-r--r--  fs/btrfs/disk-io.c                   10
-rw-r--r--  fs/btrfs/extent-tree.c               32
-rw-r--r--  fs/btrfs/extent_io.c                  4
-rw-r--r--  fs/btrfs/extent_map.c                 2
-rw-r--r--  fs/btrfs/file.c                       4
-rw-r--r--  fs/btrfs/free-space-cache.c           2
-rw-r--r--  fs/btrfs/free-space-cache.h           2
-rw-r--r--  fs/btrfs/inode.c                     22
-rw-r--r--  fs/btrfs/ioctl.c                     10
-rw-r--r--  fs/btrfs/ordered-data.h               2
-rw-r--r--  fs/btrfs/qgroup.c                    16
-rw-r--r--  fs/btrfs/raid56.c                     6
-rw-r--r--  fs/btrfs/relocation.c                12
-rw-r--r--  fs/btrfs/root-tree.c                  4
-rw-r--r--  fs/btrfs/scrub.c                      4
-rw-r--r--  fs/btrfs/send.c                       6
-rw-r--r--  fs/btrfs/struct-funcs.c               2
-rw-r--r--  fs/btrfs/super.c                      8
-rw-r--r--  fs/btrfs/tests/extent-io-tests.c      2
-rw-r--r--  fs/btrfs/tests/free-space-tests.c     7
-rw-r--r--  fs/btrfs/tests/inode-tests.c          2
-rw-r--r--  fs/btrfs/tests/qgroup-tests.c         2
-rw-r--r--  fs/btrfs/transaction.h                2
-rw-r--r--  fs/btrfs/tree-log.c                   8
-rw-r--r--  fs/btrfs/ulist.c                      2
-rw-r--r--  fs/btrfs/volumes.c                    8
33 files changed, 106 insertions, 105 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 80e8472d618b..b8b598740bbd 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
  * from ipath->fspath->val[i].
  * when it returns, there are ipath->fspath->elem_cnt number of paths available
  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
- * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
+ * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
  * have been needed to return all paths.
  */
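
For orientation, a sketch of how a caller consumes this contract; the loop below is hypothetical caller code, not part of this diff (scrub.c does essentially this):

	ret = paths_from_inode(inum, ipath);
	if (ret == 0) {
		int i;

		/* elem_cnt paths were fully written out */
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			pr_info("path: %s\n",
				(char *)(unsigned long)ipath->fspath->val[i]);
		if (ipath->fspath->elem_missed)
			pr_info("%llu paths missed, %llu more bytes needed\n",
				ipath->fspath->elem_missed,
				ipath->fspath->bytes_missing);
	}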
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 61205e3bbefa..c0a2018761f9 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -303,7 +303,7 @@ struct btrfs_dio_private {
 	struct bio *dio_bio;

 	/*
-	 * The original bio may be splited to several sub-bios, this is
+	 * The original bio may be split to several sub-bios, this is
 	 * done during endio of sub-bios
 	 */
 	int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 516e19d1d202..b677a6ea6001 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1939,7 +1939,7 @@ again:
 		/*
 		 * Clear all references of this block. Do not free
 		 * the block itself even if is not referenced anymore
-		 * because it still carries valueable information
+		 * because it still carries valuable information
 		 * like whether it was ever written and IO completed.
 		 */
 		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index ec7928a27aaa..4997f175b446 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)

 	/*
 	 * RCU really hurts here, we could free up the root node because
-	 * it was cow'ed but we may not get the new root node yet so do
+	 * it was COWed but we may not get the new root node yet so do
 	 * the inc_not_zero dance and if it doesn't work then
 	 * synchronize_rcu and try again.
 	 */
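
The "inc_not_zero dance" the comment names is the classic RCU retry loop; as a sketch mirroring btrfs_root_node(), assuming root->node is RCU-published and eb->refs is an atomic_t:

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);
		/* taking a reference only succeeds while eb is still live */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();	/* let the new root become visible */
	}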
@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
 			      struct extent_buffer *buf)
 {
 	/*
-	 * Tree blocks not in refernece counted trees and tree roots
+	 * Tree blocks not in reference counted trees and tree roots
 	 * are never shared. If a block was allocated after the last
 	 * snapshot and the block was not allocated by tree relocation,
 	 * we know the block is not shared.
@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,

 /*
  * tm is a pointer to the first operation to rewind within eb. then, all
- * previous operations will be rewinded (until we reach something older than
+ * previous operations will be rewound (until we reach something older than
  * time_seq).
  */
 static void
@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 }

 /*
- * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
  * is returned. If rewind operations happen, a fresh buffer is returned. The
  * returned buffer is always read-locked. If the returned buffer is not the
  * input buffer, the lock on the input buffer is released and the input buffer
@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
  * 3) the root is not forced COW.
  *
  * What is forced COW:
- *    when we create snapshot during commiting the transaction,
+ *    when we create snapshot during committing the transaction,
  *    after we've finished coping src root, we must COW the shared
  *    block to ensure the metadata consistency.
  */
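
Spelled out, the three conditions map onto a check of this shape (a sketch of should_cow_block()'s logic; flag names as in the btrfs headers):

	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;	/* safe to modify in place */
	return 1;		/* must COW */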
@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,

 /*
  * cows a single block, see __btrfs_cow_block for the real work.
- * This version of it has extra checks so that a block isn't cow'd more than
+ * This version of it has extra checks so that a block isn't COWed more than
  * once per transaction, as long as it hasn't been written yet
  */
 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -2986,7 +2986,7 @@ again:
 	btrfs_unlock_up_safe(p, level + 1);

 	/*
-	 * Since we can unwind eb's we want to do a real search every
+	 * Since we can unwind ebs we want to do a real search every
 	 * time.
 	 */
 	prev_cmp = -1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 84a6a5b3384a..65e7a9fa881f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -186,7 +186,7 @@ static const int btrfs_csum_sizes[] = { 4 };
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0

-/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */
+/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
 #define REQ_GET_READ_MIRRORS (1 << 30)

 #define BTRFS_FT_UNKNOWN 0
@@ -1221,7 +1221,7 @@ struct btrfs_space_info {
 	 * bytes_pinned does not reflect the bytes that will be pinned once the
 	 * delayed refs are flushed, so this counter is inc'ed every time we
 	 * call btrfs_free_extent so it is a realtime count of what will be
-	 * freed once the transaction is committed. It will be zero'ed every
+	 * freed once the transaction is committed. It will be zeroed every
 	 * time the transaction commits.
 	 */
 	struct percpu_counter total_bytes_pinned;
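
In other words, the counter is bumped when a free is queued and drained when the committed transaction actually unpins the bytes. Roughly, as a sketch of the bookkeeping (percpu_counter is the generic kernel API; the exact call sites differ):

	/* when a free is queued via btrfs_free_extent(): */
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);

	/* when the committed transaction unpins the extent: */
	percpu_counter_add(&space_info->total_bytes_pinned, -(s64)num_bytes);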
@@ -2392,7 +2392,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
 	token->kaddr = NULL;
 }

-/* some macros to generate set/get funcs for the struct fields.  This
+/* some macros to generate set/get functions for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
  */
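
The u8 helpers the comment alludes to are trivial, since a single byte has no endianness; they have this shape (a sketch of the helpers the comment describes, see ctree.h for the definitive set):

	#define cpu_to_le8(v) (v)
	#define le8_to_cpu(v) (v)
	#define get_unaligned_le8(p) (*((u8 *)(p)))
	#define put_unaligned_le8(v, p) ((*((u8 *)(p))) = (v))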
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index c24b653c7343..5fca9534a271 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -188,7 +188,7 @@ struct btrfs_delayed_ref_root {

 	/*
 	 * To make qgroup to skip given root.
-	 * This is for snapshot, as btrfs_qgroup_inherit() will manully
+	 * This is for snapshot, as btrfs_qgroup_inherit() will manually
 	 * modify counters for snapshot and its source, so we should skip
 	 * the snapshot in new_root/old_roots or it will get calculated twice
 	 */
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 26bcb487f958..0584b6c2df57 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -441,7 +441,7 @@ leave:
 }

 /*
- * blocked until all flighting bios are finished.
+ * blocked until all in-flight bios operations are finished.
  */
 static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 263823f28ba4..aacc707ef952 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -384,7 +384,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	/*
 	 * Things reading via commit roots that don't have normal protection,
 	 * like send, can have a really old block in cache that may point at a
-	 * block that has been free'd and re-allocated. So don't clear uptodate
+	 * block that has been freed and re-allocated. So don't clear uptodate
 	 * if we find an eb that is under IO (dirty/writeback) because we could
 	 * end up reading in the stale data and then writing it back out and
 	 * making everybody very sad.
@@ -418,7 +418,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
 	/*
 	 * The super_block structure does not span the whole
 	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
-	 * is filled with zeros and is included in the checkum.
+	 * is filled with zeros and is included in the checksum.
 	 */
 	crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
 			      crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
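
Putting those two lines in context, the whole verification amounts to hashing everything after the stored checksum and comparing against it (a sketch assuming the default 4-byte crc32c checksum; error handling elided):

	u32 crc = ~(u32)0;

	/* skip the stored csum itself, hash the rest of the super block */
	crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, crc,
			      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, (char *)&crc);

	if (memcmp(raw_disk_sb, &crc, sizeof(crc)))
		ret = 1;	/* checksum mismatch */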
@@ -600,7 +600,7 @@ static noinline int check_leaf(struct btrfs_root *root,

 		/*
 		 * Check to make sure that we don't point outside of the leaf,
-		 * just incase all the items are consistent to eachother, but
+		 * just in case all the items are consistent to each other, but
 		 * all point outside of the leaf.
 		 */
 		if (btrfs_item_end_nr(leaf, slot) >
@@ -3017,7 +3017,7 @@ retry_root_backup:
 	}

 	/*
-	 * Mount does not set all options immediatelly, we can do it now and do
+	 * Mount does not set all options immediately, we can do it now and do
 	 * not have to wait for transaction commit
 	 */
 	btrfs_apply_pending_changes(fs_info);
@@ -3245,7 +3245,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 		btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
 				     "lost page write due to IO error on %s",
 				     rcu_str_deref(device->name));
-		/* note, we dont' set_buffer_write_io_error because we have
+		/* note, we don't set_buffer_write_io_error because we have
 		 * our own ways of dealing with the IO errors
 		 */
 		clear_buffer_uptodate(bh);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0ddc996a40e1..939a043eabfa 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -980,7 +980,7 @@ out_free:
  * event that tree block loses its owner tree's reference and do the
  * back refs conversion.
  *
- * When a tree block is COW'd through a tree, there are four cases:
+ * When a tree block is COWed through a tree, there are four cases:
  *
  * The reference count of the block is one and the tree is the block's
  * owner tree. Nothing to do in this case.
@@ -2595,7 +2595,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			}

 			/*
-			 * Need to drop our head ref lock and re-aqcuire the
+			 * Need to drop our head ref lock and re-acquire the
 			 * delayed ref lock and then re-check to make sure
 			 * nobody got added.
 			 */
@@ -2747,7 +2747,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)

 	/*
 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
-	 * closer to what we're really going to want to ouse.
+	 * closer to what we're really going to want to use.
 	 */
 	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
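
Schematically, the estimate is "worst-case bytes for the heads, doubled, divided by usable leaf bytes". A sketch under that reading (the per-head size expression here is illustrative, not the verbatim function body):

	u64 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
				 sizeof(struct btrfs_extent_inline_ref));

	num_bytes <<= 1;	/* leaves assumed at most half full */
	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));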
@@ -2851,7 +2851,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 	}

 	/*
-	 * trans->sync means that when we call end_transaciton, we won't
+	 * trans->sync means that when we call end_transaction, we won't
 	 * wait on delayed refs
 	 */
 	trans->sync = true;
@@ -4243,7 +4243,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
  * Called if we need to clear a data reservation for this inode
  * Normally in a error case.
  *
- * This one will handle the per-indoe data rsv map for accurate reserved
+ * This one will handle the per-inode data rsv map for accurate reserved
  * space framework.
  */
 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
@@ -4911,7 +4911,7 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
  * @orig_bytes - the number of bytes we want
  * @flush - whether or not we can flush to make our reservation
  *
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
  * with the block_rsv. If there is not enough space it will make an attempt to
  * flush out space to make room. It will do this by flushing delalloc if
  * possible or committing the transaction. If flush is 0 then no attempts to
@@ -5516,7 +5516,7 @@ void btrfs_orphan_release_metadata(struct inode *inode)
  * common file/directory operations, they change two fs/file trees
  * and root tree, the number of items that the qgroup reserves is
  * different with the free space reservation. So we can not use
- * the space reseravtion mechanism in start_transaction().
+ * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 				     struct btrfs_block_rsv *rsv,
@@ -5565,7 +5565,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
 /**
  * drop_outstanding_extent - drop an outstanding extent
  * @inode: the inode we're dropping the extent for
- * @num_bytes: the number of bytes we're relaseing.
+ * @num_bytes: the number of bytes we're releasing.
  *
  * This is called when we are freeing up an outstanding extent, either called
  * after an error or after an extent is written. This will return the number of
@@ -5591,7 +5591,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 		drop_inode_space = 1;

 	/*
-	 * If we have more or the same amount of outsanding extents than we have
+	 * If we have more or the same amount of outstanding extents than we have
 	 * reserved then we need to leave the reserved extents count alone.
 	 */
 	if (BTRFS_I(inode)->outstanding_extents >=
@@ -5605,8 +5605,8 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 }

 /**
- * calc_csum_metadata_size - return the amount of metada space that must be
- * reserved/free'd for the given bytes.
+ * calc_csum_metadata_size - return the amount of metadata space that must be
+ * reserved/freed for the given bytes.
  * @inode: the inode we're manipulating
  * @num_bytes: the number of bytes in question
  * @reserve: 1 if we are reserving space, 0 if we are freeing space
@@ -5758,7 +5758,7 @@ out_fail:

 	/*
 	 * This is tricky, but first we need to figure out how much we
-	 * free'd from any free-ers that occurred during this
+	 * freed from any free-ers that occurred during this
 	 * reservation, so we reset ->csum_bytes to the csum_bytes
 	 * before we dropped our lock, and then call the free for the
 	 * number of bytes that were freed while we were trying our
@@ -5780,7 +5780,7 @@ out_fail:

 	/*
 	 * Now reset ->csum_bytes to what it should be. If bytes is
-	 * more than to_free then we would have free'd more space had we
+	 * more than to_free then we would have freed more space had we
 	 * not had an artificially high ->csum_bytes, so we need to free
 	 * the remainder. If bytes is the same or less then we don't
 	 * need to do anything, the other free-ers did the correct
@@ -7471,7 +7471,7 @@ loop:
 	if (loop == LOOP_CACHING_NOWAIT) {
 		/*
 		 * We want to skip the LOOP_CACHING_WAIT step if we
-		 * don't have any unached bgs and we've alrelady done a
+		 * don't have any uncached bgs and we've already done a
 		 * full search through.
 		 */
 		if (orig_have_caching_bg || !full_search)
@@ -7873,7 +7873,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,

 	/*
 	 * Mixed block groups will exclude before processing the log so we only
-	 * need to do the exlude dance if this fs isn't mixed.
+	 * need to do the exclude dance if this fs isn't mixed.
 	 */
 	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
 		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
@@ -9317,7 +9317,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
 	u64 free_bytes = 0;
 	int factor;

-	/* It's df, we don't care if it's racey */
+	/* It's df, we don't care if it's racy */
 	if (list_empty(&sinfo->ro_bgs))
 		return 0;

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c1139bcf8870..5fdf1fcb03c9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4591,7 +4591,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 		if (mapped)
 			spin_unlock(&page->mapping->private_lock);

-		/* One for when we alloced the page */
+		/* One for when we allocated the page */
 		put_page(page);
 	} while (index != 0);
 }
@@ -5751,7 +5751,7 @@ int try_release_extent_buffer(struct page *page)
 	struct extent_buffer *eb;

 	/*
-	 * We need to make sure noboody is attaching this page to an eb right
+	 * We need to make sure nobody is attaching this page to an eb right
 	 * now.
 	 */
 	spin_lock(&page->mapping->private_lock);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 318b048eb254..e0715fcfb11e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -62,7 +62,7 @@ struct extent_map *alloc_extent_map(void)

 /**
  * free_extent_map - drop reference count of an extent_map
- * @em: extent map being releasead
+ * @em: extent map being released
  *
  * Drops the reference out on @em by one and free the structure
  * if the reference count hits zero.
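
The drop described here is the standard atomic-refcount idiom; as a sketch (assuming em->refs is an atomic_t, per extent_map.h):

	void free_extent_map(struct extent_map *em)
	{
		if (!em)
			return;
		if (atomic_dec_and_test(&em->refs)) {
			WARN_ON(extent_map_in_tree(em));
			kfree(em);	/* last reference dropped */
		}
	}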
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 8d7b5a45c005..50dac40d9561 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2024,7 +2024,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		     BTRFS_I(inode)->last_trans
 		     <= root->fs_info->last_trans_committed)) {
 		/*
-		 * We'v had everything committed since the last time we were
+		 * We've had everything committed since the last time we were
 		 * modified so clear this flag in case it was set for whatever
 		 * reason, it's no longer relevant.
 		 */
@@ -2372,7 +2372,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)

 	/* Check the aligned pages after the first unaligned page,
 	 * if offset != orig_start, which means the first unaligned page
-	 * including serveral following pages are already in holes,
+	 * including several following pages are already in holes,
 	 * the extra check can be skipped */
 	if (offset == orig_start) {
 		/* after truncate page, check hole again */
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5e6062c26129..c6dc1183f542 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1983,7 +1983,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		/*
 		 * If this block group has some small extents we don't want to
 		 * use up all of our free slots in the cache with them, we want
-		 * to reserve them to larger extents, however if we have plent
+		 * to reserve them to larger extents, however if we have plenty
 		 * of cache left then go ahead an dadd them, no sense in adding
 		 * the overhead of a bitmap if we don't have to.
 		 */
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 33178c490ace..3af651c2bbc7 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -123,7 +123,7 @@ int btrfs_return_cluster_to_free_space(
 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 			   u64 *trimmed, u64 start, u64 end, u64 minlen);

-/* Support functions for runnint our sanity tests */
+/* Support functions for running our sanity tests */
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
 			      u64 offset, u64 bytes, bool bitmap);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1a4bd193962c..2027bbd4b05e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -455,7 +455,7 @@ again:

 	/*
 	 * skip compression for a small file range(<=blocksize) that
-	 * isn't an inline extent, since it dosen't save disk space at all.
+	 * isn't an inline extent, since it doesn't save disk space at all.
 	 */
 	if (total_compressed <= blocksize &&
 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
@@ -3705,7 +3705,7 @@ cache_index:
 	 * and doesn't have an inode ref with the name "bar" anymore.
 	 *
 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
-	 * but it guarantees correctness at the expense of ocassional full
+	 * but it guarantees correctness at the expense of occasional full
 	 * transaction commits on fsync if our inode is a directory, or if our
 	 * inode is not a directory, logging its parent unnecessarily.
 	 */
@@ -4961,7 +4961,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		 * be instantly completed which will give us extents that need
 		 * to be truncated. If we fail to get an orphan inode down we
 		 * could have left over extents that were never meant to live,
-		 * so we need to garuntee from this point on that everything
+		 * so we need to guarantee from this point on that everything
 		 * will be consistent.
 		 */
 		ret = btrfs_orphan_add(trans, inode);
@@ -5231,7 +5231,7 @@ void btrfs_evict_inode(struct inode *inode)
 		}

 		/*
-		 * We can't just steal from the global reserve, we need tomake
+		 * We can't just steal from the global reserve, we need to make
 		 * sure there is room to do it, if not we need to commit and try
 		 * again.
 		 */
@@ -7407,7 +7407,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 				 cached_state);
 		/*
 		 * We're concerned with the entire range that we're going to be
-		 * doing DIO to, so we need to make sure theres no ordered
+		 * doing DIO to, so we need to make sure there's no ordered
 		 * extents in this range.
 		 */
 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
@@ -7569,7 +7569,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	if (current->journal_info) {
 		/*
 		 * Need to pull our outstanding extents and set journal_info to NULL so
-		 * that anything that needs to check if there's a transction doesn't get
+		 * that anything that needs to check if there's a transaction doesn't get
 		 * confused.
 		 */
 		dio_data = current->journal_info;
@@ -7602,7 +7602,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	 * decompress it, so there will be buffering required no matter what we
 	 * do, so go ahead and fallback to buffered.
 	 *
-	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
+	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
 	 * to buffered IO. Don't blame me, this is the price we pay for using
 	 * the generic code.
 	 */
@@ -9018,7 +9018,7 @@ static int btrfs_truncate(struct inode *inode)
 		return ret;

 	/*
-	 * Yes ladies and gentelment, this is indeed ugly. The fact is we have
+	 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
 	 * 3 things going on here
 	 *
 	 * 1) We need to reserve space for our orphan item and the space to
@@ -9032,15 +9032,15 @@ static int btrfs_truncate(struct inode *inode)
 	 * space reserved in case it uses space during the truncate (thank you
 	 * very much snapshotting).
 	 *
-	 * And we need these to all be seperate. The fact is we can use alot of
+	 * And we need these to all be separate. The fact is we can use a lot of
 	 * space doing the truncate, and we have no earthly idea how much space
-	 * we will use, so we need the truncate reservation to be seperate so it
+	 * we will use, so we need the truncate reservation to be separate so it
 	 * doesn't end up using space reserved for updating the inode or
 	 * removing the orphan item. We also need to be able to stop the
 	 * transaction and start a new one, which means we need to be able to
 	 * update the inode several times, and we have no idea of knowing how
 	 * many times that will be, so we can't just reserve 1 item for the
-	 * entirety of the opration, so that has to be done seperately as well.
+	 * entirety of the operation, so that has to be done separately as well.
 	 * Then there is the orphan item, which does indeed need to be held on
 	 * to for the whole operation, and we need nobody to touch this reserved
 	 * space except the orphan code.
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 72f48252a55d..972cd0b00b21 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -296,7 +296,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 		}
 	} else {
 		/*
-		 * Revert back under same assuptions as above
+		 * Revert back under same assumptions as above
 		 */
 		if (S_ISREG(mode)) {
 			if (inode->i_size == 0)
@@ -461,7 +461,7 @@ static noinline int create_subvol(struct inode *dir,

 	/*
 	 * Don't create subvolume whose level is not zero. Or qgroup will be
-	 * screwed up since it assume subvolme qgroup's level to be 0.
+	 * screwed up since it assumes subvolume qgroup's level to be 0.
 	 */
 	if (btrfs_qgroup_level(objectid))
 		return -ENOSPC;
@@ -771,7 +771,7 @@ free_pending:
  *  a. be owner of dir, or
  *  b. be owner of victim, or
  *  c. have CAP_FOWNER capability
- *  6. If the victim is append-only or immutable we can't do antyhing with
+ *  6. If the victim is append-only or immutable we can't do anything with
  *     links pointing to it.
  *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
  *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
@@ -4585,7 +4585,7 @@ again:
 	}

 	/*
-	 * mut. excl. ops lock is locked. Three possibilites:
+	 * mut. excl. ops lock is locked. Three possibilities:
 	 * (1) some other op is running
 	 * (2) balance is running
 	 * (3) balance is paused -- special case (think resume)
@@ -5490,7 +5490,7 @@ long btrfs_ioctl(struct file *file, unsigned int
 		ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
 		/*
 		 * The transaction thread may want to do more work,
-		 * namely it pokes the cleaner ktread that will start
+		 * namely it pokes the cleaner kthread that will start
 		 * processing uncleaned subvols.
 		 */
 		wake_up_process(root->fs_info->transaction_kthread);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 23c96059cef2..ae068bbddc0d 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -58,7 +58,7 @@ struct btrfs_ordered_sum {

 #define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */

-#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
+#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */

 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */

diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 34af959b4e0f..9d4c05b14f6e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -85,7 +85,7 @@ struct btrfs_qgroup {

 	/*
 	 * temp variables for accounting operations
-	 * Refer to qgroup_shared_accouting() for details.
+	 * Refer to qgroup_shared_accounting() for details.
 	 */
 	u64 old_refcnt;
 	u64 new_refcnt;
@@ -499,7 +499,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 	}
 	/*
 	 * we call btrfs_free_qgroup_config() when umounting
-	 * filesystem and disabling quota, so we set qgroup_ulit
+	 * filesystem and disabling quota, so we set qgroup_ulist
 	 * to be null here to avoid double free.
 	 */
 	ulist_free(fs_info->qgroup_ulist);
@@ -1036,7 +1036,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,

 /*
  * The easy accounting, if we are adding/removing the only ref for an extent
- * then this qgroup and all of the parent qgroups get their refrence and
+ * then this qgroup and all of the parent qgroups get their reference and
  * exclusive counts adjusted.
  *
  * Caller should hold fs_info->qgroup_lock.
@@ -1436,7 +1436,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,

 	/*
 	 * No need to do lock, since this function will only be called in
-	 * btrfs_commmit_transaction().
+	 * btrfs_commit_transaction().
 	 */
 	node = rb_first(&delayed_refs->dirty_extent_root);
 	while (node) {
@@ -1557,7 +1557,7 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
  * A:  cur_old_roots < nr_old_roots	(not exclusive before)
  * !A: cur_old_roots == nr_old_roots	(possible exclusive before)
  * B:  cur_new_roots < nr_new_roots	(not exclusive now)
- * !B: cur_new_roots == nr_new_roots	(possible exclsuive now)
+ * !B: cur_new_roots == nr_new_roots	(possible exclusive now)
  *
  * Results:
  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
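
A worked reading: suppose an extent was referenced by roots {X, Y} before the operation and only by {X} after it. For X's qgroup, A held before (shared) and !B holds now, so X may have gone shared -> exclusive and its exclusive count grows. As a simplified sketch of the two interesting checks (names follow the comment above, not necessarily the function body):

	if (cur_old_roots == nr_old_roots && cur_new_roots < nr_new_roots)
		qg->excl -= num_bytes;	/* possible exclusive -> shared */

	if (cur_old_roots < nr_old_roots && cur_new_roots == nr_new_roots)
		qg->excl += num_bytes;	/* possible shared -> exclusive */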
@@ -1851,7 +1851,7 @@ out:
 }

 /*
- * Copy the acounting information between qgroups. This is necessary
+ * Copy the accounting information between qgroups. This is necessary
  * when a snapshot or a subvolume is created. Throwing an error will
  * cause a transaction abort so we take extra care here to only error
  * when a readonly fs is a reasonable outcome.
@@ -2340,7 +2340,7 @@ out:
 	mutex_unlock(&fs_info->qgroup_rescan_lock);

 	/*
-	 * only update status, since the previous part has alreay updated the
+	 * only update status, since the previous part has already updated the
 	 * qgroup info.
 	 */
 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
@@ -2670,7 +2670,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 }

 /*
- * Check qgroup reserved space leaking, normally at destory inode
+ * Check qgroup reserved space leaking, normally at destroy inode
  * time
  */
 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0b7792e02dd5..f8b6d411a034 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -576,7 +576,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
 	/*
 	 * we can't merge with cached rbios, since the
 	 * idea is that when we merge the destination
 	 * rbio is going to run our IO for us. We can
-	 * steal from cached rbio's though, other functions
+	 * steal from cached rbios though, other functions
 	 * handle that.
 	 */
 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
@@ -2368,7 +2368,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 		}

-		/* Check scrubbing pairty and repair it */
+		/* Check scrubbing parity and repair it */
 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
 		parity = kmap(p);
 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
@@ -2493,7 +2493,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
 		/*
 		 * Here means we got one corrupted data stripe and one
 		 * corrupted parity on RAID6, if the corrupted parity
-		 * is scrubbing parity, luckly, use the other one to repair
+		 * is scrubbing parity, luckily, use the other one to repair
 		 * the data, or we can not repair the data stripe.
 		 */
 		if (failp != rbio->scrubp)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index bd5ea1a8a9f1..852be779a834 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -668,8 +668,8 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
  * roots of b-trees that reference the tree block.
  *
  * the basic idea of this function is check backrefs of a given block
- * to find upper level blocks that refernece the block, and then check
- * bakcrefs of these upper level blocks recursively. the recursion stop
+ * to find upper level blocks that reference the block, and then check
+ * backrefs of these upper level blocks recursively. the recursion stop
  * when tree root is reached or backrefs for the block is cached.
  *
  * NOTE: if we find backrefs for a block are cached, we know backrefs
@@ -1160,7 +1160,7 @@ out:
 		if (!RB_EMPTY_NODE(&upper->rb_node))
 			continue;

-		/* Add this guy's upper edges to the list to proces */
+		/* Add this guy's upper edges to the list to process */
 		list_for_each_entry(edge, &upper->upper, list[LOWER])
 			list_add_tail(&edge->list[UPPER], &list);
 		if (list_empty(&upper->upper))
@@ -2396,7 +2396,7 @@ again:
 	}

 	/*
-	 * we keep the old last snapshod transid in rtranid when we
+	 * we keep the old last snapshot transid in rtranid when we
 	 * created the relocation tree.
 	 */
 	last_snap = btrfs_root_rtransid(&reloc_root->root_item);
@@ -2616,7 +2616,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
 	 * only one thread can access block_rsv at this point,
 	 * so we don't need hold lock to protect block_rsv.
 	 * we expand more reservation size here to allow enough
-	 * space for relocation and we will return eailer in
+	 * space for relocation and we will return earlier in
 	 * enospc case.
 	 */
 	rc->block_rsv->size = tmp + rc->extent_root->nodesize *
@@ -4591,7 +4591,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,

 /*
  * called before creating snapshot. it calculates metadata reservation
- * requried for relocating tree blocks in the snapshot
+ * required for relocating tree blocks in the snapshot
  */
 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 9fcd6dfc3266..5ddf14e1b2d2 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -71,9 +71,9 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
  * search_key: the key to search
  * path: the path we search
  * root_item: the root item of the tree we look for
- * root_key: the reak key of the tree we look for
+ * root_key: the root key of the tree we look for
  *
- * If ->offset of 'seach_key' is -1ULL, it means we are not sure the offset
+ * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset
  * of the search key, just lookup the root with the highest offset for a
  * given objectid.
  *
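
For example, a caller that only knows the objectid would set up the key like this (hypothetical caller sketch):

	struct btrfs_key search_key;

	search_key.objectid = root_objectid;
	search_key.type = BTRFS_ROOT_ITEM_KEY;
	search_key.offset = (u64)-1;	/* unknown: match the highest offset */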
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 841dae874a21..6838b2ca3cab 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1044,7 +1044,7 @@ nodatasum_case:

 		/*
 		 * !is_metadata and !have_csum, this means that the data
-		 * might not be COW'ed, that it might be modified
+		 * might not be COWed, that it might be modified
 		 * concurrently. The general strategy to work on the
 		 * commit root does not help in the case when COW is not
 		 * used.
@@ -1125,7 +1125,7 @@ nodatasum_case:
 		 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
 		 * of mirror #2 is readable but the final checksum test fails,
 		 * then the 2nd page of mirror #3 could be tried, whether now
-		 * the final checksum succeedes. But this would be a rare
+		 * the final checksum succeeds. But this would be a rare
 		 * exception and is therefore not implemented. At least it is
 		 * avoided that the good copy is overwritten.
 		 * A more useful improvement would be to pick the sectors
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 8d358c547c59..e9710f461008 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1831,7 +1831,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,

 	/*
 	 * If we have a parent root we need to verify that the parent dir was
-	 * not delted and then re-created, if it was then we have no overwrite
+	 * not deleted and then re-created, if it was then we have no overwrite
 	 * and we can just unlink this entry.
 	 */
 	if (sctx->parent_root) {
@@ -4192,9 +4192,9 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
 		return -ENOMEM;

 	/*
-	 * This hack is needed because empty acl's are stored as zero byte
+	 * This hack is needed because empty acls are stored as zero byte
 	 * data in xattrs. Problem with that is, that receiving these zero byte
-	 * acl's will fail later. To fix this, we send a dummy acl list that
+	 * acls will fail later. To fix this, we send a dummy acl list that
 	 * only contains the version number and no entries.
 	 */
 	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
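
The dummy list is just the xattr header carrying a version and no entries; the send path builds essentially this (sketch using the generic posix_acl_xattr_header layout):

	struct posix_acl_xattr_header dummy_acl;

	dummy_acl.a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
	/* transmit &dummy_acl, sizeof(dummy_acl) in place of the empty value */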
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index e05619f241be..875c757e73e2 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -36,7 +36,7 @@ static inline void put_unaligned_le8(u8 val, void *p)
  *
  * The end result is that anyone who #includes ctree.h gets a
  * declaration for the btrfs_set_foo functions and btrfs_foo functions,
- * which are wappers of btrfs_set_token_#bits functions and
+ * which are wrappers of btrfs_set_token_#bits functions and
  * btrfs_get_token_#bits functions, which are defined in this file.
  *
  * These setget functions do all the extent_buffer related mapping
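
The wrapper pattern being described expands, per field, into declarations of roughly this shape (a sketch of the macro family, not the verbatim ctree.h text):

	#define BTRFS_SETGET_FUNCS(name, type, member, bits)		\
	u##bits btrfs_##name(struct extent_buffer *eb, type *s);	\
	void btrfs_set_##name(struct extent_buffer *eb, type *s,	\
			      u##bits val);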
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 00b8f37cc306..01ef1865db63 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -121,7 +121,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 		 * Note that a running device replace operation is not
 		 * canceled here although there is no way to update
 		 * the progress. It would add the risk of a deadlock,
-		 * therefore the canceling is ommited. The only penalty
+		 * therefore the canceling is omitted. The only penalty
 		 * is that some I/O remains active until the procedure
 		 * completes. The next time when the filesystem is
 		 * mounted writeable again, the device replace
@@ -1881,7 +1881,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 	int ret;

 	/*
-	 * We aren't under the device list lock, so this is racey-ish, but good
+	 * We aren't under the device list lock, so this is racy-ish, but good
 	 * enough for our purposes.
 	 */
 	nr_devices = fs_info->fs_devices->open_devices;
@@ -1900,7 +1900,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 	if (!devices_info)
 		return -ENOMEM;

-	/* calc min stripe number for data space alloction */
+	/* calc min stripe number for data space allocation */
 	type = btrfs_get_alloc_profile(root, 1);
 	if (type & BTRFS_BLOCK_GROUP_RAID0) {
 		min_stripes = 2;
@@ -1936,7 +1936,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1936 avail_space *= BTRFS_STRIPE_LEN; 1936 avail_space *= BTRFS_STRIPE_LEN;
1937 1937
1938 /* 1938 /*
1939 * In order to avoid overwritting the superblock on the drive, 1939 * In order to avoid overwriting the superblock on the drive,
1940 * btrfs starts at an offset of at least 1MB when doing chunk 1940 * btrfs starts at an offset of at least 1MB when doing chunk
1941 * allocation. 1941 * allocation.
1942 */ 1942 */
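
Putting the pieces of this calculation together, a condensed sketch is below; the RAID1/RAID10 minimums are inferred from the profile semantics rather than quoted from this hunk, and the real function tracks a per-device skip_space while walking free extents instead of a single subtraction:

    u64 type = btrfs_get_alloc_profile(root, 1);
    int min_stripes = 1;

    if (type & BTRFS_BLOCK_GROUP_RAID0)
            min_stripes = 2;        /* striping needs at least 2 devices */
    else if (type & BTRFS_BLOCK_GROUP_RAID1)
            min_stripes = 2;        /* each block mirrored on 2 devices */
    else if (type & BTRFS_BLOCK_GROUP_RAID10)
            min_stripes = 4;        /* mirrored pairs of stripes */

    /* chunk allocation starts at >= 1MB per device so the
     * superblock is never overwritten */
    avail_space -= 1024 * 1024;
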
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index c190d2a520d1..55724607f79b 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -176,7 +176,7 @@ static int test_find_delalloc(void)
176 locked_page = find_lock_page(inode->i_mapping, test_start >> 176 locked_page = find_lock_page(inode->i_mapping, test_start >>
177 PAGE_SHIFT); 177 PAGE_SHIFT);
178 if (!locked_page) { 178 if (!locked_page) {
179 test_msg("Could'nt find the locked page\n"); 179 test_msg("Couldn't find the locked page\n");
180 goto out_bits; 180 goto out_bits;
181 } 181 }
182 start = test_start; 182 start = test_start;
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 514247515312..0eeb8f3d6b67 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -25,7 +25,7 @@
25#define BITS_PER_BITMAP (PAGE_SIZE * 8) 25#define BITS_PER_BITMAP (PAGE_SIZE * 8)
26 26
27/* 27/*
28 * This test just does basic sanity checking, making sure we can add an exten 28 * This test just does basic sanity checking, making sure we can add an extent
29 * entry and remove space from either end and the middle, and make sure we can 29 * entry and remove space from either end and the middle, and make sure we can
30 * remove space that covers adjacent extent entries. 30 * remove space that covers adjacent extent entries.
31 */ 31 */
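
A minimal sketch of the add/remove pattern that comment describes, assuming the size constants are spelled out and using test_check_exists and test_msg, helpers local to the btrfs test code:

    /* carve out 4MB of free space, then punch 1MB out of the middle */
    ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
    if (ret)
            return ret;
    ret = btrfs_remove_free_space(cache, 1024 * 1024, 1024 * 1024);
    if (ret)
            return ret;
    if (test_check_exists(cache, 1024 * 1024, 1024 * 1024)) {
            test_msg("Free space was not removed\n");
            return -EINVAL;
    }
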
@@ -396,8 +396,9 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
396 * wasn't optimal as they could be spread all over the block group while under 396 * wasn't optimal as they could be spread all over the block group while under
397 * concurrency (extra overhead and fragmentation). 397 * concurrency (extra overhead and fragmentation).
398 * 398 *
399 * This stealing approach is benefical, since we always prefer to allocate from 399 * This stealing approach is beneficial, since we always prefer to allocate
400 * extent entries, both for clustered and non-clustered allocation requests. 400 * from extent entries, both for clustered and non-clustered allocation
401 * requests.
401 */ 402 */
402static int 403static int
403test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) 404test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 863a6a3af1f8..8a25fe8b7c45 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -264,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
264 264
265 /* 265 /*
266 * We will just free a dummy node if its ref count is 2 so we need an 266 * We will just free a dummy node if its ref count is 2 so we need an
267 * extra ref so our searches don't accidently release our page. 267 * extra ref so our searches don't accidentally release our page.
268 */ 268 */
269 extent_buffer_get(root->node); 269 extent_buffer_get(root->node);
270 btrfs_set_header_nritems(root->node, 0); 270 btrfs_set_header_nritems(root->node, 0);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 8ea5d34bc5a2..8aa4ded31326 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -234,7 +234,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
234 } 234 }
235 235
236 /* 236 /*
237 * Since the test trans doesn't havee the complicated delayed refs, 237 * Since the test trans doesn't have the complicated delayed refs,
238 * we can only call btrfs_qgroup_account_extent() directly to test 238 * we can only call btrfs_qgroup_account_extent() directly to test
239 * quota. 239 * quota.
240 */ 240 */
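
Concretely, a direct accounting call of the kind the comment refers to could look like the sketch below; insert_normal_tree_ref is a helper local to the test file, and the signatures match this kernel era only approximately:

    struct ulist *old_roots = NULL;
    struct ulist *new_roots = NULL;

    ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
    if (ret)
            return ret;
    ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
                                 BTRFS_FS_TREE_OBJECTID);
    if (ret)
            return ret;
    ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
    if (ret)
            return ret;
    /* account the extent against qgroups without delayed-ref machinery */
    ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
                                      nodesize, old_roots, new_roots);
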
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 72be51f7ca2f..9fe0ec2bf0fe 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -144,7 +144,7 @@ struct btrfs_pending_snapshot {
144 /* block reservation for the operation */ 144 /* block reservation for the operation */
145 struct btrfs_block_rsv block_rsv; 145 struct btrfs_block_rsv block_rsv;
146 u64 qgroup_reserved; 146 u64 qgroup_reserved;
147 /* extra metadata reseration for relocation */ 147 /* extra metadata reservation for relocation */
148 int error; 148 int error;
149 bool readonly; 149 bool readonly;
150 struct list_head list; 150 struct list_head list;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a111e275ab9e..f3a8939d63fb 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2330,7 +2330,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2330 break; 2330 break;
2331 2331
2332 /* for regular files, make sure corresponding 2332 /* for regular files, make sure corresponding
2333 * orhpan item exist. extents past the new EOF 2333 * orphan item exists. extents past the new EOF
2334 * will be truncated later by orphan cleanup. 2334 * will be truncated later by orphan cleanup.
2335 */ 2335 */
2336 if (S_ISREG(mode)) { 2336 if (S_ISREG(mode)) {
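
The guarantee described here — the truncation survives a crash because orphan cleanup redoes it — can be sketched roughly as below; wc->trans and key.objectid follow the naming of the surrounding replay code, but treat the exact call and error handling as approximate rather than verbatim kernel logic:

    if (S_ISREG(mode)) {
            /* make sure an orphan item exists so orphan cleanup
             * truncates extents past the new EOF even after a crash */
            ret = btrfs_insert_orphan_item(wc->trans, root, key.objectid);
            if (ret && ret != -EEXIST)
                    break;
    }
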
@@ -4937,7 +4937,7 @@ out_unlock:
4937 * the actual unlink operation, so if we do this check before a concurrent task 4937 * the actual unlink operation, so if we do this check before a concurrent task
4938 * sets last_unlink_trans it means we've logged a consistent version/state of 4938 * sets last_unlink_trans it means we've logged a consistent version/state of
4939 * all the inode items, otherwise we are not sure and must do a transaction 4939 * all the inode items, otherwise we are not sure and must do a transaction
4940 * commit (the concurrent task migth have only updated last_unlink_trans before 4940 * commit (the concurrent task might have only updated last_unlink_trans before
4941 * we logged the inode or it might have also done the unlink). 4941 * we logged the inode or it might have also done the unlink).
4942 */ 4942 */
4943static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 4943static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
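
Stripped of the surrounding logging state, the decision the comment describes boils down to a comparison taken under the inode's log_mutex; a simplified sketch (field names as in this kernel era, logic condensed):

    static bool must_commit_sketch(struct btrfs_fs_info *fs_info,
                                   struct inode *inode)
    {
            bool ret = false;

            mutex_lock(&BTRFS_I(inode)->log_mutex);
            /* an unlink newer than the last commit may not be in the log */
            if (BTRFS_I(inode)->last_unlink_trans >
                fs_info->last_trans_committed)
                    ret = true;
            mutex_unlock(&BTRFS_I(inode)->log_mutex);

            return ret;
    }
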
@@ -4996,7 +4996,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
4996 while (1) { 4996 while (1) {
4997 /* 4997 /*
4998 * If we are logging a directory then we start with our inode, 4998 * If we are logging a directory then we start with our inode,
4999 * not our parents inode, so we need to skipp setting the 4999 * not our parent's inode, so we need to skip setting the
5000 * logged_trans so that further down in the log code we don't 5000 * logged_trans so that further down in the log code we don't
5001 * think this inode has already been logged. 5001 * think this inode has already been logged.
5002 */ 5002 */
@@ -5375,7 +5375,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5375 log_dentries = true; 5375 log_dentries = true;
5376 5376
5377 /* 5377 /*
5378 * On unlink we must make sure all our current and old parent directores 5378 * On unlink we must make sure all our current and old parent directory
5379 * inodes are fully logged. This is to prevent leaving dangling 5379 * inodes are fully logged. This is to prevent leaving dangling
5380 * directory index entries in directories that were our parents but are 5380 * directory index entries in directories that were our parents but are
5381 * not anymore. Not doing this results in old parent directory being 5381 * not anymore. Not doing this results in old parent directory being
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 91feb2bdefee..b1434bb57e36 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -28,7 +28,7 @@
28 * } 28 * }
29 * ulist_free(ulist); 29 * ulist_free(ulist);
30 * 30 *
31 * This assumes the graph nodes are adressable by u64. This stems from the 31 * This assumes the graph nodes are addressable by u64. This stems from the
32 * usage for tree enumeration in btrfs, where the logical addresses are 32 * usage for tree enumeration in btrfs, where the logical addresses are
33 * 64 bit. 33 * 64 bit.
34 * 34 *
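
The enumeration loop that the ulist.c header comment excerpts could be fleshed out as follows; logical_addr and visit() are hypothetical stand-ins for the caller's data and per-node work:

    struct ulist *seen = ulist_alloc(GFP_KERNEL);
    struct ulist_iterator uiter;
    struct ulist_node *node;

    if (!seen)
            return -ENOMEM;
    /* ulist_add returns 1 for new entries, 0 for duplicates */
    ulist_add(seen, logical_addr, 0, GFP_KERNEL);

    ULIST_ITER_INIT(&uiter);
    while ((node = ulist_next(seen, &uiter)))
            visit(node->val);       /* visit() is hypothetical */

    ulist_free(seen);
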
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bd0f45fb38c4..8fe7fff5ffb7 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2165,7 +2165,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
2165} 2165}
2166 2166
2167/* 2167/*
2168 * strore the expected generation for seed devices in device items. 2168 * Store the expected generation for seed devices in device items.
2169 */ 2169 */
2170static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, 2170static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2171 struct btrfs_root *root) 2171 struct btrfs_root *root)
@@ -3362,7 +3362,7 @@ static int should_balance_chunk(struct btrfs_root *root,
3362 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { 3362 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3363 /* 3363 /*
3364 * Same logic as the 'limit' filter; the minimum cannot be 3364 * Same logic as the 'limit' filter; the minimum cannot be
3365 * determined here because we do not have the global informatoin 3365 * determined here because we do not have the global information
3366 * about the count of all chunks that satisfy the filters. 3366 * about the count of all chunks that satisfy the filters.
3367 */ 3367 */
3368 if (bargs->limit_max == 0) 3368 if (bargs->limit_max == 0)
@@ -6032,7 +6032,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6032{ 6032{
6033 atomic_inc(&bbio->error); 6033 atomic_inc(&bbio->error);
6034 if (atomic_dec_and_test(&bbio->stripes_pending)) { 6034 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6035 /* Shoud be the original bio. */ 6035 /* Should be the original bio. */
6036 WARN_ON(bio != bbio->orig_bio); 6036 WARN_ON(bio != bbio->orig_bio);
6037 6037
6038 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6038 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
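
The completion pattern visible above — count errors, and let whichever sub-bio finishes last complete the original — can be sketched generically as below (bi_error as in this kernel era; later kernels use bi_status):

    static void sub_bio_done_sketch(struct btrfs_bio *bbio, struct bio *bio)
    {
            if (bio->bi_error)
                    atomic_inc(&bbio->error);

            /* the last sub-bio to finish completes the original bio */
            if (atomic_dec_and_test(&bbio->stripes_pending)) {
                    struct bio *orig = bbio->orig_bio;

                    orig->bi_error = atomic_read(&bbio->error) ? -EIO : 0;
                    bio_endio(orig);
            }
    }
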
@@ -6516,7 +6516,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6516 set_extent_buffer_uptodate(sb); 6516 set_extent_buffer_uptodate(sb);
6517 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 6517 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6518 /* 6518 /*
6519 * The sb extent buffer is artifical and just used to read the system array. 6519 * The sb extent buffer is artificial and just used to read the system array.
6520 * set_extent_buffer_uptodate() call does not properly mark all its 6520 * set_extent_buffer_uptodate() call does not properly mark all its
6521 * pages up-to-date when the page is larger: extent does not cover the 6521 * pages up-to-date when the page is larger: extent does not cover the
6522 * whole page and consequently check_page_uptodate does not find all 6522 * whole page and consequently check_page_uptodate does not find all