summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrea Gelmini <andrea.gelmini@gelma.net>2018-11-28 06:05:13 -0500
committerDavid Sterba <dsterba@suse.com>2018-12-17 08:51:50 -0500
commit52042d8e82ff50d40e76a275ac0b97aa663328b0 (patch)
tree58ba6c57c648c97b824ec2618f1e6964de3ee55b
parent1690dd41e0cb1dade80850ed8a3eb0121b96d22f (diff)
btrfs: Fix typos in comments and strings
The typos accumulate over time so once in a while they get fixed in a large patch. Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r--fs/btrfs/backref.c4
-rw-r--r--fs/btrfs/check-integrity.c2
-rw-r--r--fs/btrfs/compression.c4
-rw-r--r--fs/btrfs/ctree.c4
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c4
-rw-r--r--fs/btrfs/extent-tree.c22
-rw-r--r--fs/btrfs/extent_io.c4
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/extent_map.c3
-rw-r--r--fs/btrfs/file.c6
-rw-r--r--fs/btrfs/inode.c10
-rw-r--r--fs/btrfs/lzo.c2
-rw-r--r--fs/btrfs/qgroup.c14
-rw-r--r--fs/btrfs/qgroup.h4
-rw-r--r--fs/btrfs/raid56.c2
-rw-r--r--fs/btrfs/ref-verify.c6
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/scrub.c2
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/super.c8
-rw-r--r--fs/btrfs/transaction.c4
-rw-r--r--fs/btrfs/tree-checker.c6
-rw-r--r--fs/btrfs/tree-log.c4
-rw-r--r--fs/btrfs/volumes.c14
25 files changed, 70 insertions, 69 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 4a15f87dbbb4..78556447e1d5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -591,7 +591,7 @@ unode_aux_to_inode_list(struct ulist_node *node)
591} 591}
592 592
593/* 593/*
594 * We maintain three seperate rbtrees: one for direct refs, one for 594 * We maintain three separate rbtrees: one for direct refs, one for
595 * indirect refs which have a key, and one for indirect refs which do not 595 * indirect refs which have a key, and one for indirect refs which do not
596 * have a key. Each tree does merge on insertion. 596 * have a key. Each tree does merge on insertion.
597 * 597 *
@@ -695,7 +695,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
695 } 695 }
696 696
697 /* 697 /*
698 * Now it's a direct ref, put it in the the direct tree. We must 698 * Now it's a direct ref, put it in the direct tree. We must
699 * do this last because the ref could be merged/freed here. 699 * do this last because the ref could be merged/freed here.
700 */ 700 */
701 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); 701 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 84e9729badaa..b0c8094528d1 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2327,7 +2327,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2327 * write operations. Therefore it keeps the linkage 2327 * write operations. Therefore it keeps the linkage
2328 * information for a block until a block is 2328 * information for a block until a block is
2329 * rewritten. This can temporarily cause incorrect 2329 * rewritten. This can temporarily cause incorrect
2330 * and even circular linkage informations. This 2330 * and even circular linkage information. This
2331 * causes no harm unless such blocks are referenced 2331 * causes no harm unless such blocks are referenced
2332 * by the most recent super block. 2332 * by the most recent super block.
2333 */ 2333 */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d5381f39a9e8..548057630b69 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1203,7 +1203,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1203/* 1203/*
1204 * Shannon Entropy calculation 1204 * Shannon Entropy calculation
1205 * 1205 *
1206 * Pure byte distribution analysis fails to determine compressiability of data. 1206 * Pure byte distribution analysis fails to determine compressibility of data.
1207 * Try calculating entropy to estimate the average minimum number of bits 1207 * Try calculating entropy to estimate the average minimum number of bits
1208 * needed to encode the sampled data. 1208 * needed to encode the sampled data.
1209 * 1209 *
@@ -1267,7 +1267,7 @@ static u8 get4bits(u64 num, int shift) {
1267 1267
1268/* 1268/*
1269 * Use 4 bits as radix base 1269 * Use 4 bits as radix base
1270 * Use 16 u32 counters for calculating new possition in buf array 1270 * Use 16 u32 counters for calculating new position in buf array
1271 * 1271 *
1272 * @array - array that will be sorted 1272 * @array - array that will be sorted
1273 * @array_buf - buffer array to store sorting results 1273 * @array_buf - buffer array to store sorting results
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4252e89df6ae..d92462fe66c8 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1414,7 +1414,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
1414 * 1414 *
1415 * What is forced COW: 1415 * What is forced COW:
1416 * when we create snapshot during committing the transaction, 1416 * when we create snapshot during committing the transaction,
1417 * after we've finished coping src root, we must COW the shared 1417 * after we've finished copying src root, we must COW the shared
1418 * block to ensure the metadata consistency. 1418 * block to ensure the metadata consistency.
1419 */ 1419 */
1420 if (btrfs_header_generation(buf) == trans->transid && 1420 if (btrfs_header_generation(buf) == trans->transid &&
@@ -3771,7 +3771,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3771 /* Key greater than all keys in the leaf, right neighbor has 3771 /* Key greater than all keys in the leaf, right neighbor has
3772 * enough room for it and we're not emptying our leaf to delete 3772 * enough room for it and we're not emptying our leaf to delete
3773 * it, therefore use right neighbor to insert the new item and 3773 * it, therefore use right neighbor to insert the new item and
3774 * no need to touch/dirty our left leaft. */ 3774 * no need to touch/dirty our left leaf. */
3775 btrfs_tree_unlock(left); 3775 btrfs_tree_unlock(left);
3776 free_extent_buffer(left); 3776 free_extent_buffer(left);
3777 path->nodes[0] = right; 3777 path->nodes[0] = right;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 6f7e890bf480..8750c835f535 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -991,7 +991,7 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
991 * something that can happen if the dev_replace 991 * something that can happen if the dev_replace
992 * procedure is suspended by an umount and then 992 * procedure is suspended by an umount and then
993 * the tgtdev is missing (or "btrfs dev scan") was 993 * the tgtdev is missing (or "btrfs dev scan") was
994 * not called and the the filesystem is remounted 994 * not called and the filesystem is remounted
995 * in degraded state. This does not stop the 995 * in degraded state. This does not stop the
996 * dev_replace procedure. It needs to be canceled 996 * dev_replace procedure. It needs to be canceled
997 * manually if the cancellation is wanted. 997 * manually if the cancellation is wanted.
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 2f934a0b2148..8da2f380d3c0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3100,7 +3100,7 @@ retry_root_backup:
3100 3100
3101 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) { 3101 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3102 btrfs_warn(fs_info, 3102 btrfs_warn(fs_info,
3103 "writeable mount is not allowed due to too many missing devices"); 3103 "writable mount is not allowed due to too many missing devices");
3104 goto fail_sysfs; 3104 goto fail_sysfs;
3105 } 3105 }
3106 3106
@@ -4077,7 +4077,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4077#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4077#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4078 /* 4078 /*
4079 * This is a fast path so only do this check if we have sanity tests 4079 * This is a fast path so only do this check if we have sanity tests
4080 * enabled. Normal people shouldn't be using umapped buffers as dirty 4080 * enabled. Normal people shouldn't be using unmapped buffers as dirty
4081 * outside of the sanity tests. 4081 * outside of the sanity tests.
4082 */ 4082 */
4083 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) 4083 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 8a9ce33dfdbc..b15afeae16df 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1055,7 +1055,7 @@ out_free:
1055 1055
1056/* 1056/*
1057 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required, 1057 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
1058 * is_data == BTRFS_REF_TYPE_DATA, data type is requried, 1058 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
1059 * is_data == BTRFS_REF_TYPE_ANY, either type is OK. 1059 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
1060 */ 1060 */
1061int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, 1061int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
@@ -3705,7 +3705,7 @@ again:
3705 } 3705 }
3706 } 3706 }
3707 3707
3708 /* if its not on the io list, we need to put the block group */ 3708 /* if it's not on the io list, we need to put the block group */
3709 if (should_put) 3709 if (should_put)
3710 btrfs_put_block_group(cache); 3710 btrfs_put_block_group(cache);
3711 if (drop_reserve) 3711 if (drop_reserve)
@@ -4675,7 +4675,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
4675 4675
4676 /* 4676 /*
4677 * If we have dup, raid1 or raid10 then only half of the free 4677 * If we have dup, raid1 or raid10 then only half of the free
4678 * space is actually useable. For raid56, the space info used 4678 * space is actually usable. For raid56, the space info used
4679 * doesn't include the parity drive, so we don't have to 4679 * doesn't include the parity drive, so we don't have to
4680 * change the math 4680 * change the math
4681 */ 4681 */
@@ -5302,7 +5302,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
5302 * @orig_bytes - the number of bytes we want 5302 * @orig_bytes - the number of bytes we want
5303 * @flush - whether or not we can flush to make our reservation 5303 * @flush - whether or not we can flush to make our reservation
5304 * 5304 *
5305 * This will reserve orgi_bytes number of bytes from the space info associated 5305 * This will reserve orig_bytes number of bytes from the space info associated
5306 * with the block_rsv. If there is not enough space it will make an attempt to 5306 * with the block_rsv. If there is not enough space it will make an attempt to
5307 * flush out space to make room. It will do this by flushing delalloc if 5307 * flush out space to make room. It will do this by flushing delalloc if
5308 * possible or committing the transaction. If flush is 0 then no attempts to 5308 * possible or committing the transaction. If flush is 0 then no attempts to
@@ -5771,11 +5771,11 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
5771/** 5771/**
5772 * btrfs_inode_rsv_refill - refill the inode block rsv. 5772 * btrfs_inode_rsv_refill - refill the inode block rsv.
5773 * @inode - the inode we are refilling. 5773 * @inode - the inode we are refilling.
5774 * @flush - the flusing restriction. 5774 * @flush - the flushing restriction.
5775 * 5775 *
5776 * Essentially the same as btrfs_block_rsv_refill, except it uses the 5776 * Essentially the same as btrfs_block_rsv_refill, except it uses the
5777 * block_rsv->size as the minimum size. We'll either refill the missing amount 5777 * block_rsv->size as the minimum size. We'll either refill the missing amount
5778 * or return if we already have enough space. This will also handle the resreve 5778 * or return if we already have enough space. This will also handle the reserve
5779 * tracepoint for the reserved amount. 5779 * tracepoint for the reserved amount.
5780 */ 5780 */
5781static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, 5781static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
@@ -8500,7 +8500,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8500 buf->log_index = root->log_transid % 2; 8500 buf->log_index = root->log_transid % 2;
8501 /* 8501 /*
8502 * we allow two log transactions at a time, use different 8502 * we allow two log transactions at a time, use different
8503 * EXENT bit to differentiate dirty pages. 8503 * EXTENT bit to differentiate dirty pages.
8504 */ 8504 */
8505 if (buf->log_index == 0) 8505 if (buf->log_index == 0)
8506 set_extent_dirty(&root->dirty_log_pages, buf->start, 8506 set_extent_dirty(&root->dirty_log_pages, buf->start,
@@ -9762,7 +9762,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
9762} 9762}
9763 9763
9764/* 9764/*
9765 * checks to see if its even possible to relocate this block group. 9765 * Checks to see if it's even possible to relocate this block group.
9766 * 9766 *
9767 * @return - -1 if it's not a good idea to relocate this block group, 0 if its 9767 * @return - -1 if it's not a good idea to relocate this block group, 0 if its
9768 * ok to go ahead and try. 9768 * ok to go ahead and try.
@@ -10390,7 +10390,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
10390 * check for two cases, either we are full, and therefore 10390 * check for two cases, either we are full, and therefore
10391 * don't need to bother with the caching work since we won't 10391 * don't need to bother with the caching work since we won't
10392 * find any space, or we are empty, and we can just add all 10392 * find any space, or we are empty, and we can just add all
10393 * the space in and be done with it. This saves us _alot_ of 10393 * the space in and be done with it. This saves us _a_lot_ of
10394 * time, particularly in the full case. 10394 * time, particularly in the full case.
10395 */ 10395 */
10396 if (found_key.offset == btrfs_block_group_used(&cache->item)) { 10396 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
@@ -10660,7 +10660,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10660 10660
10661 mutex_lock(&trans->transaction->cache_write_mutex); 10661 mutex_lock(&trans->transaction->cache_write_mutex);
10662 /* 10662 /*
10663 * make sure our free spache cache IO is done before remove the 10663 * Make sure our free space cache IO is done before removing the
10664 * free space inode 10664 * free space inode
10665 */ 10665 */
10666 spin_lock(&trans->transaction->dirty_bgs_lock); 10666 spin_lock(&trans->transaction->dirty_bgs_lock);
@@ -11177,7 +11177,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11177 if (!blk_queue_discard(bdev_get_queue(device->bdev))) 11177 if (!blk_queue_discard(bdev_get_queue(device->bdev)))
11178 return 0; 11178 return 0;
11179 11179
11180 /* Not writeable = nothing to do. */ 11180 /* Not writable = nothing to do. */
11181 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) 11181 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
11182 return 0; 11182 return 0;
11183 11183
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7b489988d811..fc126b92ea59 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -492,7 +492,7 @@ static struct extent_state *next_state(struct extent_state *state)
492 492
493/* 493/*
494 * utility function to clear some bits in an extent state struct. 494 * utility function to clear some bits in an extent state struct.
495 * it will optionally wake up any one waiting on this state (wake == 1). 495 * it will optionally wake up anyone waiting on this state (wake == 1).
496 * 496 *
497 * If no bits are set on the state struct after clearing things, the 497 * If no bits are set on the state struct after clearing things, the
498 * struct is freed and removed from the tree 498 * struct is freed and removed from the tree
@@ -4312,7 +4312,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4312 4312
4313 /* 4313 /*
4314 * Sanity check, extent_fiemap() should have ensured that new 4314 * Sanity check, extent_fiemap() should have ensured that new
4315 * fiemap extent won't overlap with cahced one. 4315 * fiemap extent won't overlap with cached one.
4316 * Not recoverable. 4316 * Not recoverable.
4317 * 4317 *
4318 * NOTE: Physical address can overlap, due to compression 4318 * NOTE: Physical address can overlap, due to compression
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 04eefa93fe1f..9673be3f3d1f 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -98,7 +98,7 @@ typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
98 98
99struct extent_io_ops { 99struct extent_io_ops {
100 /* 100 /*
101 * The following callbacks must be allways defined, the function 101 * The following callbacks must be always defined, the function
102 * pointer will be called unconditionally. 102 * pointer will be called unconditionally.
103 */ 103 */
104 extent_submit_bio_hook_t *submit_bio_hook; 104 extent_submit_bio_hook_t *submit_bio_hook;
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 7eea8b6e2cd3..a042a193c120 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -475,7 +475,8 @@ static struct extent_map *prev_extent_map(struct extent_map *em)
475 return container_of(prev, struct extent_map, rb_node); 475 return container_of(prev, struct extent_map, rb_node);
476} 476}
477 477
478/* helper for btfs_get_extent. Given an existing extent in the tree, 478/*
479 * Helper for btrfs_get_extent. Given an existing extent in the tree,
479 * the existing extent is the nearest extent to map_start, 480 * the existing extent is the nearest extent to map_start,
480 * and an extent that you want to insert, deal with overlap and insert 481 * and an extent that you want to insert, deal with overlap and insert
481 * the best fitted new extent into the tree. 482 * the best fitted new extent into the tree.
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 81aae230d1a5..d38dc8c31533 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2005,7 +2005,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
2005 filp->private_data = NULL; 2005 filp->private_data = NULL;
2006 2006
2007 /* 2007 /*
2008 * ordered_data_close is set by settattr when we are about to truncate 2008 * ordered_data_close is set by setattr when we are about to truncate
2009 * a file from a non-zero size to a zero size. This tries to 2009 * a file from a non-zero size to a zero size. This tries to
2010 * flush down new bytes that may have been written if the 2010 * flush down new bytes that may have been written if the
2011 * application were using truncate to replace a file in place. 2011 * application were using truncate to replace a file in place.
@@ -2114,7 +2114,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2114 2114
2115 /* 2115 /*
2116 * We have to do this here to avoid the priority inversion of waiting on 2116 * We have to do this here to avoid the priority inversion of waiting on
2117 * IO of a lower priority task while holding a transaciton open. 2117 * IO of a lower priority task while holding a transaction open.
2118 */ 2118 */
2119 ret = btrfs_wait_ordered_range(inode, start, len); 2119 ret = btrfs_wait_ordered_range(inode, start, len);
2120 if (ret) { 2120 if (ret) {
@@ -2154,7 +2154,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2154 * here we could get into a situation where we're waiting on IO to 2154 * here we could get into a situation where we're waiting on IO to
2155 * happen that is blocked on a transaction trying to commit. With start 2155 * happen that is blocked on a transaction trying to commit. With start
2156 * we inc the extwriter counter, so we wait for all extwriters to exit 2156 * we inc the extwriter counter, so we wait for all extwriters to exit
2157 * before we start blocking join'ers. This comment is to keep somebody 2157 * before we start blocking joiners. This comment is to keep somebody
2158 * from thinking they are super smart and changing this to 2158 * from thinking they are super smart and changing this to
2159 * btrfs_join_transaction *cough*Josef*cough*. 2159 * btrfs_join_transaction *cough*Josef*cough*.
2160 */ 2160 */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3d29505971fe..43eb4535319d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -104,7 +104,7 @@ static void __endio_write_update_ordered(struct inode *inode,
104 104
105/* 105/*
106 * Cleanup all submitted ordered extents in specified range to handle errors 106 * Cleanup all submitted ordered extents in specified range to handle errors
107 * from the fill_dellaloc() callback. 107 * from the btrfs_run_delalloc_range() callback.
108 * 108 *
109 * NOTE: caller must ensure that when an error happens, it can not call 109 * NOTE: caller must ensure that when an error happens, it can not call
110 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING 110 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
@@ -1842,7 +1842,7 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
1842 1842
1843 /* 1843 /*
1844 * We don't reserve metadata space for space cache inodes so we 1844 * We don't reserve metadata space for space cache inodes so we
1845 * don't need to call dellalloc_release_metadata if there is an 1845 * don't need to call delalloc_release_metadata if there is an
1846 * error. 1846 * error.
1847 */ 1847 */
1848 if (*bits & EXTENT_CLEAR_META_RESV && 1848 if (*bits & EXTENT_CLEAR_META_RESV &&
@@ -4516,7 +4516,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4516 /* 4516 /*
4517 * This function is also used to drop the items in the log tree before 4517 * This function is also used to drop the items in the log tree before
4518 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 4518 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4519 * it is used to drop the loged items. So we shouldn't kill the delayed 4519 * it is used to drop the logged items. So we shouldn't kill the delayed
4520 * items. 4520 * items.
4521 */ 4521 */
4522 if (min_type == 0 && root == BTRFS_I(inode)->root) 4522 if (min_type == 0 && root == BTRFS_I(inode)->root)
@@ -5108,7 +5108,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5108 5108
5109 truncate_setsize(inode, newsize); 5109 truncate_setsize(inode, newsize);
5110 5110
5111 /* Disable nonlocked read DIO to avoid the end less truncate */ 5111 /* Disable nonlocked read DIO to avoid the endless truncate */
5112 btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); 5112 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
5113 inode_dio_wait(inode); 5113 inode_dio_wait(inode);
5114 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); 5114 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
@@ -8052,7 +8052,7 @@ static void __endio_write_update_ordered(struct inode *inode,
8052 return; 8052 return;
8053 /* 8053 /*
8054 * Our bio might span multiple ordered extents. In this case 8054 * Our bio might span multiple ordered extents. In this case
8055 * we keep goin until we have accounted the whole dio. 8055 * we keep going until we have accounted the whole dio.
8056 */ 8056 */
8057 if (ordered_offset < offset + bytes) { 8057 if (ordered_offset < offset + bytes) {
8058 ordered_bytes = offset + bytes - ordered_offset; 8058 ordered_bytes = offset + bytes - ordered_offset;
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index b6a4cc178bee..90639140439f 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -27,7 +27,7 @@
27 * Records the total size (including the header) of compressed data. 27 * Records the total size (including the header) of compressed data.
28 * 28 *
29 * 2. Segment(s) 29 * 2. Segment(s)
30 * Variable size. Each segment includes one segment header, followd by data 30 * Variable size. Each segment includes one segment header, followed by data
31 * payload. 31 * payload.
32 * One regular LZO compressed extent can have one or more segments. 32 * One regular LZO compressed extent can have one or more segments.
33 * For inlined LZO compressed extent, only one segment is allowed. 33 * For inlined LZO compressed extent, only one segment is allowed.
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 2272419ade7e..4e473a998219 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -30,7 +30,7 @@
30 * - sync 30 * - sync
31 * - copy also limits on subvol creation 31 * - copy also limits on subvol creation
32 * - limit 32 * - limit
33 * - caches fuer ulists 33 * - caches for ulists
34 * - performance benchmarks 34 * - performance benchmarks
35 * - check all ioctl parameters 35 * - check all ioctl parameters
36 */ 36 */
@@ -522,7 +522,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
522 __del_qgroup_rb(qgroup); 522 __del_qgroup_rb(qgroup);
523 } 523 }
524 /* 524 /*
525 * we call btrfs_free_qgroup_config() when umounting 525 * We call btrfs_free_qgroup_config() when unmounting
526 * filesystem and disabling quota, so we set qgroup_ulist 526 * filesystem and disabling quota, so we set qgroup_ulist
527 * to be null here to avoid double free. 527 * to be null here to avoid double free.
528 */ 528 */
@@ -1128,7 +1128,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1128 * The easy accounting, we're updating qgroup relationship whose child qgroup 1128 * The easy accounting, we're updating qgroup relationship whose child qgroup
1129 * only has exclusive extents. 1129 * only has exclusive extents.
1130 * 1130 *
1131 * In this case, all exclsuive extents will also be exlusive for parent, so 1131 * In this case, all exclusive extents will also be exclusive for parent, so
1132 * excl/rfer just get added/removed. 1132 * excl/rfer just get added/removed.
1133 * 1133 *
1134 * So is qgroup reservation space, which should also be added/removed to 1134 * So is qgroup reservation space, which should also be added/removed to
@@ -1755,14 +1755,14 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1755 * 1755 *
1756 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty 1756 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
1757 * NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty. 1757 * NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
1758 * They should be marked during preivous (@dst_level = 1) iteration. 1758 * They should be marked during previous (@dst_level = 1) iteration.
1759 * 1759 *
1760 * 3) Mark file extents in leaves dirty 1760 * 3) Mark file extents in leaves dirty
1761 * We don't have good way to pick out new file extents only. 1761 * We don't have good way to pick out new file extents only.
1762 * So we still follow the old method by scanning all file extents in 1762 * So we still follow the old method by scanning all file extents in
1763 * the leave. 1763 * the leave.
1764 * 1764 *
1765 * This function can free us from keeping two pathes, thus later we only need 1765 * This function can free us from keeping two paths, thus later we only need
1766 * to care about how to iterate all new tree blocks in reloc tree. 1766 * to care about how to iterate all new tree blocks in reloc tree.
1767 */ 1767 */
1768static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, 1768static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
@@ -1901,7 +1901,7 @@ out:
1901 * 1901 *
1902 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace 1902 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
1903 * above tree blocks along with their counter parts in file tree. 1903 * above tree blocks along with their counter parts in file tree.
1904 * While during search, old tree blocsk OO(c) will be skiped as tree block swap 1904 * While during search, old tree blocks OO(c) will be skipped as tree block swap
1905 * won't affect OO(c). 1905 * won't affect OO(c).
1906 */ 1906 */
1907static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans, 1907static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
@@ -2026,7 +2026,7 @@ out:
2026 * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and 2026 * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
2027 * @dst_slot), and find any tree blocks whose generation is at @last_snapshot, 2027 * @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
2028 * and then go down @src_eb (pointed by @src_parent and @src_slot) to find 2028 * and then go down @src_eb (pointed by @src_parent and @src_slot) to find
2029 * the conterpart of the tree block, then mark both tree blocks as qgroup dirty, 2029 * the counterpart of the tree block, then mark both tree blocks as qgroup dirty,
2030 * and skip all tree blocks whose generation is smaller than last_snapshot. 2030 * and skip all tree blocks whose generation is smaller than last_snapshot.
2031 * 2031 *
2032 * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(), 2032 * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index e4e6ee44073a..20c6bd5fa701 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -81,10 +81,10 @@ enum btrfs_qgroup_rsv_type {
81 * 81 *
82 * Each type should have different reservation behavior. 82 * Each type should have different reservation behavior.
83 * E.g, data follows its io_tree flag modification, while 83 * E.g, data follows its io_tree flag modification, while
84 * *currently* meta is just reserve-and-clear during transcation. 84 * *currently* meta is just reserve-and-clear during transaction.
85 * 85 *
86 * TODO: Add new type for reservation which can survive transaction commit. 86 * TODO: Add new type for reservation which can survive transaction commit.
87 * Currect metadata reservation behavior is not suitable for such case. 87 * Current metadata reservation behavior is not suitable for such case.
88 */ 88 */
89struct btrfs_qgroup_rsv { 89struct btrfs_qgroup_rsv {
90 u64 values[BTRFS_QGROUP_RSV_LAST]; 90 u64 values[BTRFS_QGROUP_RSV_LAST];
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index df41d7049936..e74455eb42f9 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1980,7 +1980,7 @@ cleanup_io:
1980 * - In case of single failure, where rbio->failb == -1: 1980 * - In case of single failure, where rbio->failb == -1:
1981 * 1981 *
1982 * Cache this rbio iff the above read reconstruction is 1982 * Cache this rbio iff the above read reconstruction is
1983 * excuted without problems. 1983 * executed without problems.
1984 */ 1984 */
1985 if (err == BLK_STS_OK && rbio->failb < 0) 1985 if (err == BLK_STS_OK && rbio->failb < 0)
1986 cache_rbio_pages(rbio); 1986 cache_rbio_pages(rbio);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index d69fbfb30aa9..c3557c12656b 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -43,7 +43,7 @@ struct ref_entry {
43 * back to the delayed ref action. We hold the ref we are changing in the 43 * back to the delayed ref action. We hold the ref we are changing in the
44 * action so we can account for the history properly, and we record the root we 44 * action so we can account for the history properly, and we record the root we
45 * were called with since it could be different from ref_root. We also store 45 * were called with since it could be different from ref_root. We also store
46 * stack traces because thats how I roll. 46 * stack traces because that's how I roll.
47 */ 47 */
48struct ref_action { 48struct ref_action {
49 int action; 49 int action;
@@ -56,7 +56,7 @@ struct ref_action {
56 56
57/* 57/*
58 * One of these for every block we reference, it holds the roots and references 58 * One of these for every block we reference, it holds the roots and references
59 * to it as well as all of the ref actions that have occured to it. We never 59 * to it as well as all of the ref actions that have occurred to it. We never
60 * free it until we unmount the file system in order to make sure re-allocations 60 * free it until we unmount the file system in order to make sure re-allocations
61 * are happening properly. 61 * are happening properly.
62 */ 62 */
@@ -859,7 +859,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
859 * This shouldn't happen because we will add our re 859 * This shouldn't happen because we will add our re
860 * above when we lookup the be with !parent, but just in 860 * above when we lookup the be with !parent, but just in
861 * case catch this case so we don't panic because I 861 * case catch this case so we don't panic because I
862 * didn't thik of some other corner case. 862 * didn't think of some other corner case.
863 */ 863 */
864 btrfs_err(fs_info, "failed to find root %llu for %llu", 864 btrfs_err(fs_info, "failed to find root %llu for %llu",
865 root->root_key.objectid, be->bytenr); 865 root->root_key.objectid, be->bytenr);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 35bfe014712a..272b287f8cf0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2631,7 +2631,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2631 * only one thread can access block_rsv at this point, 2631 * only one thread can access block_rsv at this point,
2632 * so we don't need hold lock to protect block_rsv. 2632 * so we don't need hold lock to protect block_rsv.
2633 * we expand more reservation size here to allow enough 2633 * we expand more reservation size here to allow enough
2634 * space for relocation and we will return eailer in 2634 * space for relocation and we will return earlier in
2635 * enospc case. 2635 * enospc case.
2636 */ 2636 */
2637 rc->block_rsv->size = tmp + fs_info->nodesize * 2637 rc->block_rsv->size = tmp + fs_info->nodesize *
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 099eb3c8f86b..6dcd36d7b849 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3554,7 +3554,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3554 if (!ret && sctx->is_dev_replace) { 3554 if (!ret && sctx->is_dev_replace) {
3555 /* 3555 /*
3556 * If we are doing a device replace wait for any tasks 3556 * If we are doing a device replace wait for any tasks
3557 * that started dellaloc right before we set the block 3557 * that started delalloc right before we set the block
3558 * group to RO mode, as they might have just allocated 3558 * group to RO mode, as they might have just allocated
3559 * an extent from it or decided they could do a nocow 3559 * an extent from it or decided they could do a nocow
3560 * write. And if any such tasks did that, wait for their 3560 * write. And if any such tasks did that, wait for their
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9df4c0b0e789..1b15b43905f8 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2238,7 +2238,7 @@ out:
2238 * inodes "orphan" name instead of the real name and stop. Same with new inodes 2238 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2239 * that were not created yet and overwritten inodes/refs. 2239 * that were not created yet and overwritten inodes/refs.
2240 * 2240 *
2241 * When do we have have orphan inodes: 2241 * When do we have orphan inodes:
2242 * 1. When an inode is freshly created and thus no valid refs are available yet 2242 * 1. When an inode is freshly created and thus no valid refs are available yet
2243 * 2. When a directory lost all it's refs (deleted) but still has dir items 2243 * 2. When a directory lost all it's refs (deleted) but still has dir items
2244 * inside which were not processed yet (pending for move/delete). If anyone 2244 * inside which were not processed yet (pending for move/delete). If anyone
@@ -3854,7 +3854,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3854 /* 3854 /*
3855 * We may have refs where the parent directory does not exist 3855 * We may have refs where the parent directory does not exist
3856 * yet. This happens if the parent directories inum is higher 3856 * yet. This happens if the parent directories inum is higher
3857 * the the current inum. To handle this case, we create the 3857 * than the current inum. To handle this case, we create the
3858 * parent directory out of order. But we need to check if this 3858 * parent directory out of order. But we need to check if this
3859 * did already happen before due to other refs in the same dir. 3859 * did already happen before due to other refs in the same dir.
3860 */ 3860 */
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d3c6bbc0aa3a..368a5b9e6c13 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -93,7 +93,7 @@ const char *btrfs_decode_error(int errno)
93 93
94/* 94/*
95 * __btrfs_handle_fs_error decodes expected errors from the caller and 95 * __btrfs_handle_fs_error decodes expected errors from the caller and
96 * invokes the approciate error response. 96 * invokes the appropriate error response.
97 */ 97 */
98__cold 98__cold
99void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, 99void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
@@ -151,7 +151,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
151 * although there is no way to update the progress. It would add the 151 * although there is no way to update the progress. It would add the
152 * risk of a deadlock, therefore the canceling is omitted. The only 152 * risk of a deadlock, therefore the canceling is omitted. The only
153 * penalty is that some I/O remains active until the procedure 153 * penalty is that some I/O remains active until the procedure
154 * completes. The next time when the filesystem is mounted writeable 154 * completes. The next time when the filesystem is mounted writable
155 * again, the device replace operation continues. 155 * again, the device replace operation continues.
156 */ 156 */
157} 157}
@@ -1848,7 +1848,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1848 1848
1849 if (!btrfs_check_rw_degradable(fs_info, NULL)) { 1849 if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1850 btrfs_warn(fs_info, 1850 btrfs_warn(fs_info,
1851 "too many missing devices, writeable remount is not allowed"); 1851 "too many missing devices, writable remount is not allowed");
1852 ret = -EACCES; 1852 ret = -EACCES;
1853 goto restore; 1853 goto restore;
1854 } 1854 }
@@ -2312,7 +2312,7 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2312 * device_list_mutex here as we only read the device data and the list 2312 * device_list_mutex here as we only read the device data and the list
2313 * is protected by RCU. Even if a device is deleted during the list 2313 * is protected by RCU. Even if a device is deleted during the list
2314 * traversals, we'll get valid data, the freeing callback will wait at 2314 * traversals, we'll get valid data, the freeing callback will wait at
2315 * least until until the rcu_read_unlock. 2315 * least until the rcu_read_unlock.
2316 */ 2316 */
2317 rcu_read_lock(); 2317 rcu_read_lock();
2318 cur_devices = fs_info->fs_devices; 2318 cur_devices = fs_info->fs_devices;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 39d3b4b14098..127fa1535f58 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -699,7 +699,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
699/* 699/*
700 * btrfs_attach_transaction_barrier() - catch the running transaction 700 * btrfs_attach_transaction_barrier() - catch the running transaction
701 * 701 *
702 * It is similar to the above function, the differentia is this one 702 * It is similar to the above function, the difference is this one
703 * will wait for all the inactive transactions until they fully 703 * will wait for all the inactive transactions until they fully
704 * complete. 704 * complete.
705 */ 705 */
@@ -1329,7 +1329,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1329 return 0; 1329 return 0;
1330 1330
1331 /* 1331 /*
1332 * Ensure dirty @src will be commited. Or, after comming 1332 * Ensure dirty @src will be committed. Or, after coming
1333 * commit_fs_roots() and switch_commit_roots(), any dirty but not 1333 * commit_fs_roots() and switch_commit_roots(), any dirty but not
1334 * recorded root will never be updated again, causing an outdated root 1334 * recorded root will never be updated again, causing an outdated root
1335 * item. 1335 * item.
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 1a4e2b101ef2..a62e1e837a89 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -27,10 +27,10 @@
27 * 27 *
28 * @type: leaf or node 28 * @type: leaf or node
29 * @identifier: the necessary info to locate the leaf/node. 29 * @identifier: the necessary info to locate the leaf/node.
 30 * It's recommened to decode key.objecitd/offset if it's 30 * It's recommended to decode key.objectid/offset if it's
31 * meaningful. 31 * meaningful.
32 * @reason: describe the error 32 * @reason: describe the error
33 * @bad_value: optional, it's recommened to output bad value and its 33 * @bad_value: optional, it's recommended to output bad value and its
34 * expected value (range). 34 * expected value (range).
35 * 35 *
36 * Since comma is used to separate the components, only space is allowed 36 * Since comma is used to separate the components, only space is allowed
@@ -130,7 +130,7 @@ static int check_extent_data_item(struct btrfs_fs_info *fs_info,
130 } 130 }
131 131
132 /* 132 /*
133 * Support for new compression/encrption must introduce incompat flag, 133 * Support for new compression/encryption must introduce incompat flag,
134 * and must be caught in open_ctree(). 134 * and must be caught in open_ctree().
135 */ 135 */
136 if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) { 136 if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5baad9bebc62..ac232b3d6d7e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1144,7 +1144,7 @@ next:
1144 } 1144 }
1145 btrfs_release_path(path); 1145 btrfs_release_path(path);
1146 1146
1147 /* look for a conflicing name */ 1147 /* look for a conflicting name */
1148 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), 1148 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1149 name, namelen, 0); 1149 name, namelen, 0);
1150 if (di && !IS_ERR(di)) { 1150 if (di && !IS_ERR(di)) {
@@ -3149,7 +3149,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
3149 mutex_unlock(&log_root_tree->log_mutex); 3149 mutex_unlock(&log_root_tree->log_mutex);
3150 3150
3151 /* 3151 /*
3152 * nobody else is going to jump in and write the the ctree 3152 * Nobody else is going to jump in and write the ctree
3153 * super here because the log_commit atomic below is protecting 3153 * super here because the log_commit atomic below is protecting
3154 * us. We must be called with a transaction handle pinning 3154 * us. We must be called with a transaction handle pinning
3155 * the running transaction open, so a full commit can't hop 3155 * the running transaction open, so a full commit can't hop
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a735576471a9..2576b1a379c9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -212,7 +212,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
212 * the mutex can be very coarse and can cover long-running operations 212 * the mutex can be very coarse and can cover long-running operations
213 * 213 *
214 * protects: updates to fs_devices counters like missing devices, rw devices, 214 * protects: updates to fs_devices counters like missing devices, rw devices,
215 * seeding, structure cloning, openning/closing devices at mount/umount time 215 * seeding, structure cloning, opening/closing devices at mount/umount time
216 * 216 *
217 * global::fs_devs - add, remove, updates to the global list 217 * global::fs_devs - add, remove, updates to the global list
218 * 218 *
@@ -5047,7 +5047,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
5047 BUG_ON(1); 5047 BUG_ON(1);
5048 } 5048 }
5049 5049
5050 /* we don't want a chunk larger than 10% of writeable space */ 5050 /* We don't want a chunk larger than 10% of writable space */
5051 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), 5051 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5052 max_chunk_size); 5052 max_chunk_size);
5053 5053
@@ -5355,10 +5355,10 @@ out:
5355} 5355}
5356 5356
5357/* 5357/*
5358 * Chunk allocation falls into two parts. The first part does works 5358 * Chunk allocation falls into two parts. The first part does work
5359 * that make the new allocated chunk useable, but not do any operation 5359 * that makes the new allocated chunk usable, but does not do any operation
5360 * that modifies the chunk tree. The second part does the works that 5360 * that modifies the chunk tree. The second part does the work that
5361 * require modifying the chunk tree. This division is important for the 5361 * requires modifying the chunk tree. This division is important for the
5362 * bootstrap process of adding storage to a seed btrfs. 5362 * bootstrap process of adding storage to a seed btrfs.
5363 */ 5363 */
5364int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type) 5364int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
@@ -7256,7 +7256,7 @@ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7256 if (missing > max_tolerated) { 7256 if (missing > max_tolerated) {
7257 if (!failing_dev) 7257 if (!failing_dev)
7258 btrfs_warn(fs_info, 7258 btrfs_warn(fs_info,
7259 "chunk %llu missing %d devices, max tolerance is %d for writeable mount", 7259 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7260 em->start, missing, max_tolerated); 7260 em->start, missing, max_tolerated);
7261 free_extent_map(em); 7261 free_extent_map(em);
7262 ret = false; 7262 ret = false;