author	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-11 14:23:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-11 14:23:06 -0400
commit	f29135b54bcbfe1fea97d94e2ae860bade1d5a31 (patch)
tree	cd6cf1887c689b0fdd802bb8f16d3253adbc54c0
parent	4c609922a3ae0248597785d1f9adc8f142a80aef (diff)
parent	19c4d2f994788a954af1aa7e53b0fdb46fd7925a (diff)
Merge branch 'for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
 "This is a big variety of fixes and cleanups.

  Liu Bo continues to fixup fuzzer related problems, and some of
  Josef's cleanups are prep for his bigger extent buffer changes
  (slated for v4.10)"

* 'for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (39 commits)
  Revert "btrfs: let btrfs_delete_unused_bgs() to clean relocated bgs"
  Btrfs: remove unnecessary btrfs_mark_buffer_dirty in split_leaf
  Btrfs: don't BUG() during drop snapshot
  btrfs: fix btrfs_no_printk stub helper
  Btrfs: memset to avoid stale content in btree leaf
  btrfs: parent_start initialization cleanup
  btrfs: Remove already completed TODO comment
  btrfs: Do not reassign count in btrfs_run_delayed_refs
  btrfs: fix a possible umount deadlock
  Btrfs: fix memory leak in do_walk_down
  btrfs: btrfs_debug should consume fs_info when DEBUG is not defined
  btrfs: convert send's verbose_printk to btrfs_debug
  btrfs: convert pr_* to btrfs_* where possible
  btrfs: convert printk(KERN_* to use pr_* calls
  btrfs: unsplit printed strings
  btrfs: clean the old superblocks before freeing the device
  Btrfs: kill BUG_ON in run_delayed_tree_ref
  Btrfs: don't leak reloc root nodes on error
  btrfs: squash lines for simple wrapper functions
  Btrfs: improve check_node to avoid reading corrupted nodes
  ...
-rw-r--r--	fs/btrfs/backref.c	409
-rw-r--r--	fs/btrfs/btrfs_inode.h	11
-rw-r--r--	fs/btrfs/check-integrity.c	342
-rw-r--r--	fs/btrfs/compression.c	6
-rw-r--r--	fs/btrfs/ctree.c	56
-rw-r--r--	fs/btrfs/ctree.h	116
-rw-r--r--	fs/btrfs/delayed-inode.c	25
-rw-r--r--	fs/btrfs/delayed-ref.c	15
-rw-r--r--	fs/btrfs/dev-replace.c	21
-rw-r--r--	fs/btrfs/dir-item.c	7
-rw-r--r--	fs/btrfs/disk-io.c	237
-rw-r--r--	fs/btrfs/disk-io.h	2
-rw-r--r--	fs/btrfs/extent-tree.c	198
-rw-r--r--	fs/btrfs/extent_io.c	170
-rw-r--r--	fs/btrfs/extent_io.h	4
-rw-r--r--	fs/btrfs/file.c	43
-rw-r--r--	fs/btrfs/free-space-cache.c	21
-rw-r--r--	fs/btrfs/free-space-cache.h	6
-rw-r--r--	fs/btrfs/free-space-tree.c	20
-rw-r--r--	fs/btrfs/inode-map.c	31
-rw-r--r--	fs/btrfs/inode.c	70
-rw-r--r--	fs/btrfs/ioctl.c	14
-rw-r--r--	fs/btrfs/lzo.c	6
-rw-r--r--	fs/btrfs/ordered-data.c	4
-rw-r--r--	fs/btrfs/print-tree.c	93
-rw-r--r--	fs/btrfs/qgroup.c	77
-rw-r--r--	fs/btrfs/raid56.c	5
-rw-r--r--	fs/btrfs/reada.c	32
-rw-r--r--	fs/btrfs/relocation.c	47
-rw-r--r--	fs/btrfs/root-tree.c	18
-rw-r--r--	fs/btrfs/scrub.c	58
-rw-r--r--	fs/btrfs/send.c	79
-rw-r--r--	fs/btrfs/super.c	62
-rw-r--r--	fs/btrfs/sysfs.c	19
-rw-r--r--	fs/btrfs/tests/inode-tests.c	12
-rw-r--r--	fs/btrfs/tests/qgroup-tests.c	2
-rw-r--r--	fs/btrfs/transaction.c	49
-rw-r--r--	fs/btrfs/transaction.h	1
-rw-r--r--	fs/btrfs/tree-log.c	12
-rw-r--r--	fs/btrfs/uuid-tree.c	27
-rw-r--r--	fs/btrfs/volumes.c	197
-rw-r--r--	fs/btrfs/volumes.h	2
-rw-r--r--	fs/btrfs/zlib.c	8
43 files changed, 1563 insertions, 1071 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 455a6b2fd539..85dc7ab8f89e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -17,6 +17,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/rbtree.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "backref.h"
@@ -34,6 +35,265 @@ struct extent_inode_elem {
 	struct extent_inode_elem *next;
 };
 
+/*
+ * ref_root is used as the root of the ref tree that holds a collection
+ * of unique references.
+ */
+struct ref_root {
+	struct rb_root rb_root;
+
+	/*
+	 * The unique_refs represents the number of ref_nodes with a positive
+	 * count stored in the tree. Even if a ref_node (the count is greater
+	 * than one) is added, the unique_refs will only increase by one.
+	 */
+	unsigned int unique_refs;
+};
+
+/* ref_node is used to store a unique reference to the ref tree. */
+struct ref_node {
+	struct rb_node rb_node;
+
+	/* For NORMAL_REF, otherwise all these fields should be set to 0 */
+	u64 root_id;
+	u64 object_id;
+	u64 offset;
+
+	/* For SHARED_REF, otherwise parent field should be set to 0 */
+	u64 parent;
+
+	/* Ref to the ref_mod of btrfs_delayed_ref_node */
+	int ref_mod;
+};
+
+/* Dynamically allocate and initialize a ref_root */
+static struct ref_root *ref_root_alloc(void)
+{
+	struct ref_root *ref_tree;
+
+	ref_tree = kmalloc(sizeof(*ref_tree), GFP_NOFS);
+	if (!ref_tree)
+		return NULL;
+
+	ref_tree->rb_root = RB_ROOT;
+	ref_tree->unique_refs = 0;
+
+	return ref_tree;
+}
+
+/* Free all nodes in the ref tree, and reinit ref_root */
+static void ref_root_fini(struct ref_root *ref_tree)
+{
+	struct ref_node *node;
+	struct rb_node *next;
+
+	while ((next = rb_first(&ref_tree->rb_root)) != NULL) {
+		node = rb_entry(next, struct ref_node, rb_node);
+		rb_erase(next, &ref_tree->rb_root);
+		kfree(node);
+	}
+
+	ref_tree->rb_root = RB_ROOT;
+	ref_tree->unique_refs = 0;
+}
+
+static void ref_root_free(struct ref_root *ref_tree)
+{
+	if (!ref_tree)
+		return;
+
+	ref_root_fini(ref_tree);
+	kfree(ref_tree);
+}
+
+/*
+ * Compare ref_node with (root_id, object_id, offset, parent)
+ *
+ * The function compares two ref_node a and b. It returns an integer less
+ * than, equal to, or greater than zero if a is found, respectively, to be
+ * less than, to equal, or be greater than b.
+ */
+static int ref_node_cmp(struct ref_node *a, struct ref_node *b)
+{
+	if (a->root_id < b->root_id)
+		return -1;
+	else if (a->root_id > b->root_id)
+		return 1;
+
+	if (a->object_id < b->object_id)
+		return -1;
+	else if (a->object_id > b->object_id)
+		return 1;
+
+	if (a->offset < b->offset)
+		return -1;
+	else if (a->offset > b->offset)
+		return 1;
+
+	if (a->parent < b->parent)
+		return -1;
+	else if (a->parent > b->parent)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Search ref_node with (root_id, object_id, offset, parent) in the tree
+ *
+ * if found, the pointer of the ref_node will be returned;
+ * if not found, NULL will be returned and pos will point to the rb_node for
+ * insert, pos_parent will point to pos's parent for insert;
+ */
+static struct ref_node *__ref_tree_search(struct ref_root *ref_tree,
+					  struct rb_node ***pos,
+					  struct rb_node **pos_parent,
+					  u64 root_id, u64 object_id,
+					  u64 offset, u64 parent)
+{
+	struct ref_node *cur = NULL;
+	struct ref_node entry;
+	int ret;
+
+	entry.root_id = root_id;
+	entry.object_id = object_id;
+	entry.offset = offset;
+	entry.parent = parent;
+
+	*pos = &ref_tree->rb_root.rb_node;
+
+	while (**pos) {
+		*pos_parent = **pos;
+		cur = rb_entry(*pos_parent, struct ref_node, rb_node);
+
+		ret = ref_node_cmp(cur, &entry);
+		if (ret > 0)
+			*pos = &(**pos)->rb_left;
+		else if (ret < 0)
+			*pos = &(**pos)->rb_right;
+		else
+			return cur;
+	}
+
+	return NULL;
+}
+
+/*
+ * Insert a ref_node to the ref tree
+ * @pos used to specify the position to insert
+ * @pos_parent for specifying pos's parent
+ *
+ * success, return 0;
+ * ref_node already exists, return -EEXIST;
+ */
+static int ref_tree_insert(struct ref_root *ref_tree, struct rb_node **pos,
+			   struct rb_node *pos_parent, struct ref_node *ins)
+{
+	struct rb_node **p = NULL;
+	struct rb_node *parent = NULL;
+	struct ref_node *cur = NULL;
+
+	if (!pos) {
+		cur = __ref_tree_search(ref_tree, &p, &parent, ins->root_id,
+					ins->object_id, ins->offset,
+					ins->parent);
+		if (cur)
+			return -EEXIST;
+	} else {
+		p = pos;
+		parent = pos_parent;
+	}
+
+	rb_link_node(&ins->rb_node, parent, p);
+	rb_insert_color(&ins->rb_node, &ref_tree->rb_root);
+
+	return 0;
+}
+
+/* Erase and free ref_node, caller should update ref_root->unique_refs */
+static void ref_tree_remove(struct ref_root *ref_tree, struct ref_node *node)
+{
+	rb_erase(&node->rb_node, &ref_tree->rb_root);
+	kfree(node);
+}
+
+/*
+ * Update ref_root->unique_refs
+ *
+ * Call __ref_tree_search
+ * 1. if ref_node doesn't exist, ref_tree_insert this node, and update
+ *    ref_root->unique_refs:
+ *    if ref_node->ref_mod > 0, ref_root->unique_refs++;
+ *    if ref_node->ref_mod < 0, do nothing;
+ *
+ * 2. if ref_node is found, then get origin ref_node->ref_mod, and update
+ *    ref_node->ref_mod.
+ *    if ref_node->ref_mod is equal to 0, then call ref_tree_remove
+ *
+ *    according to origin_mod and new_mod, update ref_root->unique_refs
+ *    +----------------+--------------+-------------+
+ *    |                |new_count <= 0|new_count > 0|
+ *    +----------------+--------------+-------------+
+ *    |origin_count < 0|       0      |       1     |
+ *    +----------------+--------------+-------------+
+ *    |origin_count > 0|      -1      |       0     |
+ *    +----------------+--------------+-------------+
+ *
+ * In case of allocation failure, -ENOMEM is returned and the ref_tree stays
+ * unaltered.
+ * Success, return 0
+ */
+static int ref_tree_add(struct ref_root *ref_tree, u64 root_id, u64 object_id,
+			u64 offset, u64 parent, int count)
+{
+	struct ref_node *node = NULL;
+	struct rb_node **pos = NULL;
+	struct rb_node *pos_parent = NULL;
+	int origin_count;
+	int ret;
+
+	if (!count)
+		return 0;
+
+	node = __ref_tree_search(ref_tree, &pos, &pos_parent, root_id,
+				 object_id, offset, parent);
+	if (node == NULL) {
+		node = kmalloc(sizeof(*node), GFP_NOFS);
+		if (!node)
+			return -ENOMEM;
+
+		node->root_id = root_id;
+		node->object_id = object_id;
+		node->offset = offset;
+		node->parent = parent;
+		node->ref_mod = count;
+
+		ret = ref_tree_insert(ref_tree, pos, pos_parent, node);
+		ASSERT(!ret);
+		if (ret) {
+			kfree(node);
+			return ret;
+		}
+
+		ref_tree->unique_refs += node->ref_mod > 0 ? 1 : 0;
+
+		return 0;
+	}
+
+	origin_count = node->ref_mod;
+	node->ref_mod += count;
+
+	if (node->ref_mod > 0)
+		ref_tree->unique_refs += origin_count > 0 ? 0 : 1;
+	else if (node->ref_mod <= 0)
+		ref_tree->unique_refs += origin_count > 0 ? -1 : 0;
+
+	if (!node->ref_mod)
+		ref_tree_remove(ref_tree, node);
+
+	return 0;
+}
+
 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
 			      struct btrfs_file_extent_item *fi,
 			      u64 extent_item_pos,
@@ -390,8 +650,8 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 	/* root node has been locked, we can release @subvol_srcu safely here */
 	srcu_read_unlock(&fs_info->subvol_srcu, index);
 
-	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
-		 "%d for key (%llu %u %llu)\n",
+	btrfs_debug(fs_info,
+		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 		 ref->root_id, level, ref->count, ret,
 		 ref->key_for_search.objectid, ref->key_for_search.type,
 		 ref->key_for_search.offset);
@@ -700,6 +960,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 			     struct btrfs_path *path, u64 bytenr,
 			     int *info_level, struct list_head *prefs,
+			     struct ref_root *ref_tree,
 			     u64 *total_refs, u64 inum)
 {
 	int ret = 0;
@@ -767,6 +1028,13 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 			count = btrfs_shared_data_ref_count(leaf, sdref);
 			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
 					       bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, 0, 0, 0,
+							   bytenr, count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
@@ -794,6 +1062,15 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 			root = btrfs_extent_data_ref_root(leaf, dref);
 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
 					       bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, root,
+							   key.objectid,
+							   key.offset, 0,
+							   count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		default:
@@ -812,7 +1089,8 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
  */
 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 			    struct btrfs_path *path, u64 bytenr,
-			    int info_level, struct list_head *prefs, u64 inum)
+			    int info_level, struct list_head *prefs,
+			    struct ref_root *ref_tree, u64 inum)
 {
 	struct btrfs_root *extent_root = fs_info->extent_root;
 	int ret;
@@ -855,6 +1133,13 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 			count = btrfs_shared_data_ref_count(leaf, sdref);
 			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
 					       bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, 0, 0, 0,
+							   bytenr, count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
@@ -883,6 +1168,15 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 			root = btrfs_extent_data_ref_root(leaf, dref);
 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
 					       bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, root,
+							   key.objectid,
+							   key.offset, 0,
+							   count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		default:
@@ -909,13 +1203,16 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  * commit root.
  * The special case is for qgroup to search roots in commit_transaction().
  *
+ * If check_shared is set to 1, any extent that has more than one ref item
+ * will return BACKREF_FOUND_SHARED immediately.
+ *
  * FIXME some caching might speed things up
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info, u64 bytenr,
 			     u64 time_seq, struct ulist *refs,
 			     struct ulist *roots, const u64 *extent_item_pos,
-			     u64 root_objectid, u64 inum)
+			     u64 root_objectid, u64 inum, int check_shared)
 {
 	struct btrfs_key key;
 	struct btrfs_path *path;
@@ -927,6 +1224,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	struct list_head prefs;
 	struct __prelim_ref *ref;
 	struct extent_inode_elem *eie = NULL;
+	struct ref_root *ref_tree = NULL;
 	u64 total_refs = 0;
 
 	INIT_LIST_HEAD(&prefs);
@@ -958,6 +1256,18 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 again:
 	head = NULL;
 
+	if (check_shared) {
+		if (!ref_tree) {
+			ref_tree = ref_root_alloc();
+			if (!ref_tree) {
+				ret = -ENOMEM;
+				goto out;
+			}
+		} else {
+			ref_root_fini(ref_tree);
+		}
+	}
+
 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
@@ -1002,6 +1312,36 @@ again:
 		} else {
 			spin_unlock(&delayed_refs->lock);
 		}
+
+		if (check_shared && !list_empty(&prefs_delayed)) {
+			/*
+			 * Add all delay_ref to the ref_tree and check if there
+			 * are multiple ref items added.
+			 */
+			list_for_each_entry(ref, &prefs_delayed, list) {
+				if (ref->key_for_search.type) {
+					ret = ref_tree_add(ref_tree,
+						ref->root_id,
+						ref->key_for_search.objectid,
+						ref->key_for_search.offset,
+						0, ref->count);
+					if (ret)
+						goto out;
+				} else {
+					ret = ref_tree_add(ref_tree, 0, 0, 0,
+						     ref->parent, ref->count);
+					if (ret)
+						goto out;
+				}
+
+			}
+
+			if (ref_tree->unique_refs > 1) {
+				ret = BACKREF_FOUND_SHARED;
+				goto out;
+			}
+
+		}
 	}
 
 	if (path->slots[0]) {
@@ -1017,11 +1357,13 @@ again:
 	    key.type == BTRFS_METADATA_ITEM_KEY)) {
 		ret = __add_inline_refs(fs_info, path, bytenr,
 					&info_level, &prefs,
-					&total_refs, inum);
+					ref_tree, &total_refs,
+					inum);
 		if (ret)
 			goto out;
 		ret = __add_keyed_refs(fs_info, path, bytenr,
-				       info_level, &prefs, inum);
+				       info_level, &prefs,
+				       ref_tree, inum);
 		if (ret)
 			goto out;
 	}
@@ -1106,6 +1448,7 @@ again:
 
 out:
 	btrfs_free_path(path);
+	ref_root_free(ref_tree);
 	while (!list_empty(&prefs)) {
 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
 		list_del(&ref->list);
@@ -1159,8 +1502,8 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
 	if (!*leafs)
 		return -ENOMEM;
 
-	ret = find_parent_nodes(trans, fs_info, bytenr,
-				time_seq, *leafs, NULL, extent_item_pos, 0, 0);
+	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
+				*leafs, NULL, extent_item_pos, 0, 0, 0);
 	if (ret < 0 && ret != -ENOENT) {
 		free_leaf_list(*leafs);
 		return ret;
@@ -1202,8 +1545,8 @@ static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
-		ret = find_parent_nodes(trans, fs_info, bytenr,
-					time_seq, tmp, *roots, NULL, 0, 0);
+		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
+					tmp, *roots, NULL, 0, 0, 0);
 		if (ret < 0 && ret != -ENOENT) {
 			ulist_free(tmp);
 			ulist_free(*roots);
@@ -1273,7 +1616,7 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
-					roots, NULL, root_objectid, inum);
+					roots, NULL, root_objectid, inum, 1);
 		if (ret == BACKREF_FOUND_SHARED) {
 			/* this is the only condition under which we return 1 */
 			ret = 1;
@@ -1492,7 +1835,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
 
 	if (found_key->objectid > logical ||
 	    found_key->objectid + size <= logical) {
-		pr_debug("logical %llu is not within any extent\n", logical);
+		btrfs_debug(fs_info,
+			"logical %llu is not within any extent", logical);
 		return -ENOENT;
 	}
1498 1842
@@ -1503,8 +1847,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1503 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); 1847 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1504 flags = btrfs_extent_flags(eb, ei); 1848 flags = btrfs_extent_flags(eb, ei);
1505 1849
1506 pr_debug("logical %llu is at position %llu within the extent (%llu " 1850 btrfs_debug(fs_info,
1507 "EXTENT_ITEM %llu) flags %#llx size %u\n", 1851 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1508 logical, logical - found_key->objectid, found_key->objectid, 1852 logical, logical - found_key->objectid, found_key->objectid,
1509 found_key->offset, flags, item_size); 1853 found_key->offset, flags, item_size);
1510 1854
@@ -1625,21 +1969,24 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1625 return 0; 1969 return 0;
1626} 1970}
1627 1971
1628static int iterate_leaf_refs(struct extent_inode_elem *inode_list, 1972static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1629 u64 root, u64 extent_item_objectid, 1973 struct extent_inode_elem *inode_list,
1630 iterate_extent_inodes_t *iterate, void *ctx) 1974 u64 root, u64 extent_item_objectid,
1975 iterate_extent_inodes_t *iterate, void *ctx)
1631{ 1976{
1632 struct extent_inode_elem *eie; 1977 struct extent_inode_elem *eie;
1633 int ret = 0; 1978 int ret = 0;
1634 1979
1635 for (eie = inode_list; eie; eie = eie->next) { 1980 for (eie = inode_list; eie; eie = eie->next) {
1636 pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), " 1981 btrfs_debug(fs_info,
1637 "root %llu\n", extent_item_objectid, 1982 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1638 eie->inum, eie->offset, root); 1983 extent_item_objectid, eie->inum,
1984 eie->offset, root);
1639 ret = iterate(eie->inum, eie->offset, root, ctx); 1985 ret = iterate(eie->inum, eie->offset, root, ctx);
1640 if (ret) { 1986 if (ret) {
1641 pr_debug("stopping iteration for %llu due to ret=%d\n", 1987 btrfs_debug(fs_info,
1642 extent_item_objectid, ret); 1988 "stopping iteration for %llu due to ret=%d",
1989 extent_item_objectid, ret);
1643 break; 1990 break;
1644 } 1991 }
1645 } 1992 }
@@ -1667,7 +2014,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1667 struct ulist_iterator ref_uiter; 2014 struct ulist_iterator ref_uiter;
1668 struct ulist_iterator root_uiter; 2015 struct ulist_iterator root_uiter;
1669 2016
1670 pr_debug("resolving all inodes for extent %llu\n", 2017 btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1671 extent_item_objectid); 2018 extent_item_objectid);
1672 2019
1673 if (!search_commit_root) { 2020 if (!search_commit_root) {
@@ -1693,10 +2040,12 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1693 break; 2040 break;
1694 ULIST_ITER_INIT(&root_uiter); 2041 ULIST_ITER_INIT(&root_uiter);
1695 while (!ret && (root_node = ulist_next(roots, &root_uiter))) { 2042 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1696 pr_debug("root %llu references leaf %llu, data list " 2043 btrfs_debug(fs_info,
1697 "%#llx\n", root_node->val, ref_node->val, 2044 "root %llu references leaf %llu, data list %#llx",
1698 ref_node->aux); 2045 root_node->val, ref_node->val,
1699 ret = iterate_leaf_refs((struct extent_inode_elem *) 2046 ref_node->aux);
2047 ret = iterate_leaf_refs(fs_info,
2048 (struct extent_inode_elem *)
1700 (uintptr_t)ref_node->aux, 2049 (uintptr_t)ref_node->aux,
1701 root_node->val, 2050 root_node->val,
1702 extent_item_objectid, 2051 extent_item_objectid,
@@ -1792,9 +2141,9 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
1792 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) { 2141 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
1793 name_len = btrfs_inode_ref_name_len(eb, iref); 2142 name_len = btrfs_inode_ref_name_len(eb, iref);
1794 /* path must be released before calling iterate()! */ 2143 /* path must be released before calling iterate()! */
1795 pr_debug("following ref at offset %u for inode %llu in " 2144 btrfs_debug(fs_root->fs_info,
1796 "tree %llu\n", cur, found_key.objectid, 2145 "following ref at offset %u for inode %llu in tree %llu",
1797 fs_root->objectid); 2146 cur, found_key.objectid, fs_root->objectid);
1798 ret = iterate(parent, name_len, 2147 ret = iterate(parent, name_len,
1799 (unsigned long)(iref + 1), eb, ctx); 2148 (unsigned long)(iref + 1), eb, ctx);
1800 if (ret) 2149 if (ret)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 4919aedb5fc1..1a8fa46ff87e 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,17 +44,6 @@
 #define BTRFS_INODE_IN_DELALLOC_LIST		9
 #define BTRFS_INODE_READDIO_NEED_LOCK		10
 #define BTRFS_INODE_HAS_PROPS		        11
-/*
- * The following 3 bits are meant only for the btree inode.
- * When any of them is set, it means an error happened while writing an
- * extent buffer belonging to:
- * 1) a non-log btree
- * 2) a log btree and first log sub-transaction
- * 3) a log btree and second log sub-transaction
- */
-#define BTRFS_INODE_BTREE_ERR		        12
-#define BTRFS_INODE_BTREE_LOG1_ERR		13
-#define BTRFS_INODE_BTREE_LOG2_ERR		14
 
 /* in memory btrfs inode */
 struct btrfs_inode {
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 66789471b49d..8e99251650b3 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -656,7 +656,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 	BUG_ON(NULL == state);
 	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
 	if (NULL == selected_super) {
-		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+		pr_info("btrfsic: error, kmalloc failed!\n");
 		return -ENOMEM;
 	}
 
@@ -681,7 +681,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 	}
 
 	if (NULL == state->latest_superblock) {
-		printk(KERN_INFO "btrfsic: no superblock found!\n");
+		pr_info("btrfsic: no superblock found!\n");
 		kfree(selected_super);
 		return -1;
 	}
@@ -698,13 +698,13 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 			next_bytenr = btrfs_super_root(selected_super);
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "root@%llu\n", next_bytenr);
+				pr_info("root@%llu\n", next_bytenr);
 			break;
 		case 1:
 			next_bytenr = btrfs_super_chunk_root(selected_super);
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "chunk@%llu\n", next_bytenr);
+				pr_info("chunk@%llu\n", next_bytenr);
 			break;
 		case 2:
 			next_bytenr = btrfs_super_log_root(selected_super);
@@ -712,7 +712,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 				continue;
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "log@%llu\n", next_bytenr);
+				pr_info("log@%llu\n", next_bytenr);
 			break;
 		}
 
@@ -720,7 +720,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 			btrfs_num_copies(state->root->fs_info,
 					 next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
 
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
@@ -733,9 +733,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 					&tmp_next_block_ctx,
 					mirror_num);
 		if (ret) {
-			printk(KERN_INFO "btrfsic:"
-			       " btrfsic_map_block(root @%llu,"
-			       " mirror %d) failed!\n",
+			pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n",
 			       next_bytenr, mirror_num);
 			kfree(selected_super);
 			return -1;
@@ -758,8 +756,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 
 		ret = btrfsic_read_block(state, &tmp_next_block_ctx);
 		if (ret < (int)PAGE_SIZE) {
-			printk(KERN_INFO
-			       "btrfsic: read @logical %llu failed!\n",
+			pr_info("btrfsic: read @logical %llu failed!\n",
 			       tmp_next_block_ctx.start);
 			btrfsic_release_block_ctx(&tmp_next_block_ctx);
 			kfree(selected_super);
@@ -820,7 +817,7 @@ static int btrfsic_process_superblock_dev_mirror(
 	if (NULL == superblock_tmp) {
 		superblock_tmp = btrfsic_block_alloc();
 		if (NULL == superblock_tmp) {
-			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+			pr_info("btrfsic: error, kmalloc failed!\n");
 			brelse(bh);
 			return -1;
 		}
@@ -894,7 +891,7 @@ static int btrfsic_process_superblock_dev_mirror(
 		btrfs_num_copies(state->root->fs_info,
 				 next_bytenr, state->metablock_size);
 	if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-		printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+		pr_info("num_copies(log_bytenr=%llu) = %d\n",
 		       next_bytenr, num_copies);
 	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 		struct btrfsic_block *next_block;
@@ -905,8 +902,7 @@ static int btrfsic_process_superblock_dev_mirror(
 				     state->metablock_size,
 				     &tmp_next_block_ctx,
 				     mirror_num)) {
-			printk(KERN_INFO "btrfsic: btrfsic_map_block("
-			       "bytenr @%llu, mirror %d) failed!\n",
+			pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
 			       next_bytenr, mirror_num);
 			brelse(bh);
 			return -1;
@@ -948,7 +944,7 @@ static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
 
 	sf = kzalloc(sizeof(*sf), GFP_NOFS);
 	if (NULL == sf)
-		printk(KERN_INFO "btrfsic: alloc memory failed!\n");
+		pr_info("btrfsic: alloc memory failed!\n");
 	else
 		sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
 	return sf;
@@ -994,9 +990,7 @@ continue_with_new_stack_frame:
 		sf->nr = btrfs_stack_header_nritems(&leafhdr->header);
 
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "leaf %llu items %d generation %llu"
-			       " owner %llu\n",
+			pr_info("leaf %llu items %d generation %llu owner %llu\n",
 			       sf->block_ctx->start, sf->nr,
 			       btrfs_stack_header_generation(
 				       &leafhdr->header),
@@ -1023,8 +1017,7 @@ continue_with_current_leaf_stack_frame:
 			if (disk_item_offset + sizeof(struct btrfs_item) >
 			    sf->block_ctx->len) {
 leaf_item_out_of_bounce_error:
-				printk(KERN_INFO
-				       "btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
+				pr_info("btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
 				       sf->block_ctx->start,
 				       sf->block_ctx->dev->name);
 				goto one_stack_frame_backwards;
@@ -1120,8 +1113,7 @@ leaf_item_out_of_bounce_error:
 		sf->nr = btrfs_stack_header_nritems(&nodehdr->header);
 
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO "node %llu level %d items %d"
-			       " generation %llu owner %llu\n",
+			pr_info("node %llu level %d items %d generation %llu owner %llu\n",
 			       sf->block_ctx->start,
 			       nodehdr->header.level, sf->nr,
 			       btrfs_stack_header_generation(
@@ -1145,8 +1137,7 @@ continue_with_current_node_stack_frame:
 			(uintptr_t)nodehdr;
 		if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
 		    sf->block_ctx->len) {
-			printk(KERN_INFO
-			       "btrfsic: node item out of bounce at logical %llu, dev %s\n",
+			pr_info("btrfsic: node item out of bounce at logical %llu, dev %s\n",
 			       sf->block_ctx->start,
 			       sf->block_ctx->dev->name);
 			goto one_stack_frame_backwards;
@@ -1275,7 +1266,7 @@ static int btrfsic_create_link_to_next_block(
 			btrfs_num_copies(state->root->fs_info,
 					 next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, *num_copiesp);
 		*mirror_nump = 1;
 	}
@@ -1284,15 +1275,13 @@ static int btrfsic_create_link_to_next_block(
 		return 0;
 
 	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-		printk(KERN_INFO
-		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
+		pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n",
 		       *mirror_nump);
 	ret = btrfsic_map_block(state, next_bytenr,
 				state->metablock_size,
 				next_block_ctx, *mirror_nump);
 	if (ret) {
-		printk(KERN_INFO
-		       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
+		pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
 		       next_bytenr, *mirror_nump);
 		btrfsic_release_block_ctx(next_block_ctx);
 		*next_blockp = NULL;
@@ -1318,16 +1307,14 @@ static int btrfsic_create_link_to_next_block(
 		if (next_block->logical_bytenr != next_bytenr &&
 		    !(!next_block->is_metadata &&
 		      0 == next_block->logical_bytenr))
-			printk(KERN_INFO
-			       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+			pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
 			       next_bytenr, next_block_ctx->dev->name,
 			       next_block_ctx->dev_bytenr, *mirror_nump,
 			       btrfsic_get_block_type(state,
 						      next_block),
 			       next_block->logical_bytenr);
 		else
-			printk(KERN_INFO
-			       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+			pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
 			       next_bytenr, next_block_ctx->dev->name,
 			       next_block_ctx->dev_bytenr, *mirror_nump,
 			       btrfsic_get_block_type(state,
@@ -1348,7 +1335,7 @@ static int btrfsic_create_link_to_next_block(
 	if (NULL == l) {
 		l = btrfsic_block_link_alloc();
 		if (NULL == l) {
-			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+			pr_info("btrfsic: error, kmalloc failed!\n");
 			btrfsic_release_block_ctx(next_block_ctx);
 			*next_blockp = NULL;
 			return -1;
@@ -1381,8 +1368,7 @@ static int btrfsic_create_link_to_next_block(
 	if (limit_nesting > 0 && did_alloc_block_link) {
 		ret = btrfsic_read_block(state, next_block_ctx);
 		if (ret < (int)next_block_ctx->len) {
-			printk(KERN_INFO
-			       "btrfsic: read block @logical %llu failed!\n",
+			pr_info("btrfsic: read block @logical %llu failed!\n",
 			       next_bytenr);
 			btrfsic_release_block_ctx(next_block_ctx);
 			*next_blockp = NULL;
@@ -1417,8 +1403,7 @@ static int btrfsic_handle_extent_data(
 	if (file_extent_item_offset +
 	    offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
 	    block_ctx->len) {
-		printk(KERN_INFO
-		       "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+		pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
 		       block_ctx->start, block_ctx->dev->name);
 		return -1;
 	}
@@ -1429,7 +1414,7 @@ static int btrfsic_handle_extent_data(
 	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
 	    btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
-			printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
+			pr_info("extent_data: type %u, disk_bytenr = %llu\n",
 			       file_extent_item.type,
 			       btrfs_stack_file_extent_disk_bytenr(
 				       &file_extent_item));
@@ -1438,8 +1423,7 @@ static int btrfsic_handle_extent_data(
 
 	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
 	    block_ctx->len) {
-		printk(KERN_INFO
-		       "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+		pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
 		       block_ctx->start, block_ctx->dev->name);
 		return -1;
 	}
@@ -1457,8 +1441,7 @@ static int btrfsic_handle_extent_data(
 	generation = btrfs_stack_file_extent_generation(&file_extent_item);
 
 	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
-		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
-		       " offset = %llu, num_bytes = %llu\n",
+		pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
 		       file_extent_item.type,
 		       btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
 		       btrfs_stack_file_extent_offset(&file_extent_item),
@@ -1477,7 +1460,7 @@ static int btrfsic_handle_extent_data(
 			btrfs_num_copies(state->root->fs_info,
 					 next_bytenr, state->datablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 			struct btrfsic_block_data_ctx next_block_ctx;
@@ -1485,19 +1468,16 @@ static int btrfsic_handle_extent_data(
 			int block_was_created;
 
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO "btrfsic_handle_extent_data("
-				       "mirror_num=%d)\n", mirror_num);
+				pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n",
+					mirror_num);
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
-				printk(KERN_INFO
-				       "\tdisk_bytenr = %llu, num_bytes %u\n",
+				pr_info("\tdisk_bytenr = %llu, num_bytes %u\n",
 				       next_bytenr, chunk_len);
 			ret = btrfsic_map_block(state, next_bytenr,
 						chunk_len, &next_block_ctx,
 						mirror_num);
 			if (ret) {
-				printk(KERN_INFO
-				       "btrfsic: btrfsic_map_block(@%llu,"
-				       " mirror=%d) failed!\n",
+				pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
 				       next_bytenr, mirror_num);
 				return -1;
 			}
@@ -1512,8 +1492,7 @@ static int btrfsic_handle_extent_data(
 						mirror_num,
 						&block_was_created);
 			if (NULL == next_block) {
-				printk(KERN_INFO
-				       "btrfsic: error, kmalloc failed!\n");
+				pr_info("btrfsic: error, kmalloc failed!\n");
 				btrfsic_release_block_ctx(&next_block_ctx);
 				return -1;
 			}
@@ -1523,12 +1502,7 @@ static int btrfsic_handle_extent_data(
 			    next_block->logical_bytenr != next_bytenr &&
 			    !(!next_block->is_metadata &&
 			      0 == next_block->logical_bytenr)) {
-				printk(KERN_INFO
-				       "Referenced block"
-				       " @%llu (%s/%llu/%d)"
-				       " found in hash table, D,"
-				       " bytenr mismatch"
-				       " (!= stored %llu).\n",
+				pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu).\n",
 				       next_bytenr,
 				       next_block_ctx.dev->name,
 				       next_block_ctx.dev_bytenr,
@@ -1592,7 +1566,7 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 	kfree(multi);
 	if (NULL == block_ctx_out->dev) {
 		ret = -ENXIO;
-		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
+		pr_info("btrfsic: error, cannot lookup dev (#1)!\n");
 	}
 
 	return ret;
@@ -1638,8 +1612,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 	BUG_ON(block_ctx->pagev);
 	BUG_ON(block_ctx->mem_to_free);
 	if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
-		printk(KERN_INFO
-		       "btrfsic: read_block() with unaligned bytenr %llu\n",
+		pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
 		       block_ctx->dev_bytenr);
 		return -1;
 	}
@@ -1666,8 +1639,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 
 		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
 		if (!bio) {
-			printk(KERN_INFO
-			       "btrfsic: bio_alloc() for %u pages failed!\n",
+			pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
 			       num_pages - i);
 			return -1;
 		}
@@ -1682,13 +1654,11 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 			break;
 		}
 		if (j == i) {
-			printk(KERN_INFO
-			       "btrfsic: error, failed to add a single page!\n");
+			pr_info("btrfsic: error, failed to add a single page!\n");
 			return -1;
 		}
 		if (submit_bio_wait(bio)) {
-			printk(KERN_INFO
-			       "btrfsic: read error at logical %llu dev %s!\n",
+			pr_info("btrfsic: read error at logical %llu dev %s!\n",
 			       block_ctx->start, block_ctx->dev->name);
 			bio_put(bio);
 			return -1;
@@ -1700,7 +1670,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 	for (i = 0; i < num_pages; i++) {
 		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
 		if (!block_ctx->datav[i]) {
-			printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
+			pr_info("btrfsic: kmap() failed (dev %s)!\n",
 			       block_ctx->dev->name);
 			return -1;
 		}
@@ -1715,19 +1685,17 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
 
 	BUG_ON(NULL == state);
 
-	printk(KERN_INFO "all_blocks_list:\n");
+	pr_info("all_blocks_list:\n");
 	list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
 		const struct btrfsic_block_link *l;
 
-		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
+		pr_info("%c-block @%llu (%s/%llu/%d)\n",
 		       btrfsic_get_block_type(state, b_all),
 		       b_all->logical_bytenr, b_all->dev_state->name,
 		       b_all->dev_bytenr, b_all->mirror_num);
 
 		list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
-			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
-			       " refers %u* to"
-			       " %c @%llu (%s/%llu/%d)\n",
+			pr_info(" %c @%llu (%s/%llu/%d) refers %u* to %c @%llu (%s/%llu/%d)\n",
 			       btrfsic_get_block_type(state, b_all),
 			       b_all->logical_bytenr, b_all->dev_state->name,
 			       b_all->dev_bytenr, b_all->mirror_num,
@@ -1740,9 +1708,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
 		}
 
 		list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
-			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
-			       " is ref %u* from"
-			       " %c @%llu (%s/%llu/%d)\n",
+			pr_info(" %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
 			       btrfsic_get_block_type(state, b_all),
 			       b_all->logical_bytenr, b_all->dev_state->name,
 			       b_all->dev_bytenr, b_all->mirror_num,
@@ -1754,7 +1720,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
 			       l->block_ref_from->mirror_num);
 		}
 
-		printk(KERN_INFO "\n");
+		pr_info("\n");
 	}
 }
 
@@ -1829,8 +1795,7 @@ again:
 					       mapped_datav[0]);
 			if (num_pages * PAGE_SIZE <
 			    BTRFS_SUPER_INFO_SIZE) {
-				printk(KERN_INFO
-				       "btrfsic: cannot work with too short bios!\n");
+				pr_info("btrfsic: cannot work with too short bios!\n");
 				return;
 			}
 			is_metadata = 1;
@@ -1838,8 +1803,7 @@ again:
 			processed_len = BTRFS_SUPER_INFO_SIZE;
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
-				printk(KERN_INFO
-				       "[before new superblock is written]:\n");
+				pr_info("[before new superblock is written]:\n");
 				btrfsic_dump_tree_sub(state, block, 0);
 			}
 		}
@@ -1847,8 +1811,7 @@ again:
 		if (!block->is_superblock) {
 			if (num_pages * PAGE_SIZE <
 			    state->metablock_size) {
-				printk(KERN_INFO
-				       "btrfsic: cannot work with too short bios!\n");
+				pr_info("btrfsic: cannot work with too short bios!\n");
 				return;
 			}
 			processed_len = state->metablock_size;
@@ -1863,8 +1826,7 @@ again:
 			if (block->logical_bytenr != bytenr &&
 			    !(!block->is_metadata &&
 			      block->logical_bytenr == 0))
-				printk(KERN_INFO
-				       "Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+				pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
 				       bytenr, dev_state->name,
 				       dev_bytenr,
 				       block->mirror_num,
@@ -1872,8 +1834,7 @@ again:
 						       block),
 				       block->logical_bytenr);
 			else
-				printk(KERN_INFO
-				       "Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+				pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
 				       bytenr, dev_state->name,
 				       dev_bytenr, block->mirror_num,
 				       btrfsic_get_block_type(state,
@@ -1883,33 +1844,24 @@ again:
 		} else {
 			if (num_pages * PAGE_SIZE <
 			    state->datablock_size) {
-				printk(KERN_INFO
-				       "btrfsic: cannot work with too short bios!\n");
+				pr_info("btrfsic: cannot work with too short bios!\n");
 				return;
 			}
 			processed_len = state->datablock_size;
 			bytenr = block->logical_bytenr;
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO
-				       "Written block @%llu (%s/%llu/%d)"
-				       " found in hash table, %c.\n",
+				pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
 				       bytenr, dev_state->name, dev_bytenr,
 				       block->mirror_num,
 				       btrfsic_get_block_type(state, block));
 		}
 
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "ref_to_list: %cE, ref_from_list: %cE\n",
+			pr_info("ref_to_list: %cE, ref_from_list: %cE\n",
 			       list_empty(&block->ref_to_list) ? ' ' : '!',
 			       list_empty(&block->ref_from_list) ? ' ' : '!');
 		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
-			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
-			       " @%llu (%s/%llu/%d), old(gen=%llu,"
-			       " objectid=%llu, type=%d, offset=%llu),"
-			       " new(gen=%llu),"
-			       " which is referenced by most recent superblock"
-			       " (superblockgen=%llu)!\n",
+			pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n",
 			       btrfsic_get_block_type(state, block), bytenr,
 			       dev_state->name, dev_bytenr, block->mirror_num,
 			       block->generation,
@@ -1923,9 +1875,7 @@ again:
 		}
 
 		if (!block->is_iodone && !block->never_written) {
-			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
-			       " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
-			       " which is not yet iodone!\n",
+			pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n",
 			       btrfsic_get_block_type(state, block), bytenr,
 			       dev_state->name, dev_bytenr, block->mirror_num,
 			       block->generation,
@@ -2023,8 +1973,7 @@ again:
 					       mapped_datav[0]);
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
-				printk(KERN_INFO
-				       "[after new superblock is written]:\n");
+				pr_info("[after new superblock is written]:\n");
 				btrfsic_dump_tree_sub(state, block, 0);
 			}
 		} else {
@@ -2036,9 +1985,7 @@ again:
 						      0, 0);
 		}
 		if (ret)
-			printk(KERN_INFO
-			       "btrfsic: btrfsic_process_metablock"
-			       "(root @%llu) failed!\n",
+			pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n",
 			       dev_bytenr);
 	} else {
 		block->is_metadata = 0;
@@ -2065,8 +2012,7 @@ again:
 	if (!is_metadata) {
 		processed_len = state->datablock_size;
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO "Written block (%s/%llu/?)"
-			       " !found in hash table, D.\n",
+			pr_info("Written block (%s/%llu/?) !found in hash table, D.\n",
 			       dev_state->name, dev_bytenr);
 		if (!state->include_extent_data) {
 			/* ignore that written D block */
@@ -2084,9 +2030,7 @@ again:
2084 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state, 2030 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
2085 dev_bytenr); 2031 dev_bytenr);
2086 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2032 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2087 printk(KERN_INFO 2033 pr_info("Written block @%llu (%s/%llu/?) !found in hash table, M.\n",
2088 "Written block @%llu (%s/%llu/?)"
2089 " !found in hash table, M.\n",
2090 bytenr, dev_state->name, dev_bytenr); 2034 bytenr, dev_state->name, dev_bytenr);
2091 } 2035 }
2092 2036
@@ -2100,7 +2044,7 @@ again:
2100 2044
2101 block = btrfsic_block_alloc(); 2045 block = btrfsic_block_alloc();
2102 if (NULL == block) { 2046 if (NULL == block) {
2103 printk(KERN_INFO "btrfsic: error, kmalloc failed!\n"); 2047 pr_info("btrfsic: error, kmalloc failed!\n");
2104 btrfsic_release_block_ctx(&block_ctx); 2048 btrfsic_release_block_ctx(&block_ctx);
2105 goto continue_loop; 2049 goto continue_loop;
2106 } 2050 }
@@ -2150,8 +2094,7 @@ again:
2150 block->next_in_same_bio = NULL; 2094 block->next_in_same_bio = NULL;
2151 } 2095 }
2152 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2096 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2153 printk(KERN_INFO 2097 pr_info("New written %c-block @%llu (%s/%llu/%d)\n",
2154 "New written %c-block @%llu (%s/%llu/%d)\n",
2155 is_metadata ? 'M' : 'D', 2098 is_metadata ? 'M' : 'D',
2156 block->logical_bytenr, block->dev_state->name, 2099 block->logical_bytenr, block->dev_state->name,
2157 block->dev_bytenr, block->mirror_num); 2100 block->dev_bytenr, block->mirror_num);
@@ -2162,9 +2105,7 @@ again:
2162 ret = btrfsic_process_metablock(state, block, 2105 ret = btrfsic_process_metablock(state, block,
2163 &block_ctx, 0, 0); 2106 &block_ctx, 0, 0);
2164 if (ret) 2107 if (ret)
2165 printk(KERN_INFO 2108 pr_info("btrfsic: process_metablock(root @%llu) failed!\n",
2166 "btrfsic: process_metablock(root @%llu)"
2167 " failed!\n",
2168 dev_bytenr); 2109 dev_bytenr);
2169 } 2110 }
2170 btrfsic_release_block_ctx(&block_ctx); 2111 btrfsic_release_block_ctx(&block_ctx);
@@ -2199,8 +2140,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
2199 2140
2200 if ((dev_state->state->print_mask & 2141 if ((dev_state->state->print_mask &
2201 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2142 BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
2202 printk(KERN_INFO 2143 pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
2203 "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
2204 bp->bi_error, 2144 bp->bi_error,
2205 btrfsic_get_block_type(dev_state->state, block), 2145 btrfsic_get_block_type(dev_state->state, block),
2206 block->logical_bytenr, dev_state->name, 2146 block->logical_bytenr, dev_state->name,
@@ -2211,8 +2151,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
2211 dev_state->last_flush_gen++; 2151 dev_state->last_flush_gen++;
2212 if ((dev_state->state->print_mask & 2152 if ((dev_state->state->print_mask &
2213 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2153 BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
2214 printk(KERN_INFO 2154 pr_info("bio_end_io() new %s flush_gen=%llu\n",
2215 "bio_end_io() new %s flush_gen=%llu\n",
2216 dev_state->name, 2155 dev_state->name,
2217 dev_state->last_flush_gen); 2156 dev_state->last_flush_gen);
2218 } 2157 }
@@ -2235,8 +2174,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
2235 BUG_ON(NULL == block); 2174 BUG_ON(NULL == block);
2236 dev_state = block->dev_state; 2175 dev_state = block->dev_state;
2237 if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2176 if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
2238 printk(KERN_INFO 2177 pr_info("bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
2239 "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
2240 iodone_w_error, 2178 iodone_w_error,
2241 btrfsic_get_block_type(dev_state->state, block), 2179 btrfsic_get_block_type(dev_state->state, block),
2242 block->logical_bytenr, block->dev_state->name, 2180 block->logical_bytenr, block->dev_state->name,
@@ -2247,8 +2185,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
2247 dev_state->last_flush_gen++; 2185 dev_state->last_flush_gen++;
2248 if ((dev_state->state->print_mask & 2186 if ((dev_state->state->print_mask &
2249 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2187 BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
2250 printk(KERN_INFO 2188 pr_info("bh_end_io() new %s flush_gen=%llu\n",
2251 "bh_end_io() new %s flush_gen=%llu\n",
2252 dev_state->name, dev_state->last_flush_gen); 2189 dev_state->name, dev_state->last_flush_gen);
2253 } 2190 }
2254 if (block->submit_bio_bh_rw & REQ_FUA) 2191 if (block->submit_bio_bh_rw & REQ_FUA)
@@ -2271,9 +2208,7 @@ static int btrfsic_process_written_superblock(
2271 if (!(superblock->generation > state->max_superblock_generation || 2208 if (!(superblock->generation > state->max_superblock_generation ||
2272 0 == state->max_superblock_generation)) { 2209 0 == state->max_superblock_generation)) {
2273 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2210 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
2274 printk(KERN_INFO 2211 pr_info("btrfsic: superblock @%llu (%s/%llu/%d) with old gen %llu <= %llu\n",
2275 "btrfsic: superblock @%llu (%s/%llu/%d)"
2276 " with old gen %llu <= %llu\n",
2277 superblock->logical_bytenr, 2212 superblock->logical_bytenr,
2278 superblock->dev_state->name, 2213 superblock->dev_state->name,
2279 superblock->dev_bytenr, superblock->mirror_num, 2214 superblock->dev_bytenr, superblock->mirror_num,
@@ -2281,9 +2216,7 @@ static int btrfsic_process_written_superblock(
2281 state->max_superblock_generation); 2216 state->max_superblock_generation);
2282 } else { 2217 } else {
2283 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 2218 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
2284 printk(KERN_INFO 2219 pr_info("btrfsic: got new superblock @%llu (%s/%llu/%d) with new gen %llu > %llu\n",
2285 "btrfsic: got new superblock @%llu (%s/%llu/%d)"
2286 " with new gen %llu > %llu\n",
2287 superblock->logical_bytenr, 2220 superblock->logical_bytenr,
2288 superblock->dev_state->name, 2221 superblock->dev_state->name,
2289 superblock->dev_bytenr, superblock->mirror_num, 2222 superblock->dev_bytenr, superblock->mirror_num,
@@ -2318,7 +2251,7 @@ static int btrfsic_process_written_superblock(
2318 next_bytenr = btrfs_super_root(super_hdr); 2251 next_bytenr = btrfs_super_root(super_hdr);
2319 if (state->print_mask & 2252 if (state->print_mask &
2320 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2253 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
2321 printk(KERN_INFO "root@%llu\n", next_bytenr); 2254 pr_info("root@%llu\n", next_bytenr);
2322 break; 2255 break;
2323 case 1: 2256 case 1:
2324 btrfs_set_disk_key_objectid(&tmp_disk_key, 2257 btrfs_set_disk_key_objectid(&tmp_disk_key,
@@ -2327,7 +2260,7 @@ static int btrfsic_process_written_superblock(
2327 next_bytenr = btrfs_super_chunk_root(super_hdr); 2260 next_bytenr = btrfs_super_chunk_root(super_hdr);
2328 if (state->print_mask & 2261 if (state->print_mask &
2329 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2262 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
2330 printk(KERN_INFO "chunk@%llu\n", next_bytenr); 2263 pr_info("chunk@%llu\n", next_bytenr);
2331 break; 2264 break;
2332 case 2: 2265 case 2:
2333 btrfs_set_disk_key_objectid(&tmp_disk_key, 2266 btrfs_set_disk_key_objectid(&tmp_disk_key,
@@ -2338,7 +2271,7 @@ static int btrfsic_process_written_superblock(
2338 continue; 2271 continue;
2339 if (state->print_mask & 2272 if (state->print_mask &
2340 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) 2273 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
2341 printk(KERN_INFO "log@%llu\n", next_bytenr); 2274 pr_info("log@%llu\n", next_bytenr);
2342 break; 2275 break;
2343 } 2276 }
2344 2277
@@ -2346,23 +2279,19 @@ static int btrfsic_process_written_superblock(
2346 btrfs_num_copies(state->root->fs_info, 2279 btrfs_num_copies(state->root->fs_info,
2347 next_bytenr, BTRFS_SUPER_INFO_SIZE); 2280 next_bytenr, BTRFS_SUPER_INFO_SIZE);
2348 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 2281 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
2349 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n", 2282 pr_info("num_copies(log_bytenr=%llu) = %d\n",
2350 next_bytenr, num_copies); 2283 next_bytenr, num_copies);
2351 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2284 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
2352 int was_created; 2285 int was_created;
2353 2286
2354 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2287 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2355 printk(KERN_INFO 2288 pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num);
2356 "btrfsic_process_written_superblock("
2357 "mirror_num=%d)\n", mirror_num);
2358 ret = btrfsic_map_block(state, next_bytenr, 2289 ret = btrfsic_map_block(state, next_bytenr,
2359 BTRFS_SUPER_INFO_SIZE, 2290 BTRFS_SUPER_INFO_SIZE,
2360 &tmp_next_block_ctx, 2291 &tmp_next_block_ctx,
2361 mirror_num); 2292 mirror_num);
2362 if (ret) { 2293 if (ret) {
2363 printk(KERN_INFO 2294 pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
2364 "btrfsic: btrfsic_map_block(@%llu,"
2365 " mirror=%d) failed!\n",
2366 next_bytenr, mirror_num); 2295 next_bytenr, mirror_num);
2367 return -1; 2296 return -1;
2368 } 2297 }
@@ -2375,8 +2304,7 @@ static int btrfsic_process_written_superblock(
2375 mirror_num, 2304 mirror_num,
2376 &was_created); 2305 &was_created);
2377 if (NULL == next_block) { 2306 if (NULL == next_block) {
2378 printk(KERN_INFO 2307 pr_info("btrfsic: error, kmalloc failed!\n");
2379 "btrfsic: error, kmalloc failed!\n");
2380 btrfsic_release_block_ctx(&tmp_next_block_ctx); 2308 btrfsic_release_block_ctx(&tmp_next_block_ctx);
2381 return -1; 2309 return -1;
2382 } 2310 }
@@ -2425,8 +2353,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2425 * by the most recent super block. 2353 * by the most recent super block.
2426 */ 2354 */
2427 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2355 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2428 printk(KERN_INFO 2356 pr_info("btrfsic: abort cyclic linkage (case 1).\n");
2429 "btrfsic: abort cyclic linkage (case 1).\n");
2430 2357
2431 return ret; 2358 return ret;
2432 } 2359 }
@@ -2437,9 +2364,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2437 */ 2364 */
2438 list_for_each_entry(l, &block->ref_to_list, node_ref_to) { 2365 list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
2439 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2366 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2440 printk(KERN_INFO 2367 pr_info("rl=%d, %c @%llu (%s/%llu/%d) %u* refers to %c @%llu (%s/%llu/%d)\n",
2441 "rl=%d, %c @%llu (%s/%llu/%d)"
2442 " %u* refers to %c @%llu (%s/%llu/%d)\n",
2443 recursion_level, 2368 recursion_level,
2444 btrfsic_get_block_type(state, block), 2369 btrfsic_get_block_type(state, block),
2445 block->logical_bytenr, block->dev_state->name, 2370 block->logical_bytenr, block->dev_state->name,
@@ -2451,9 +2376,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2451 l->block_ref_to->dev_bytenr, 2376 l->block_ref_to->dev_bytenr,
2452 l->block_ref_to->mirror_num); 2377 l->block_ref_to->mirror_num);
2453 if (l->block_ref_to->never_written) { 2378 if (l->block_ref_to->never_written) {
2454 printk(KERN_INFO "btrfs: attempt to write superblock" 2379 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is never written!\n",
2455 " which references block %c @%llu (%s/%llu/%d)"
2456 " which is never written!\n",
2457 btrfsic_get_block_type(state, l->block_ref_to), 2380 btrfsic_get_block_type(state, l->block_ref_to),
2458 l->block_ref_to->logical_bytenr, 2381 l->block_ref_to->logical_bytenr,
2459 l->block_ref_to->dev_state->name, 2382 l->block_ref_to->dev_state->name,
@@ -2461,9 +2384,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2461 l->block_ref_to->mirror_num); 2384 l->block_ref_to->mirror_num);
2462 ret = -1; 2385 ret = -1;
2463 } else if (!l->block_ref_to->is_iodone) { 2386 } else if (!l->block_ref_to->is_iodone) {
2464 printk(KERN_INFO "btrfs: attempt to write superblock" 2387 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not yet iodone!\n",
2465 " which references block %c @%llu (%s/%llu/%d)"
2466 " which is not yet iodone!\n",
2467 btrfsic_get_block_type(state, l->block_ref_to), 2388 btrfsic_get_block_type(state, l->block_ref_to),
2468 l->block_ref_to->logical_bytenr, 2389 l->block_ref_to->logical_bytenr,
2469 l->block_ref_to->dev_state->name, 2390 l->block_ref_to->dev_state->name,
@@ -2471,9 +2392,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2471 l->block_ref_to->mirror_num); 2392 l->block_ref_to->mirror_num);
2472 ret = -1; 2393 ret = -1;
2473 } else if (l->block_ref_to->iodone_w_error) { 2394 } else if (l->block_ref_to->iodone_w_error) {
2474 printk(KERN_INFO "btrfs: attempt to write superblock" 2395 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which has write error!\n",
2475 " which references block %c @%llu (%s/%llu/%d)"
2476 " which has write error!\n",
2477 btrfsic_get_block_type(state, l->block_ref_to), 2396 btrfsic_get_block_type(state, l->block_ref_to),
2478 l->block_ref_to->logical_bytenr, 2397 l->block_ref_to->logical_bytenr,
2479 l->block_ref_to->dev_state->name, 2398 l->block_ref_to->dev_state->name,
@@ -2486,10 +2405,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2486 l->parent_generation && 2405 l->parent_generation &&
2487 BTRFSIC_GENERATION_UNKNOWN != 2406 BTRFSIC_GENERATION_UNKNOWN !=
2488 l->block_ref_to->generation) { 2407 l->block_ref_to->generation) {
2489 printk(KERN_INFO "btrfs: attempt to write superblock" 2408 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) with generation %llu != parent generation %llu!\n",
2490 " which references block %c @%llu (%s/%llu/%d)"
2491 " with generation %llu !="
2492 " parent generation %llu!\n",
2493 btrfsic_get_block_type(state, l->block_ref_to), 2409 btrfsic_get_block_type(state, l->block_ref_to),
2494 l->block_ref_to->logical_bytenr, 2410 l->block_ref_to->logical_bytenr,
2495 l->block_ref_to->dev_state->name, 2411 l->block_ref_to->dev_state->name,
@@ -2500,11 +2416,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2500 ret = -1; 2416 ret = -1;
2501 } else if (l->block_ref_to->flush_gen > 2417 } else if (l->block_ref_to->flush_gen >
2502 l->block_ref_to->dev_state->last_flush_gen) { 2418 l->block_ref_to->dev_state->last_flush_gen) {
2503 printk(KERN_INFO "btrfs: attempt to write superblock" 2419 pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n",
2504 " which references block %c @%llu (%s/%llu/%d)"
2505 " which is not flushed out of disk's write cache"
2506 " (block flush_gen=%llu,"
2507 " dev->flush_gen=%llu)!\n",
2508 btrfsic_get_block_type(state, l->block_ref_to), 2420 btrfsic_get_block_type(state, l->block_ref_to),
2509 l->block_ref_to->logical_bytenr, 2421 l->block_ref_to->logical_bytenr,
2510 l->block_ref_to->dev_state->name, 2422 l->block_ref_to->dev_state->name,
@@ -2533,8 +2445,7 @@ static int btrfsic_is_block_ref_by_superblock(
2533 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { 2445 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
2534 /* refer to comment at "abort cyclic linkage (case 1)" */ 2446 /* refer to comment at "abort cyclic linkage (case 1)" */
2535 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2447 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2536 printk(KERN_INFO 2448 pr_info("btrfsic: abort cyclic linkage (case 2).\n");
2537 "btrfsic: abort cyclic linkage (case 2).\n");
2538 2449
2539 return 0; 2450 return 0;
2540 } 2451 }
@@ -2545,9 +2456,7 @@ static int btrfsic_is_block_ref_by_superblock(
2545 */ 2456 */
2546 list_for_each_entry(l, &block->ref_from_list, node_ref_from) { 2457 list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
2547 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2458 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2548 printk(KERN_INFO 2459 pr_info("rl=%d, %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
2549 "rl=%d, %c @%llu (%s/%llu/%d)"
2550 " is ref %u* from %c @%llu (%s/%llu/%d)\n",
2551 recursion_level, 2460 recursion_level,
2552 btrfsic_get_block_type(state, block), 2461 btrfsic_get_block_type(state, block),
2553 block->logical_bytenr, block->dev_state->name, 2462 block->logical_bytenr, block->dev_state->name,
@@ -2577,9 +2486,7 @@ static int btrfsic_is_block_ref_by_superblock(
2577static void btrfsic_print_add_link(const struct btrfsic_state *state, 2486static void btrfsic_print_add_link(const struct btrfsic_state *state,
2578 const struct btrfsic_block_link *l) 2487 const struct btrfsic_block_link *l)
2579{ 2488{
2580 printk(KERN_INFO 2489 pr_info("Add %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
2581 "Add %u* link from %c @%llu (%s/%llu/%d)"
2582 " to %c @%llu (%s/%llu/%d).\n",
2583 l->ref_cnt, 2490 l->ref_cnt,
2584 btrfsic_get_block_type(state, l->block_ref_from), 2491 btrfsic_get_block_type(state, l->block_ref_from),
2585 l->block_ref_from->logical_bytenr, 2492 l->block_ref_from->logical_bytenr,
@@ -2594,9 +2501,7 @@ static void btrfsic_print_add_link(const struct btrfsic_state *state,
2594static void btrfsic_print_rem_link(const struct btrfsic_state *state, 2501static void btrfsic_print_rem_link(const struct btrfsic_state *state,
2595 const struct btrfsic_block_link *l) 2502 const struct btrfsic_block_link *l)
2596{ 2503{
2597 printk(KERN_INFO 2504 pr_info("Rem %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
2598 "Rem %u* link from %c @%llu (%s/%llu/%d)"
2599 " to %c @%llu (%s/%llu/%d).\n",
2600 l->ref_cnt, 2505 l->ref_cnt,
2601 btrfsic_get_block_type(state, l->block_ref_from), 2506 btrfsic_get_block_type(state, l->block_ref_from),
2602 l->block_ref_from->logical_bytenr, 2507 l->block_ref_from->logical_bytenr,
@@ -2708,8 +2613,7 @@ static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
2708 if (NULL == l) { 2613 if (NULL == l) {
2709 l = btrfsic_block_link_alloc(); 2614 l = btrfsic_block_link_alloc();
2710 if (NULL == l) { 2615 if (NULL == l) {
2711 printk(KERN_INFO 2616 pr_info("btrfsic: error, kmalloc failed!\n");
2712 "btrfsic: error, kmalloc" " failed!\n");
2713 return NULL; 2617 return NULL;
2714 } 2618 }
2715 2619
@@ -2756,13 +2660,12 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add(
2756 2660
2757 block = btrfsic_block_alloc(); 2661 block = btrfsic_block_alloc();
2758 if (NULL == block) { 2662 if (NULL == block) {
2759 printk(KERN_INFO "btrfsic: error, kmalloc failed!\n"); 2663 pr_info("btrfsic: error, kmalloc failed!\n");
2760 return NULL; 2664 return NULL;
2761 } 2665 }
2762 dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev); 2666 dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
2763 if (NULL == dev_state) { 2667 if (NULL == dev_state) {
2764 printk(KERN_INFO 2668 pr_info("btrfsic: error, lookup dev_state failed!\n");
2765 "btrfsic: error, lookup dev_state failed!\n");
2766 btrfsic_block_free(block); 2669 btrfsic_block_free(block);
2767 return NULL; 2670 return NULL;
2768 } 2671 }
@@ -2774,8 +2677,7 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add(
2774 block->never_written = never_written; 2677 block->never_written = never_written;
2775 block->mirror_num = mirror_num; 2678 block->mirror_num = mirror_num;
2776 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2679 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2777 printk(KERN_INFO 2680 pr_info("New %s%c-block @%llu (%s/%llu/%d)\n",
2778 "New %s%c-block @%llu (%s/%llu/%d)\n",
2779 additional_string, 2681 additional_string,
2780 btrfsic_get_block_type(state, block), 2682 btrfsic_get_block_type(state, block),
2781 block->logical_bytenr, dev_state->name, 2683 block->logical_bytenr, dev_state->name,
@@ -2810,9 +2712,7 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
2810 ret = btrfsic_map_block(state, bytenr, state->metablock_size, 2712 ret = btrfsic_map_block(state, bytenr, state->metablock_size,
2811 &block_ctx, mirror_num); 2713 &block_ctx, mirror_num);
2812 if (ret) { 2714 if (ret) {
2813 printk(KERN_INFO "btrfsic:" 2715 pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n",
2814 " btrfsic_map_block(logical @%llu,"
2815 " mirror %d) failed!\n",
2816 bytenr, mirror_num); 2716 bytenr, mirror_num);
2817 continue; 2717 continue;
2818 } 2718 }
@@ -2827,9 +2727,7 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
2827 } 2727 }
2828 2728
2829 if (WARN_ON(!match)) { 2729 if (WARN_ON(!match)) {
2830 printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio," 2730 pr_info("btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%s, phys_bytenr=%llu)!\n",
2831 " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
2832 " phys_bytenr=%llu)!\n",
2833 bytenr, dev_state->name, dev_bytenr); 2731 bytenr, dev_state->name, dev_bytenr);
2834 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2732 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
2835 ret = btrfsic_map_block(state, bytenr, 2733 ret = btrfsic_map_block(state, bytenr,
@@ -2838,8 +2736,7 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
2838 if (ret) 2736 if (ret)
2839 continue; 2737 continue;
2840 2738
2841 printk(KERN_INFO "Read logical bytenr @%llu maps to" 2739 pr_info("Read logical bytenr @%llu maps to (%s/%llu/%d)\n",
2842 " (%s/%llu/%d)\n",
2843 bytenr, block_ctx.dev->name, 2740 bytenr, block_ctx.dev->name,
2844 block_ctx.dev_bytenr, mirror_num); 2741 block_ctx.dev_bytenr, mirror_num);
2845 } 2742 }
@@ -2849,11 +2746,8 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
2849static struct btrfsic_dev_state *btrfsic_dev_state_lookup( 2746static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
2850 struct block_device *bdev) 2747 struct block_device *bdev)
2851{ 2748{
2852 struct btrfsic_dev_state *ds; 2749 return btrfsic_dev_state_hashtable_lookup(bdev,
2853 2750 &btrfsic_dev_state_hashtable);
2854 ds = btrfsic_dev_state_hashtable_lookup(bdev,
2855 &btrfsic_dev_state_hashtable);
2856 return ds;
2857} 2751}
2858 2752
2859int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh) 2753int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
@@ -2876,9 +2770,7 @@ int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
2876 dev_bytenr = 4096 * bh->b_blocknr; 2770 dev_bytenr = 4096 * bh->b_blocknr;
2877 if (dev_state->state->print_mask & 2771 if (dev_state->state->print_mask &
2878 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2772 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
2879 printk(KERN_INFO 2773 pr_info("submit_bh(op=0x%x,0x%x, blocknr=%llu (bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
2880 "submit_bh(op=0x%x,0x%x, blocknr=%llu "
2881 "(bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
2882 op, op_flags, (unsigned long long)bh->b_blocknr, 2774 op, op_flags, (unsigned long long)bh->b_blocknr,
2883 dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev); 2775 dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
2884 btrfsic_process_written_block(dev_state, dev_bytenr, 2776 btrfsic_process_written_block(dev_state, dev_bytenr,
@@ -2887,17 +2779,13 @@ int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
2887 } else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) { 2779 } else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) {
2888 if (dev_state->state->print_mask & 2780 if (dev_state->state->print_mask &
2889 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2781 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
2890 printk(KERN_INFO 2782 pr_info("submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
2891 "submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
2892 op, op_flags, bh->b_bdev); 2783 op, op_flags, bh->b_bdev);
2893 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2784 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
2894 if ((dev_state->state->print_mask & 2785 if ((dev_state->state->print_mask &
2895 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 2786 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
2896 BTRFSIC_PRINT_MASK_VERBOSE))) 2787 BTRFSIC_PRINT_MASK_VERBOSE)))
2897 printk(KERN_INFO 2788 pr_info("btrfsic_submit_bh(%s) with FLUSH but dummy block already in use (ignored)!\n",
2898 "btrfsic_submit_bh(%s) with FLUSH"
2899 " but dummy block already in use"
2900 " (ignored)!\n",
2901 dev_state->name); 2789 dev_state->name);
2902 } else { 2790 } else {
2903 struct btrfsic_block *const block = 2791 struct btrfsic_block *const block =
@@ -2942,9 +2830,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
2942 bio_is_patched = 0; 2830 bio_is_patched = 0;
2943 if (dev_state->state->print_mask & 2831 if (dev_state->state->print_mask &
2944 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2832 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
2945 printk(KERN_INFO 2833 pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
2946 "submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
2947 " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
2948 bio_op(bio), bio->bi_opf, bio->bi_vcnt, 2834 bio_op(bio), bio->bi_opf, bio->bi_vcnt,
2949 (unsigned long long)bio->bi_iter.bi_sector, 2835 (unsigned long long)bio->bi_iter.bi_sector,
2950 dev_bytenr, bio->bi_bdev); 2836 dev_bytenr, bio->bi_bdev);
@@ -2967,8 +2853,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
2967 } 2853 }
2968 if (dev_state->state->print_mask & 2854 if (dev_state->state->print_mask &
2969 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE) 2855 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
2970 printk(KERN_INFO 2856 pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
2971 "#%u: bytenr=%llu, len=%u, offset=%u\n",
2972 i, cur_bytenr, bio->bi_io_vec[i].bv_len, 2857 i, cur_bytenr, bio->bi_io_vec[i].bv_len,
2973 bio->bi_io_vec[i].bv_offset); 2858 bio->bi_io_vec[i].bv_offset);
2974 cur_bytenr += bio->bi_io_vec[i].bv_len; 2859 cur_bytenr += bio->bi_io_vec[i].bv_len;
@@ -2985,17 +2870,13 @@ static void __btrfsic_submit_bio(struct bio *bio)
2985 } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) { 2870 } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
2986 if (dev_state->state->print_mask & 2871 if (dev_state->state->print_mask &
2987 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2872 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
2988 printk(KERN_INFO 2873 pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
2989 "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
2990 bio_op(bio), bio->bi_opf, bio->bi_bdev); 2874 bio_op(bio), bio->bi_opf, bio->bi_bdev);
2991 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2875 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
2992 if ((dev_state->state->print_mask & 2876 if ((dev_state->state->print_mask &
2993 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 2877 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
2994 BTRFSIC_PRINT_MASK_VERBOSE))) 2878 BTRFSIC_PRINT_MASK_VERBOSE)))
2995 printk(KERN_INFO 2879 pr_info("btrfsic_submit_bio(%s) with FLUSH but dummy block already in use (ignored)!\n",
2996 "btrfsic_submit_bio(%s) with FLUSH"
2997 " but dummy block already in use"
2998 " (ignored)!\n",
2999 dev_state->name); 2880 dev_state->name);
3000 } else { 2881 } else {
3001 struct btrfsic_block *const block = 2882 struct btrfsic_block *const block =
@@ -3039,14 +2920,12 @@ int btrfsic_mount(struct btrfs_root *root,
3039 struct btrfs_device *device; 2920 struct btrfs_device *device;
3040 2921
3041 if (root->nodesize & ((u64)PAGE_SIZE - 1)) { 2922 if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
3042 printk(KERN_INFO 2923 pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
3043 "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
3044 root->nodesize, PAGE_SIZE); 2924 root->nodesize, PAGE_SIZE);
3045 return -1; 2925 return -1;
3046 } 2926 }
3047 if (root->sectorsize & ((u64)PAGE_SIZE - 1)) { 2927 if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
3048 printk(KERN_INFO 2928 pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
3049 "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
3050 root->sectorsize, PAGE_SIZE); 2929 root->sectorsize, PAGE_SIZE);
3051 return -1; 2930 return -1;
3052 } 2931 }
@@ -3054,7 +2933,7 @@ int btrfsic_mount(struct btrfs_root *root,
3054 if (!state) { 2933 if (!state) {
3055 state = vzalloc(sizeof(*state)); 2934 state = vzalloc(sizeof(*state));
3056 if (!state) { 2935 if (!state) {
3057 printk(KERN_INFO "btrfs check-integrity: vzalloc() failed!\n"); 2936 pr_info("btrfs check-integrity: vzalloc() failed!\n");
3058 return -1; 2937 return -1;
3059 } 2938 }
3060 } 2939 }
@@ -3086,8 +2965,7 @@ int btrfsic_mount(struct btrfs_root *root,
3086 2965
3087 ds = btrfsic_dev_state_alloc(); 2966 ds = btrfsic_dev_state_alloc();
3088 if (NULL == ds) { 2967 if (NULL == ds) {
3089 printk(KERN_INFO 2968 pr_info("btrfs check-integrity: kmalloc() failed!\n");
3090 "btrfs check-integrity: kmalloc() failed!\n");
3091 mutex_unlock(&btrfsic_mutex); 2969 mutex_unlock(&btrfsic_mutex);
3092 return -1; 2970 return -1;
3093 } 2971 }
@@ -3148,9 +3026,7 @@ void btrfsic_unmount(struct btrfs_root *root,
3148 } 3026 }
3149 3027
3150 if (NULL == state) { 3028 if (NULL == state) {
3151 printk(KERN_INFO 3029 pr_info("btrfsic: error, cannot find state information on umount!\n");
3152 "btrfsic: error, cannot find state information"
3153 " on umount!\n");
3154 mutex_unlock(&btrfsic_mutex); 3030 mutex_unlock(&btrfsic_mutex);
3155 return; 3031 return;
3156 } 3032 }
@@ -3177,9 +3053,7 @@ void btrfsic_unmount(struct btrfs_root *root,
3177 if (b_all->is_iodone || b_all->never_written) 3053 if (b_all->is_iodone || b_all->never_written)
3178 btrfsic_block_free(b_all); 3054 btrfsic_block_free(b_all);
3179 else 3055 else
3180 printk(KERN_INFO "btrfs: attempt to free %c-block" 3056 pr_info("btrfs: attempt to free %c-block @%llu (%s/%llu/%d) on umount which is not yet iodone!\n",
3181 " @%llu (%s/%llu/%d) on umount which is"
3182 " not yet iodone!\n",
3183 btrfsic_get_block_type(state, b_all), 3057 btrfsic_get_block_type(state, b_all),
3184 b_all->logical_bytenr, b_all->dev_state->name, 3058 b_all->logical_bytenr, b_all->dev_state->name,
3185 b_all->dev_bytenr, b_all->mirror_num); 3059 b_all->dev_bytenr, b_all->mirror_num);
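The check-integrity.c hunks above all apply the same two conversions: printk(KERN_INFO ...) becomes pr_info(), and format strings that were split across several source lines are joined back into one line so the complete message can be grepped for. A minimal userspace sketch of that pattern (pr_info here is only a printf stand-in, not the kernel macro):

/* illustrative only: shows the before/after shape of the conversion */
#include <stdio.h>

#define pr_info(fmt, ...) printf(fmt, ##__VA_ARGS__)	/* stand-in for the kernel helper */

int main(void)
{
	unsigned long long bytenr = 12345, dev_bytenr = 67890;
	int mirror_num = 1;

	/* old style: KERN_INFO level, string split across lines */
	printf("Written block @%llu (%s/%llu/%d)"
	       " found in hash table, %c.\n",
	       bytenr, "sda", dev_bytenr, mirror_num, 'M');

	/* new style: one pr_info() call with an unsplit format string */
	pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
		bytenr, "sda", dev_bytenr, mirror_num, 'M');
	return 0;
}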
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 029db6e1105c..ccc70d96958d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -783,8 +783,7 @@ void __init btrfs_init_compress(void)
783 */ 783 */
784 workspace = btrfs_compress_op[i]->alloc_workspace(); 784 workspace = btrfs_compress_op[i]->alloc_workspace();
785 if (IS_ERR(workspace)) { 785 if (IS_ERR(workspace)) {
786 printk(KERN_WARNING 786 pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
787 "BTRFS: cannot preallocate compression workspace, will try later");
788 } else { 787 } else {
789 atomic_set(&btrfs_comp_ws[i].total_ws, 1); 788 atomic_set(&btrfs_comp_ws[i].total_ws, 1);
790 btrfs_comp_ws[i].free_ws = 1; 789 btrfs_comp_ws[i].free_ws = 1;
@@ -854,8 +853,7 @@ again:
854 /* no burst */ 1); 853 /* no burst */ 1);
855 854
856 if (__ratelimit(&_rs)) { 855 if (__ratelimit(&_rs)) {
857 printk(KERN_WARNING 856 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
858 "no compression workspaces, low memory, retrying");
859 } 857 }
860 } 858 }
861 goto again; 859 goto again;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d1c56c94dd5a..f6ba165d3f81 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -45,9 +45,7 @@ static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
45 45
46struct btrfs_path *btrfs_alloc_path(void) 46struct btrfs_path *btrfs_alloc_path(void)
47{ 47{
48 struct btrfs_path *path; 48 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
49 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 return path;
51} 49}
52 50
53/* 51/*
@@ -1102,7 +1100,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1102 int level, ret; 1100 int level, ret;
1103 int last_ref = 0; 1101 int last_ref = 0;
1104 int unlock_orig = 0; 1102 int unlock_orig = 0;
1105 u64 parent_start; 1103 u64 parent_start = 0;
1106 1104
1107 if (*cow_ret == buf) 1105 if (*cow_ret == buf)
1108 unlock_orig = 1; 1106 unlock_orig = 1;
@@ -1121,13 +1119,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1121 else 1119 else
1122 btrfs_node_key(buf, &disk_key, 0); 1120 btrfs_node_key(buf, &disk_key, 0);
1123 1121
1124 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 1122 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1125 if (parent) 1123 parent_start = parent->start;
1126 parent_start = parent->start;
1127 else
1128 parent_start = 0;
1129 } else
1130 parent_start = 0;
1131 1124
1132 cow = btrfs_alloc_tree_block(trans, root, parent_start, 1125 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1133 root->root_key.objectid, &disk_key, level, 1126 root->root_key.objectid, &disk_key, level,
@@ -1170,8 +1163,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1170 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 1163 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1171 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) 1164 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1172 parent_start = buf->start; 1165 parent_start = buf->start;
1173 else
1174 parent_start = 0;
1175 1166
1176 extent_buffer_get(cow); 1167 extent_buffer_get(cow);
1177 tree_mod_log_set_root_pointer(root, cow, 1); 1168 tree_mod_log_set_root_pointer(root, cow, 1);
@@ -1182,11 +1173,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1182 free_extent_buffer(buf); 1173 free_extent_buffer(buf);
1183 add_root_to_dirty_list(root); 1174 add_root_to_dirty_list(root);
1184 } else { 1175 } else {
1185 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1186 parent_start = parent->start;
1187 else
1188 parent_start = 0;
1189
1190 WARN_ON(trans->transid != btrfs_header_generation(parent)); 1176 WARN_ON(trans->transid != btrfs_header_generation(parent));
1191 tree_mod_log_insert_key(root->fs_info, parent, parent_slot, 1177 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1192 MOD_LOG_KEY_REPLACE, GFP_NOFS); 1178 MOD_LOG_KEY_REPLACE, GFP_NOFS);
@@ -1729,20 +1715,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1729 return err; 1715 return err;
1730} 1716}
1731 1717
1732/*
1733 * The leaf data grows from end-to-front in the node.
1734 * this returns the address of the start of the last item,
1735 * which is the stop of the leaf data stack
1736 */
1737static inline unsigned int leaf_data_end(struct btrfs_root *root,
1738 struct extent_buffer *leaf)
1739{
1740 u32 nr = btrfs_header_nritems(leaf);
1741 if (nr == 0)
1742 return BTRFS_LEAF_DATA_SIZE(root);
1743 return btrfs_item_offset_nr(leaf, nr - 1);
1744}
1745
1746 1718
1747/* 1719/*
1748 * search for key in the extent_buffer. The items start at offset p, 1720 * search for key in the extent_buffer. The items start at offset p,
@@ -2268,7 +2240,6 @@ static void reada_for_search(struct btrfs_root *root,
2268 u64 search; 2240 u64 search;
2269 u64 target; 2241 u64 target;
2270 u64 nread = 0; 2242 u64 nread = 0;
2271 u64 gen;
2272 struct extent_buffer *eb; 2243 struct extent_buffer *eb;
2273 u32 nr; 2244 u32 nr;
2274 u32 blocksize; 2245 u32 blocksize;
@@ -2313,7 +2284,6 @@ static void reada_for_search(struct btrfs_root *root,
2313 search = btrfs_node_blockptr(node, nr); 2284 search = btrfs_node_blockptr(node, nr);
2314 if ((search <= target && target - search <= 65536) || 2285 if ((search <= target && target - search <= 65536) ||
2315 (search > target && search - target <= 65536)) { 2286 (search > target && search - target <= 65536)) {
2316 gen = btrfs_node_ptr_generation(node, nr);
2317 readahead_tree_block(root, search); 2287 readahead_tree_block(root, search);
2318 nread += blocksize; 2288 nread += blocksize;
2319 } 2289 }
@@ -4341,7 +4311,11 @@ again:
4341 if (path->slots[1] == 0) 4311 if (path->slots[1] == 0)
4342 fixup_low_keys(fs_info, path, &disk_key, 1); 4312 fixup_low_keys(fs_info, path, &disk_key, 1);
4343 } 4313 }
4344 btrfs_mark_buffer_dirty(right); 4314 /*
4315 * We create a new leaf 'right' for the required ins_len and
4316 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4317 * the content of ins_len to 'right'.
4318 */
4345 return ret; 4319 return ret;
4346 } 4320 }
4347 4321
@@ -4772,8 +4746,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4772 4746
4773 if (btrfs_leaf_free_space(root, leaf) < total_size) { 4747 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4774 btrfs_print_leaf(root, leaf); 4748 btrfs_print_leaf(root, leaf);
4775 btrfs_crit(root->fs_info, "not enough freespace need %u have %d", 4749 btrfs_crit(root->fs_info,
4776 total_size, btrfs_leaf_free_space(root, leaf)); 4750 "not enough freespace need %u have %d",
4751 total_size, btrfs_leaf_free_space(root, leaf));
4777 BUG(); 4752 BUG();
4778 } 4753 }
4779 4754
@@ -4782,8 +4757,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4782 4757
4783 if (old_data < data_end) { 4758 if (old_data < data_end) {
4784 btrfs_print_leaf(root, leaf); 4759 btrfs_print_leaf(root, leaf);
4785 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d", 4760 btrfs_crit(root->fs_info,
4786 slot, old_data, data_end); 4761 "slot %d old_data %d data_end %d",
4762 slot, old_data, data_end);
4787 BUG_ON(1); 4763 BUG_ON(1);
4788 } 4764 }
4789 /* 4765 /*
@@ -4793,7 +4769,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4793 for (i = slot; i < nritems; i++) { 4769 for (i = slot; i < nritems; i++) {
4794 u32 ioff; 4770 u32 ioff;
4795 4771
4796 item = btrfs_item_nr( i); 4772 item = btrfs_item_nr(i);
4797 ioff = btrfs_token_item_offset(leaf, item, &token); 4773 ioff = btrfs_token_item_offset(leaf, item, &token);
4798 btrfs_set_token_item_offset(leaf, item, 4774 btrfs_set_token_item_offset(leaf, item,
4799 ioff - total_data, &token); 4775 ioff - total_data, &token);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e62fd50237e4..6c21bad26a27 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -37,6 +37,7 @@
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/security.h> 38#include <linux/security.h>
39#include <linux/sizes.h> 39#include <linux/sizes.h>
40#include <linux/dynamic_debug.h>
40#include "extent_io.h" 41#include "extent_io.h"
41#include "extent_map.h" 42#include "extent_map.h"
42#include "async-thread.h" 43#include "async-thread.h"
@@ -676,9 +677,25 @@ struct btrfs_device;
676struct btrfs_fs_devices; 677struct btrfs_fs_devices;
677struct btrfs_balance_control; 678struct btrfs_balance_control;
678struct btrfs_delayed_root; 679struct btrfs_delayed_root;
680
681#define BTRFS_FS_BARRIER 1
682#define BTRFS_FS_CLOSING_START 2
683#define BTRFS_FS_CLOSING_DONE 3
684#define BTRFS_FS_LOG_RECOVERING 4
685#define BTRFS_FS_OPEN 5
686#define BTRFS_FS_QUOTA_ENABLED 6
687#define BTRFS_FS_QUOTA_ENABLING 7
688#define BTRFS_FS_QUOTA_DISABLING 8
689#define BTRFS_FS_UPDATE_UUID_TREE_GEN 9
690#define BTRFS_FS_CREATING_FREE_SPACE_TREE 10
691#define BTRFS_FS_BTREE_ERR 11
692#define BTRFS_FS_LOG1_ERR 12
693#define BTRFS_FS_LOG2_ERR 13
694
679struct btrfs_fs_info { 695struct btrfs_fs_info {
680 u8 fsid[BTRFS_FSID_SIZE]; 696 u8 fsid[BTRFS_FSID_SIZE];
681 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 697 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
698 unsigned long flags;
682 struct btrfs_root *extent_root; 699 struct btrfs_root *extent_root;
683 struct btrfs_root *tree_root; 700 struct btrfs_root *tree_root;
684 struct btrfs_root *chunk_root; 701 struct btrfs_root *chunk_root;
@@ -907,10 +924,6 @@ struct btrfs_fs_info {
907 int thread_pool_size; 924 int thread_pool_size;
908 925
909 struct kobject *space_info_kobj; 926 struct kobject *space_info_kobj;
910 int do_barriers;
911 int closing;
912 int log_root_recovering;
913 int open;
914 927
915 u64 total_pinned; 928 u64 total_pinned;
916 929
@@ -987,17 +1000,6 @@ struct btrfs_fs_info {
987#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1000#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
988 u32 check_integrity_print_mask; 1001 u32 check_integrity_print_mask;
989#endif 1002#endif
990 /*
991 * quota information
992 */
993 unsigned int quota_enabled:1;
994
995 /*
996 * quota_enabled only changes state after a commit. This holds the
997 * next state.
998 */
999 unsigned int pending_quota_state:1;
1000
1001 /* is qgroup tracking in a consistent state? */ 1003 /* is qgroup tracking in a consistent state? */
1002 u64 qgroup_flags; 1004 u64 qgroup_flags;
1003 1005
@@ -1061,7 +1063,6 @@ struct btrfs_fs_info {
1061 wait_queue_head_t replace_wait; 1063 wait_queue_head_t replace_wait;
1062 1064
1063 struct semaphore uuid_tree_rescan_sem; 1065 struct semaphore uuid_tree_rescan_sem;
1064 unsigned int update_uuid_tree_gen:1;
1065 1066
1066 /* Used to reclaim the metadata space in the background. */ 1067 /* Used to reclaim the metadata space in the background. */
1067 struct work_struct async_reclaim_work; 1068 struct work_struct async_reclaim_work;
@@ -1080,7 +1081,6 @@ struct btrfs_fs_info {
1080 */ 1081 */
1081 struct list_head pinned_chunks; 1082 struct list_head pinned_chunks;
1082 1083
1083 int creating_free_space_tree;
1084 /* Used to record internally whether fs has been frozen */ 1084 /* Used to record internally whether fs has been frozen */
1085 int fs_frozen; 1085 int fs_frozen;
1086}; 1086};
@@ -1435,13 +1435,13 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
1435#define cpu_to_le8(v) (v) 1435#define cpu_to_le8(v) (v)
1436#define __le8 u8 1436#define __le8 u8
1437 1437
1438#define read_eb_member(eb, ptr, type, member, result) ( \ 1438#define read_eb_member(eb, ptr, type, member, result) (\
1439 read_extent_buffer(eb, (char *)(result), \ 1439 read_extent_buffer(eb, (char *)(result), \
1440 ((unsigned long)(ptr)) + \ 1440 ((unsigned long)(ptr)) + \
1441 offsetof(type, member), \ 1441 offsetof(type, member), \
1442 sizeof(((type *)0)->member))) 1442 sizeof(((type *)0)->member)))
1443 1443
1444#define write_eb_member(eb, ptr, type, member, result) ( \ 1444#define write_eb_member(eb, ptr, type, member, result) (\
1445 write_extent_buffer(eb, (char *)(result), \ 1445 write_extent_buffer(eb, (char *)(result), \
1446 ((unsigned long)(ptr)) + \ 1446 ((unsigned long)(ptr)) + \
1447 offsetof(type, member), \ 1447 offsetof(type, member), \
@@ -2293,6 +2293,21 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
2293 return offsetof(struct btrfs_leaf, items); 2293 return offsetof(struct btrfs_leaf, items);
2294} 2294}
2295 2295
2296/*
2297 * The leaf data grows from end-to-front in the node.
2298 * this returns the address of the start of the last item,
2299 * which is the stop of the leaf data stack
2300 */
2301static inline unsigned int leaf_data_end(struct btrfs_root *root,
2302 struct extent_buffer *leaf)
2303{
2304 u32 nr = btrfs_header_nritems(leaf);
2305
2306 if (nr == 0)
2307 return BTRFS_LEAF_DATA_SIZE(root);
2308 return btrfs_item_offset_nr(leaf, nr - 1);
2309}
2310
2296/* struct btrfs_file_extent_item */ 2311/* struct btrfs_file_extent_item */
2297BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); 2312BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
2298BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, 2313BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
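The leaf_data_end() helper relocated into ctree.h above encodes the layout its comment describes: item data is packed from the back of the leaf toward the front, so the end of the data stack is the offset of the most recently inserted item, or the whole data area when the leaf is empty. A simplified userspace model (made-up sizes and structures, not the on-disk format):

#include <stdio.h>

#define LEAF_DATA_SIZE 4096u	/* stand-in for BTRFS_LEAF_DATA_SIZE(root) */

struct item {
	unsigned int offset;	/* offset of this item's data within the leaf */
	unsigned int size;
};

static unsigned int leaf_data_end(const struct item *items, unsigned int nr)
{
	if (nr == 0)
		return LEAF_DATA_SIZE;
	return items[nr - 1].offset;	/* last item sits lowest in the data area */
}

int main(void)
{
	struct item items[2];
	unsigned int nr = 0;

	printf("empty leaf: data end = %u\n", leaf_data_end(items, nr));

	/* the first item's data goes at the very end of the data area */
	items[0].size = 100;
	items[0].offset = LEAF_DATA_SIZE - items[0].size;
	nr = 1;

	/* each following item is packed immediately in front of the previous one */
	items[1].size = 50;
	items[1].offset = items[0].offset - items[1].size;
	nr = 2;

	printf("two items: data end = %u\n", leaf_data_end(items, nr));
	return 0;
}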
@@ -2867,10 +2882,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2867static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) 2882static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
2868{ 2883{
2869 /* 2884 /*
2870 * Get synced with close_ctree() 2885 * Do it this way so we only ever do one test_bit in the normal case.
2871 */ 2886 */
2872 smp_mb(); 2887 if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
2873 return fs_info->closing; 2888 if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
2889 return 2;
2890 return 1;
2891 }
2892 return 0;
2874} 2893}
2875 2894
2876/* 2895/*
@@ -3118,7 +3137,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
3118int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, 3137int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
3119 int nr); 3138 int nr);
3120int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 3139int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
3121 struct extent_state **cached_state); 3140 struct extent_state **cached_state, int dedupe);
3122int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 3141int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
3123 struct btrfs_root *new_root, 3142 struct btrfs_root *new_root,
3124 struct btrfs_root *parent_root, 3143 struct btrfs_root *parent_root,
@@ -3236,14 +3255,17 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
3236 unsigned long new_flags); 3255 unsigned long new_flags);
3237int btrfs_sync_fs(struct super_block *sb, int wait); 3256int btrfs_sync_fs(struct super_block *sb, int wait);
3238 3257
3258static inline __printf(2, 3)
3259void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
3260{
3261}
3262
3239#ifdef CONFIG_PRINTK 3263#ifdef CONFIG_PRINTK
3240__printf(2, 3) 3264__printf(2, 3)
3241void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); 3265void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
3242#else 3266#else
3243static inline __printf(2, 3) 3267#define btrfs_printk(fs_info, fmt, args...) \
3244void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) 3268 btrfs_no_printk(fs_info, fmt, ##args)
3245{
3246}
3247#endif 3269#endif
3248 3270
3249#define btrfs_emerg(fs_info, fmt, args...) \ 3271#define btrfs_emerg(fs_info, fmt, args...) \
@@ -3314,7 +3336,35 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
3314 btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args) 3336 btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
3315#define btrfs_info_rl(fs_info, fmt, args...) \ 3337#define btrfs_info_rl(fs_info, fmt, args...) \
3316 btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args) 3338 btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)
3317#ifdef DEBUG 3339
3340#if defined(CONFIG_DYNAMIC_DEBUG)
3341#define btrfs_debug(fs_info, fmt, args...) \
3342do { \
3343 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
3344 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
3345 btrfs_printk(fs_info, KERN_DEBUG fmt, ##args); \
3346} while (0)
3347#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
3348do { \
3349 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
3350 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
3351 btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args); \
3352} while (0)
3353#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
3354do { \
3355 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
3356 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
3357 btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, \
3358 ##args);\
3359} while (0)
3360#define btrfs_debug_rl(fs_info, fmt, args...) \
3361do { \
3362 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
3363 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
3364 btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, \
3365 ##args); \
3366} while (0)
3367#elif defined(DEBUG)
3318#define btrfs_debug(fs_info, fmt, args...) \ 3368#define btrfs_debug(fs_info, fmt, args...) \
3319 btrfs_printk(fs_info, KERN_DEBUG fmt, ##args) 3369 btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
3320#define btrfs_debug_in_rcu(fs_info, fmt, args...) \ 3370#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
@@ -3325,13 +3375,13 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
3325 btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args) 3375 btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
3326#else 3376#else
3327#define btrfs_debug(fs_info, fmt, args...) \ 3377#define btrfs_debug(fs_info, fmt, args...) \
3328 no_printk(KERN_DEBUG fmt, ##args) 3378 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
3329#define btrfs_debug_in_rcu(fs_info, fmt, args...) \ 3379#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
3330 no_printk(KERN_DEBUG fmt, ##args) 3380 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
3331#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ 3381#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
3332 no_printk(KERN_DEBUG fmt, ##args) 3382 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
3333#define btrfs_debug_rl(fs_info, fmt, args...) \ 3383#define btrfs_debug_rl(fs_info, fmt, args...) \
3334 no_printk(KERN_DEBUG fmt, ##args) 3384 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
3335#endif 3385#endif
3336 3386
3337#define btrfs_printk_in_rcu(fs_info, fmt, args...) \ 3387#define btrfs_printk_in_rcu(fs_info, fmt, args...) \
@@ -3362,7 +3412,7 @@ do { \
3362__cold 3412__cold
3363static inline void assfail(char *expr, char *file, int line) 3413static inline void assfail(char *expr, char *file, int line)
3364{ 3414{
3365 pr_err("BTRFS: assertion failed: %s, file: %s, line: %d", 3415 pr_err("assertion failed: %s, file: %s, line: %d\n",
3366 expr, file, line); 3416 expr, file, line);
3367 BUG(); 3417 BUG();
3368} 3418}
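Several of the ctree.h hunks above replace individual members of btrfs_fs_info (do_barriers, closing, log_root_recovering, open, quota_enabled, update_uuid_tree_gen, creating_free_space_tree) with numbered bits in a single flags word that later hunks test with test_bit(). A sketch of the pattern, using plain non-atomic bit operations instead of the kernel's set_bit()/test_bit(), with fs_closing() mirroring the rewritten btrfs_fs_closing() (0 = open, 1 = closing started, 2 = closing done):

#include <stdio.h>

#define BTRFS_FS_CLOSING_START	2
#define BTRFS_FS_CLOSING_DONE	3

struct fs_info {
	unsigned long flags;	/* stand-in for btrfs_fs_info::flags */
};

static int test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

static void set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

/* same shape as the new btrfs_fs_closing(): one test_bit in the common case */
static int fs_closing(struct fs_info *fs_info)
{
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct fs_info fs = { .flags = 0 };

	printf("open:    %d\n", fs_closing(&fs));
	set_bit(BTRFS_FS_CLOSING_START, &fs.flags);
	printf("closing: %d\n", fs_closing(&fs));
	set_bit(BTRFS_FS_CLOSING_DONE, &fs.flags);
	printf("closed:  %d\n", fs_closing(&fs));
	return 0;
}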
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3eeb9cd8cfa5..0fcf5f25d524 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -385,11 +385,8 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
385 struct btrfs_delayed_node *delayed_node, 385 struct btrfs_delayed_node *delayed_node,
386 struct btrfs_key *key) 386 struct btrfs_key *key)
387{ 387{
388 struct btrfs_delayed_item *item; 388 return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
389
390 item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
391 NULL, NULL); 389 NULL, NULL);
392 return item;
393} 390}
394 391
395static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node, 392static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
@@ -1481,11 +1478,10 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1481 mutex_lock(&delayed_node->mutex); 1478 mutex_lock(&delayed_node->mutex);
1482 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item); 1479 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1483 if (unlikely(ret)) { 1480 if (unlikely(ret)) {
1484 btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) " 1481 btrfs_err(root->fs_info,
1485 "into the insertion tree of the delayed node" 1482 "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1486 "(root id: %llu, inode id: %llu, errno: %d)", 1483 name_len, name, delayed_node->root->objectid,
1487 name_len, name, delayed_node->root->objectid, 1484 delayed_node->inode_id, ret);
1488 delayed_node->inode_id, ret);
1489 BUG(); 1485 BUG();
1490 } 1486 }
1491 mutex_unlock(&delayed_node->mutex); 1487 mutex_unlock(&delayed_node->mutex);
@@ -1553,11 +1549,9 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1553 mutex_lock(&node->mutex); 1549 mutex_lock(&node->mutex);
1554 ret = __btrfs_add_delayed_deletion_item(node, item); 1550 ret = __btrfs_add_delayed_deletion_item(node, item);
1555 if (unlikely(ret)) { 1551 if (unlikely(ret)) {
1556 btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) " 1552 btrfs_err(root->fs_info,
1557 "into the deletion tree of the delayed node" 1553 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1558 "(root id: %llu, inode id: %llu, errno: %d)", 1554 index, node->root->objectid, node->inode_id, ret);
1559 index, node->root->objectid, node->inode_id,
1560 ret);
1561 BUG(); 1555 BUG();
1562 } 1556 }
1563 mutex_unlock(&node->mutex); 1557 mutex_unlock(&node->mutex);
@@ -1874,7 +1868,8 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
1874 * leads to enospc problems. This means we also can't do 1868 * leads to enospc problems. This means we also can't do
1875 * delayed inode refs 1869 * delayed inode refs
1876 */ 1870 */
1877 if (BTRFS_I(inode)->root->fs_info->log_root_recovering) 1871 if (test_bit(BTRFS_FS_LOG_RECOVERING,
1872 &BTRFS_I(inode)->root->fs_info->flags))
1878 return -EAGAIN; 1873 return -EAGAIN;
1879 1874
1880 delayed_node = btrfs_get_or_create_delayed_node(inode); 1875 delayed_node = btrfs_get_or_create_delayed_node(inode);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ac02e041464b..8d93854a4b4f 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -322,10 +322,11 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
322 elem = list_first_entry(&fs_info->tree_mod_seq_list, 322 elem = list_first_entry(&fs_info->tree_mod_seq_list,
323 struct seq_list, list); 323 struct seq_list, list);
324 if (seq >= elem->seq) { 324 if (seq >= elem->seq) {
325 pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n", 325 btrfs_debug(fs_info,
326 (u32)(seq >> 32), (u32)seq, 326 "holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
327 (u32)(elem->seq >> 32), (u32)elem->seq, 327 (u32)(seq >> 32), (u32)seq,
328 delayed_refs); 328 (u32)(elem->seq >> 32), (u32)elem->seq,
329 delayed_refs);
329 ret = 1; 330 ret = 1;
330 } 331 }
331 } 332 }
@@ -770,7 +771,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
770 if (!head_ref) 771 if (!head_ref)
771 goto free_ref; 772 goto free_ref;
772 773
773 if (fs_info->quota_enabled && is_fstree(ref_root)) { 774 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
775 is_fstree(ref_root)) {
774 record = kmalloc(sizeof(*record), GFP_NOFS); 776 record = kmalloc(sizeof(*record), GFP_NOFS);
775 if (!record) 777 if (!record)
776 goto free_head_ref; 778 goto free_head_ref;
@@ -828,7 +830,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
828 return -ENOMEM; 830 return -ENOMEM;
829 } 831 }
830 832
831 if (fs_info->quota_enabled && is_fstree(ref_root)) { 833 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
834 is_fstree(ref_root)) {
832 record = kmalloc(sizeof(*record), GFP_NOFS); 835 record = kmalloc(sizeof(*record), GFP_NOFS);
833 if (!record) { 836 if (!record) {
834 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); 837 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index e9bbff3c0029..05169ef30596 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -218,8 +218,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
218 } 218 }
219 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 219 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
220 if (ret < 0) { 220 if (ret < 0) {
221 btrfs_warn(fs_info, "error %d while searching for dev_replace item!", 221 btrfs_warn(fs_info,
222 ret); 222 "error %d while searching for dev_replace item!",
223 ret);
223 goto out; 224 goto out;
224 } 225 }
225 226
@@ -238,8 +239,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
238 */ 239 */
239 ret = btrfs_del_item(trans, dev_root, path); 240 ret = btrfs_del_item(trans, dev_root, path);
240 if (ret != 0) { 241 if (ret != 0) {
241 btrfs_warn(fs_info, "delete too small dev_replace item failed %d!", 242 btrfs_warn(fs_info,
242 ret); 243 "delete too small dev_replace item failed %d!",
244 ret);
243 goto out; 245 goto out;
244 } 246 }
245 ret = 1; 247 ret = 1;
@@ -251,8 +253,8 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
251 ret = btrfs_insert_empty_item(trans, dev_root, path, 253 ret = btrfs_insert_empty_item(trans, dev_root, path,
252 &key, sizeof(*ptr)); 254 &key, sizeof(*ptr));
253 if (ret < 0) { 255 if (ret < 0) {
254 btrfs_warn(fs_info, "insert dev_replace item failed %d!", 256 btrfs_warn(fs_info,
255 ret); 257 "insert dev_replace item failed %d!", ret);
256 goto out; 258 goto out;
257 } 259 }
258 } 260 }
@@ -383,7 +385,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
383 385
384 ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device); 386 ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
385 if (ret) 387 if (ret)
386 btrfs_err(fs_info, "kobj add dev failed %d\n", ret); 388 btrfs_err(fs_info, "kobj add dev failed %d", ret);
387 389
388 btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1); 390 btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
389 391
@@ -772,9 +774,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
772 break; 774 break;
773 } 775 }
774 if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) { 776 if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
775 btrfs_info(fs_info, "cannot continue dev_replace, tgtdev is missing");
776 btrfs_info(fs_info, 777 btrfs_info(fs_info,
777 "you may cancel the operation after 'mount -o degraded'"); 778 "cannot continue dev_replace, tgtdev is missing");
779 btrfs_info(fs_info,
780 "you may cancel the operation after 'mount -o degraded'");
778 btrfs_dev_replace_unlock(dev_replace, 1); 781 btrfs_dev_replace_unlock(dev_replace, 1);
779 return 0; 782 return 0;
780 } 783 }
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 1752625fb4dd..0dc1a033275e 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -472,9 +472,10 @@ int verify_dir_item(struct btrfs_root *root,
472 /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */ 472 /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
473 if ((btrfs_dir_data_len(leaf, dir_item) + 473 if ((btrfs_dir_data_len(leaf, dir_item) +
474 btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) { 474 btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
475 btrfs_crit(root->fs_info, "invalid dir item name + data len: %u + %u", 475 btrfs_crit(root->fs_info,
476 (unsigned)btrfs_dir_name_len(leaf, dir_item), 476 "invalid dir item name + data len: %u + %u",
477 (unsigned)btrfs_dir_data_len(leaf, dir_item)); 477 (unsigned)btrfs_dir_name_len(leaf, dir_item),
478 (unsigned)btrfs_dir_data_len(leaf, dir_item));
478 return 1; 479 return 1;
479 } 480 }
480 481
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 54bc8c7c6bcd..e720d3e6ec20 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -326,8 +326,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
326 326
327 read_extent_buffer(buf, &val, 0, csum_size); 327 read_extent_buffer(buf, &val, 0, csum_size);
328 btrfs_warn_rl(fs_info, 328 btrfs_warn_rl(fs_info,
329 "%s checksum verify failed on %llu wanted %X found %X " 329 "%s checksum verify failed on %llu wanted %X found %X level %d",
330 "level %d",
331 fs_info->sb->s_id, buf->start, 330 fs_info->sb->s_id, buf->start,
332 val, found, btrfs_header_level(buf)); 331 val, found, btrfs_header_level(buf));
333 if (result != (char *)&inline_result) 332 if (result != (char *)&inline_result)
@@ -402,7 +401,8 @@ out:
402 * Return 0 if the superblock checksum type matches the checksum value of that 401 * Return 0 if the superblock checksum type matches the checksum value of that
403 * algorithm. Pass the raw disk superblock data. 402 * algorithm. Pass the raw disk superblock data.
404 */ 403 */
405static int btrfs_check_super_csum(char *raw_disk_sb) 404static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
405 char *raw_disk_sb)
406{ 406{
407 struct btrfs_super_block *disk_sb = 407 struct btrfs_super_block *disk_sb =
408 (struct btrfs_super_block *)raw_disk_sb; 408 (struct btrfs_super_block *)raw_disk_sb;
@@ -428,7 +428,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
428 } 428 }
429 429
430 if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) { 430 if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
431 printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n", 431 btrfs_err(fs_info, "unsupported checksum algorithm %u",
432 csum_type); 432 csum_type);
433 ret = 1; 433 ret = 1;
434 } 434 }
@@ -442,7 +442,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
442 */ 442 */
443static int btree_read_extent_buffer_pages(struct btrfs_root *root, 443static int btree_read_extent_buffer_pages(struct btrfs_root *root,
444 struct extent_buffer *eb, 444 struct extent_buffer *eb,
445 u64 start, u64 parent_transid) 445 u64 parent_transid)
446{ 446{
447 struct extent_io_tree *io_tree; 447 struct extent_io_tree *io_tree;
448 int failed = 0; 448 int failed = 0;
@@ -454,8 +454,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
454 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); 454 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
455 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; 455 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
456 while (1) { 456 while (1) {
457 ret = read_extent_buffer_pages(io_tree, eb, start, 457 ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
458 WAIT_COMPLETE,
459 btree_get_extent, mirror_num); 458 btree_get_extent, mirror_num);
460 if (!ret) { 459 if (!ret) {
461 if (!verify_parent_transid(io_tree, eb, 460 if (!verify_parent_transid(io_tree, eb,
@@ -547,9 +546,10 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
547} 546}
548 547
549#define CORRUPT(reason, eb, root, slot) \ 548#define CORRUPT(reason, eb, root, slot) \
550 btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \ 549 btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu," \
551 "root=%llu, slot=%d", reason, \ 550 " root=%llu, slot=%d", \
552 btrfs_header_bytenr(eb), root->objectid, slot) 551 btrfs_header_level(eb) == 0 ? "leaf" : "node",\
552 reason, btrfs_header_bytenr(eb), root->objectid, slot)
553 553
554static noinline int check_leaf(struct btrfs_root *root, 554static noinline int check_leaf(struct btrfs_root *root,
555 struct extent_buffer *leaf) 555 struct extent_buffer *leaf)
@@ -636,6 +636,10 @@ static noinline int check_leaf(struct btrfs_root *root,
636static int check_node(struct btrfs_root *root, struct extent_buffer *node) 636static int check_node(struct btrfs_root *root, struct extent_buffer *node)
637{ 637{
638 unsigned long nr = btrfs_header_nritems(node); 638 unsigned long nr = btrfs_header_nritems(node);
639 struct btrfs_key key, next_key;
640 int slot;
641 u64 bytenr;
642 int ret = 0;
639 643
640 if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) { 644 if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
641 btrfs_crit(root->fs_info, 645 btrfs_crit(root->fs_info,
@@ -643,7 +647,26 @@ static int check_node(struct btrfs_root *root, struct extent_buffer *node)
643 node->start, root->objectid, nr); 647 node->start, root->objectid, nr);
644 return -EIO; 648 return -EIO;
645 } 649 }
646 return 0; 650
651 for (slot = 0; slot < nr - 1; slot++) {
652 bytenr = btrfs_node_blockptr(node, slot);
653 btrfs_node_key_to_cpu(node, &key, slot);
654 btrfs_node_key_to_cpu(node, &next_key, slot + 1);
655
656 if (!bytenr) {
657 CORRUPT("invalid item slot", node, root, slot);
658 ret = -EIO;
659 goto out;
660 }
661
662 if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
663 CORRUPT("bad key order", node, root, slot);
664 ret = -EIO;
665 goto out;
666 }
667 }
668out:
669 return ret;
647} 670}
648 671
649static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, 672static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
@@ -1132,7 +1155,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
1132 if (IS_ERR(buf)) 1155 if (IS_ERR(buf))
1133 return; 1156 return;
1134 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, 1157 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1135 buf, 0, WAIT_NONE, btree_get_extent, 0); 1158 buf, WAIT_NONE, btree_get_extent, 0);
1136 free_extent_buffer(buf); 1159 free_extent_buffer(buf);
1137} 1160}
1138 1161
@@ -1150,7 +1173,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
1150 1173
1151 set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); 1174 set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
1152 1175
1153 ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK, 1176 ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
1154 btree_get_extent, mirror_num); 1177 btree_get_extent, mirror_num);
1155 if (ret) { 1178 if (ret) {
1156 free_extent_buffer(buf); 1179 free_extent_buffer(buf);
@@ -1206,7 +1229,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1206 if (IS_ERR(buf)) 1229 if (IS_ERR(buf))
1207 return buf; 1230 return buf;
1208 1231
1209 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 1232 ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
1210 if (ret) { 1233 if (ret) {
1211 free_extent_buffer(buf); 1234 free_extent_buffer(buf);
1212 return ERR_PTR(ret); 1235 return ERR_PTR(ret);
@@ -1839,7 +1862,7 @@ static int cleaner_kthread(void *arg)
1839 * Do not do anything if we might cause open_ctree() to block 1862 * Do not do anything if we might cause open_ctree() to block
1840 * before we have finished mounting the filesystem. 1863 * before we have finished mounting the filesystem.
1841 */ 1864 */
1842 if (!root->fs_info->open) 1865 if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
1843 goto sleep; 1866 goto sleep;
1844 1867
1845 if (!mutex_trylock(&root->fs_info->cleaner_mutex)) 1868 if (!mutex_trylock(&root->fs_info->cleaner_mutex))
@@ -2332,8 +2355,6 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2332 fs_info->qgroup_op_tree = RB_ROOT; 2355 fs_info->qgroup_op_tree = RB_ROOT;
2333 INIT_LIST_HEAD(&fs_info->dirty_qgroups); 2356 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2334 fs_info->qgroup_seq = 1; 2357 fs_info->qgroup_seq = 1;
2335 fs_info->quota_enabled = 0;
2336 fs_info->pending_quota_state = 0;
2337 fs_info->qgroup_ulist = NULL; 2358 fs_info->qgroup_ulist = NULL;
2338 fs_info->qgroup_rescan_running = false; 2359 fs_info->qgroup_rescan_running = false;
2339 mutex_init(&fs_info->qgroup_rescan_lock); 2360 mutex_init(&fs_info->qgroup_rescan_lock);
@@ -2518,8 +2539,7 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
2518 root = btrfs_read_tree_root(tree_root, &location); 2539 root = btrfs_read_tree_root(tree_root, &location);
2519 if (!IS_ERR(root)) { 2540 if (!IS_ERR(root)) {
2520 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2541 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2521 fs_info->quota_enabled = 1; 2542 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2522 fs_info->pending_quota_state = 1;
2523 fs_info->quota_root = root; 2543 fs_info->quota_root = root;
2524 } 2544 }
2525 2545
@@ -2710,8 +2730,7 @@ int open_ctree(struct super_block *sb,
2710 extent_io_tree_init(&fs_info->freed_extents[1], 2730 extent_io_tree_init(&fs_info->freed_extents[1],
2711 fs_info->btree_inode->i_mapping); 2731 fs_info->btree_inode->i_mapping);
2712 fs_info->pinned_extents = &fs_info->freed_extents[0]; 2732 fs_info->pinned_extents = &fs_info->freed_extents[0];
2713 fs_info->do_barriers = 1; 2733 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2714
2715 2734
2716 mutex_init(&fs_info->ordered_operations_mutex); 2735 mutex_init(&fs_info->ordered_operations_mutex);
2717 mutex_init(&fs_info->tree_log_mutex); 2736 mutex_init(&fs_info->tree_log_mutex);
@@ -2762,7 +2781,7 @@ int open_ctree(struct super_block *sb,
2762 * We want to check superblock checksum, the type is stored inside. 2781 * We want to check superblock checksum, the type is stored inside.
2763 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 2782 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2764 */ 2783 */
2765 if (btrfs_check_super_csum(bh->b_data)) { 2784 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2766 btrfs_err(fs_info, "superblock checksum mismatch"); 2785 btrfs_err(fs_info, "superblock checksum mismatch");
2767 err = -EINVAL; 2786 err = -EINVAL;
2768 brelse(bh); 2787 brelse(bh);
@@ -3199,10 +3218,9 @@ retry_root_backup:
3199 return ret; 3218 return ret;
3200 } 3219 }
3201 } else { 3220 } else {
3202 fs_info->update_uuid_tree_gen = 1; 3221 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3203 } 3222 }
3204 3223 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3205 fs_info->open = 1;
3206 3224
3207 /* 3225 /*
3208 * backuproot only affect mount behavior, and if open_ctree succeeded, 3226 * backuproot only affect mount behavior, and if open_ctree succeeded,
@@ -3607,7 +3625,7 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3607 } 3625 }
3608 3626
3609 if (min_tolerated == INT_MAX) { 3627 if (min_tolerated == INT_MAX) {
3610 pr_warn("BTRFS: unknown raid flag: %llu\n", flags); 3628 pr_warn("BTRFS: unknown raid flag: %llu", flags);
3611 min_tolerated = 0; 3629 min_tolerated = 0;
3612 } 3630 }
3613 3631
@@ -3893,8 +3911,7 @@ void close_ctree(struct btrfs_root *root)
3893 struct btrfs_fs_info *fs_info = root->fs_info; 3911 struct btrfs_fs_info *fs_info = root->fs_info;
3894 int ret; 3912 int ret;
3895 3913
3896 fs_info->closing = 1; 3914 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3897 smp_mb();
3898 3915
3899 /* wait for the qgroup rescan worker to stop */ 3916 /* wait for the qgroup rescan worker to stop */
3900 btrfs_qgroup_wait_for_completion(fs_info, false); 3917 btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3939,8 +3956,7 @@ void close_ctree(struct btrfs_root *root)
3939 kthread_stop(fs_info->transaction_kthread); 3956 kthread_stop(fs_info->transaction_kthread);
3940 kthread_stop(fs_info->cleaner_kthread); 3957 kthread_stop(fs_info->cleaner_kthread);
3941 3958
3942 fs_info->closing = 2; 3959 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3943 smp_mb();
3944 3960
3945 btrfs_free_qgroup_config(fs_info); 3961 btrfs_free_qgroup_config(fs_info);
3946 3962
@@ -3965,7 +3981,7 @@ void close_ctree(struct btrfs_root *root)
3965 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 3981 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3966 btrfs_stop_all_workers(fs_info); 3982 btrfs_stop_all_workers(fs_info);
3967 3983
3968 fs_info->open = 0; 3984 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3969 free_root_pointers(fs_info, 1); 3985 free_root_pointers(fs_info, 1);
3970 3986
3971 iput(fs_info->btree_inode); 3987 iput(fs_info->btree_inode);
@@ -4036,8 +4052,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4036 root = BTRFS_I(buf->pages[0]->mapping->host)->root; 4052 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4037 btrfs_assert_tree_locked(buf); 4053 btrfs_assert_tree_locked(buf);
4038 if (transid != root->fs_info->generation) 4054 if (transid != root->fs_info->generation)
4039 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, " 4055 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4040 "found %llu running %llu\n",
4041 buf->start, transid, root->fs_info->generation); 4056 buf->start, transid, root->fs_info->generation);
4042 was_dirty = set_extent_buffer_dirty(buf); 4057 was_dirty = set_extent_buffer_dirty(buf);
4043 if (!was_dirty) 4058 if (!was_dirty)
@@ -4088,7 +4103,7 @@ void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
4088int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) 4103int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
4089{ 4104{
4090 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; 4105 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4091 return btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 4106 return btree_read_extent_buffer_pages(root, buf, parent_transid);
4092} 4107}
4093 4108
4094static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 4109static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -4100,24 +4115,24 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4100 int ret = 0; 4115 int ret = 0;
4101 4116
4102 if (btrfs_super_magic(sb) != BTRFS_MAGIC) { 4117 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
4103 printk(KERN_ERR "BTRFS: no valid FS found\n"); 4118 btrfs_err(fs_info, "no valid FS found");
4104 ret = -EINVAL; 4119 ret = -EINVAL;
4105 } 4120 }
4106 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) 4121 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
4107 printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n", 4122 btrfs_warn(fs_info, "unrecognized super flag: %llu",
4108 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP); 4123 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
4109 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) { 4124 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
4110 printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n", 4125 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
4111 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL); 4126 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
4112 ret = -EINVAL; 4127 ret = -EINVAL;
4113 } 4128 }
4114 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) { 4129 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4115 printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n", 4130 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
4116 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL); 4131 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4117 ret = -EINVAL; 4132 ret = -EINVAL;
4118 } 4133 }
4119 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) { 4134 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4120 printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n", 4135 btrfs_err(fs_info, "log_root level too big: %d >= %d",
4121 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL); 4136 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4122 ret = -EINVAL; 4137 ret = -EINVAL;
4123 } 4138 }
@@ -4128,47 +4143,48 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4128 */ 4143 */
4129 if (!is_power_of_2(sectorsize) || sectorsize < 4096 || 4144 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4130 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) { 4145 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4131 printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize); 4146 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
4132 ret = -EINVAL; 4147 ret = -EINVAL;
4133 } 4148 }
4134 /* Only PAGE SIZE is supported yet */ 4149 /* Only PAGE SIZE is supported yet */
4135 if (sectorsize != PAGE_SIZE) { 4150 if (sectorsize != PAGE_SIZE) {
4136 printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n", 4151 btrfs_err(fs_info,
4137 sectorsize, PAGE_SIZE); 4152 "sectorsize %llu not supported yet, only support %lu",
4153 sectorsize, PAGE_SIZE);
4138 ret = -EINVAL; 4154 ret = -EINVAL;
4139 } 4155 }
4140 if (!is_power_of_2(nodesize) || nodesize < sectorsize || 4156 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4141 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) { 4157 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4142 printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize); 4158 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
4143 ret = -EINVAL; 4159 ret = -EINVAL;
4144 } 4160 }
4145 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) { 4161 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4146 printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n", 4162 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
4147 le32_to_cpu(sb->__unused_leafsize), 4163 le32_to_cpu(sb->__unused_leafsize), nodesize);
4148 nodesize);
4149 ret = -EINVAL; 4164 ret = -EINVAL;
4150 } 4165 }
4151 4166
4152 /* Root alignment check */ 4167 /* Root alignment check */
4153 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { 4168 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4154 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n", 4169 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
4155 btrfs_super_root(sb)); 4170 btrfs_super_root(sb));
4156 ret = -EINVAL; 4171 ret = -EINVAL;
4157 } 4172 }
4158 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { 4173 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4159 printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n", 4174 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
4160 btrfs_super_chunk_root(sb)); 4175 btrfs_super_chunk_root(sb));
4161 ret = -EINVAL; 4176 ret = -EINVAL;
4162 } 4177 }
4163 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { 4178 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4164 printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n", 4179 btrfs_warn(fs_info, "log_root block unaligned: %llu",
4165 btrfs_super_log_root(sb)); 4180 btrfs_super_log_root(sb));
4166 ret = -EINVAL; 4181 ret = -EINVAL;
4167 } 4182 }
4168 4183
4169 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) { 4184 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
4170 printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n", 4185 btrfs_err(fs_info,
4171 fs_info->fsid, sb->dev_item.fsid); 4186 "dev_item UUID does not match fsid: %pU != %pU",
4187 fs_info->fsid, sb->dev_item.fsid);
4172 ret = -EINVAL; 4188 ret = -EINVAL;
4173 } 4189 }
4174 4190
@@ -4178,25 +4194,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4178 */ 4194 */
4179 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { 4195 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4180 btrfs_err(fs_info, "bytes_used is too small %llu", 4196 btrfs_err(fs_info, "bytes_used is too small %llu",
4181 btrfs_super_bytes_used(sb)); 4197 btrfs_super_bytes_used(sb));
4182 ret = -EINVAL; 4198 ret = -EINVAL;
4183 } 4199 }
4184 if (!is_power_of_2(btrfs_super_stripesize(sb))) { 4200 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4185 btrfs_err(fs_info, "invalid stripesize %u", 4201 btrfs_err(fs_info, "invalid stripesize %u",
4186 btrfs_super_stripesize(sb)); 4202 btrfs_super_stripesize(sb));
4187 ret = -EINVAL; 4203 ret = -EINVAL;
4188 } 4204 }
4189 if (btrfs_super_num_devices(sb) > (1UL << 31)) 4205 if (btrfs_super_num_devices(sb) > (1UL << 31))
4190 printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", 4206 btrfs_warn(fs_info, "suspicious number of devices: %llu",
4191 btrfs_super_num_devices(sb)); 4207 btrfs_super_num_devices(sb));
4192 if (btrfs_super_num_devices(sb) == 0) { 4208 if (btrfs_super_num_devices(sb) == 0) {
4193 printk(KERN_ERR "BTRFS: number of devices is 0\n"); 4209 btrfs_err(fs_info, "number of devices is 0");
4194 ret = -EINVAL; 4210 ret = -EINVAL;
4195 } 4211 }
4196 4212
4197 if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) { 4213 if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4198 printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n", 4214 btrfs_err(fs_info, "super offset mismatch %llu != %u",
4199 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET); 4215 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4200 ret = -EINVAL; 4216 ret = -EINVAL;
4201 } 4217 }
4202 4218
@@ -4205,17 +4221,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4205 * and one chunk 4221 * and one chunk
4206 */ 4222 */
4207 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { 4223 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4208 printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n", 4224 btrfs_err(fs_info, "system chunk array too big %u > %u",
4209 btrfs_super_sys_array_size(sb), 4225 btrfs_super_sys_array_size(sb),
4210 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); 4226 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4211 ret = -EINVAL; 4227 ret = -EINVAL;
4212 } 4228 }
4213 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) 4229 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4214 + sizeof(struct btrfs_chunk)) { 4230 + sizeof(struct btrfs_chunk)) {
4215 printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n", 4231 btrfs_err(fs_info, "system chunk array too small %u < %zu",
4216 btrfs_super_sys_array_size(sb), 4232 btrfs_super_sys_array_size(sb),
4217 sizeof(struct btrfs_disk_key) 4233 sizeof(struct btrfs_disk_key)
4218 + sizeof(struct btrfs_chunk)); 4234 + sizeof(struct btrfs_chunk));
4219 ret = -EINVAL; 4235 ret = -EINVAL;
4220 } 4236 }
4221 4237
@@ -4224,14 +4240,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4224 * but it's still possible that it's the one that's wrong. 4240 * but it's still possible that it's the one that's wrong.
4225 */ 4241 */
4226 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) 4242 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4227 printk(KERN_WARNING 4243 btrfs_warn(fs_info,
4228 "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n", 4244 "suspicious: generation < chunk_root_generation: %llu < %llu",
4229 btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb)); 4245 btrfs_super_generation(sb),
4246 btrfs_super_chunk_root_generation(sb));
4230 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) 4247 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4231 && btrfs_super_cache_generation(sb) != (u64)-1) 4248 && btrfs_super_cache_generation(sb) != (u64)-1)
4232 printk(KERN_WARNING 4249 btrfs_warn(fs_info,
4233 "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n", 4250 "suspicious: generation < cache_generation: %llu < %llu",
4234 btrfs_super_generation(sb), btrfs_super_cache_generation(sb)); 4251 btrfs_super_generation(sb),
4252 btrfs_super_cache_generation(sb));
4235 4253
4236 return ret; 4254 return ret;
4237} 4255}
@@ -4475,9 +4493,80 @@ again:
4475 return 0; 4493 return 0;
4476} 4494}
4477 4495
4496static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4497{
4498 struct inode *inode;
4499
4500 inode = cache->io_ctl.inode;
4501 if (inode) {
4502 invalidate_inode_pages2(inode->i_mapping);
4503 BTRFS_I(inode)->generation = 0;
4504 cache->io_ctl.inode = NULL;
4505 iput(inode);
4506 }
4507 btrfs_put_block_group(cache);
4508}
4509
4510void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4511 struct btrfs_root *root)
4512{
4513 struct btrfs_block_group_cache *cache;
4514
4515 spin_lock(&cur_trans->dirty_bgs_lock);
4516 while (!list_empty(&cur_trans->dirty_bgs)) {
4517 cache = list_first_entry(&cur_trans->dirty_bgs,
4518 struct btrfs_block_group_cache,
4519 dirty_list);
4520 if (!cache) {
4521 btrfs_err(root->fs_info,
4522 "orphan block group dirty_bgs list");
4523 spin_unlock(&cur_trans->dirty_bgs_lock);
4524 return;
4525 }
4526
4527 if (!list_empty(&cache->io_list)) {
4528 spin_unlock(&cur_trans->dirty_bgs_lock);
4529 list_del_init(&cache->io_list);
4530 btrfs_cleanup_bg_io(cache);
4531 spin_lock(&cur_trans->dirty_bgs_lock);
4532 }
4533
4534 list_del_init(&cache->dirty_list);
4535 spin_lock(&cache->lock);
4536 cache->disk_cache_state = BTRFS_DC_ERROR;
4537 spin_unlock(&cache->lock);
4538
4539 spin_unlock(&cur_trans->dirty_bgs_lock);
4540 btrfs_put_block_group(cache);
4541 spin_lock(&cur_trans->dirty_bgs_lock);
4542 }
4543 spin_unlock(&cur_trans->dirty_bgs_lock);
4544
4545 while (!list_empty(&cur_trans->io_bgs)) {
4546 cache = list_first_entry(&cur_trans->io_bgs,
4547 struct btrfs_block_group_cache,
4548 io_list);
4549 if (!cache) {
4550 btrfs_err(root->fs_info,
4551 "orphan block group on io_bgs list");
4552 return;
4553 }
4554
4555 list_del_init(&cache->io_list);
4556 spin_lock(&cache->lock);
4557 cache->disk_cache_state = BTRFS_DC_ERROR;
4558 spin_unlock(&cache->lock);
4559 btrfs_cleanup_bg_io(cache);
4560 }
4561}
4562
4478void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, 4563void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4479 struct btrfs_root *root) 4564 struct btrfs_root *root)
4480{ 4565{
4566 btrfs_cleanup_dirty_bgs(cur_trans, root);
4567 ASSERT(list_empty(&cur_trans->dirty_bgs));
4568 ASSERT(list_empty(&cur_trans->io_bgs));
4569
4481 btrfs_destroy_delayed_refs(cur_trans, root); 4570 btrfs_destroy_delayed_refs(cur_trans, root);
4482 4571
4483 cur_trans->state = TRANS_STATE_COMMIT_START; 4572 cur_trans->state = TRANS_STATE_COMMIT_START;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index f19a982f5a4f..1a3237e5700f 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -136,6 +136,8 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
136 struct btrfs_fs_info *fs_info); 136 struct btrfs_fs_info *fs_info);
137int btrfs_add_log_tree(struct btrfs_trans_handle *trans, 137int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
138 struct btrfs_root *root); 138 struct btrfs_root *root);
139void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
140 struct btrfs_root *root);
139void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans, 141void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
140 struct btrfs_root *root); 142 struct btrfs_root *root);
141struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, 143struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 665da8f66ff1..210c94ac8818 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -87,7 +87,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
87 int force); 87 int force);
88static int find_next_key(struct btrfs_path *path, int level, 88static int find_next_key(struct btrfs_path *path, int level,
89 struct btrfs_key *key); 89 struct btrfs_key *key);
90static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 90static void dump_space_info(struct btrfs_fs_info *fs_info,
91 struct btrfs_space_info *info, u64 bytes,
91 int dump_block_groups); 92 int dump_block_groups);
92static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, 93static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
93 u64 ram_bytes, u64 num_bytes, int delalloc); 94 u64 ram_bytes, u64 num_bytes, int delalloc);
@@ -266,9 +267,8 @@ static int exclude_super_stripes(struct btrfs_root *root,
266 267
267 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 268 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
268 bytenr = btrfs_sb_offset(i); 269 bytenr = btrfs_sb_offset(i);
269 ret = btrfs_rmap_block(&root->fs_info->mapping_tree, 270 ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
270 cache->key.objectid, bytenr, 271 bytenr, 0, &logical, &nr, &stripe_len);
271 0, &logical, &nr, &stripe_len);
272 if (ret) 272 if (ret)
273 return ret; 273 return ret;
274 274
@@ -730,11 +730,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
730static struct btrfs_block_group_cache * 730static struct btrfs_block_group_cache *
731btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr) 731btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
732{ 732{
733 struct btrfs_block_group_cache *cache; 733 return block_group_cache_tree_search(info, bytenr, 0);
734
735 cache = block_group_cache_tree_search(info, bytenr, 0);
736
737 return cache;
738} 734}
739 735
740/* 736/*
@@ -744,11 +740,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
744 struct btrfs_fs_info *info, 740 struct btrfs_fs_info *info,
745 u64 bytenr) 741 u64 bytenr)
746{ 742{
747 struct btrfs_block_group_cache *cache; 743 return block_group_cache_tree_search(info, bytenr, 1);
748
749 cache = block_group_cache_tree_search(info, bytenr, 1);
750
751 return cache;
752} 744}
753 745
754static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, 746static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
@@ -2360,7 +2352,13 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2360 ins.type = BTRFS_EXTENT_ITEM_KEY; 2352 ins.type = BTRFS_EXTENT_ITEM_KEY;
2361 } 2353 }
2362 2354
2363 BUG_ON(node->ref_mod != 1); 2355 if (node->ref_mod != 1) {
2356 btrfs_err(root->fs_info,
2357 "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2358 node->bytenr, node->ref_mod, node->action, ref_root,
2359 parent);
2360 return -EIO;
2361 }
2364 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { 2362 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2365 BUG_ON(!extent_op || !extent_op->update_flags); 2363 BUG_ON(!extent_op || !extent_op->update_flags);
2366 ret = alloc_reserved_tree_block(trans, root, 2364 ret = alloc_reserved_tree_block(trans, root,
@@ -2590,7 +2588,9 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2590 if (must_insert_reserved) 2588 if (must_insert_reserved)
2591 locked_ref->must_insert_reserved = 1; 2589 locked_ref->must_insert_reserved = 1;
2592 locked_ref->processing = 0; 2590 locked_ref->processing = 0;
2593 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); 2591 btrfs_debug(fs_info,
2592 "run_delayed_extent_op returned %d",
2593 ret);
2594 btrfs_delayed_ref_unlock(locked_ref); 2594 btrfs_delayed_ref_unlock(locked_ref);
2595 return ret; 2595 return ret;
2596 } 2596 }
@@ -2650,7 +2650,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2650 locked_ref->processing = 0; 2650 locked_ref->processing = 0;
2651 btrfs_delayed_ref_unlock(locked_ref); 2651 btrfs_delayed_ref_unlock(locked_ref);
2652 btrfs_put_delayed_ref(ref); 2652 btrfs_put_delayed_ref(ref);
2653 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret); 2653 btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
2654 ret);
2654 return ret; 2655 return ret;
2655 } 2656 }
2656 2657
@@ -2940,7 +2941,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2940 if (trans->aborted) 2941 if (trans->aborted)
2941 return 0; 2942 return 0;
2942 2943
2943 if (root->fs_info->creating_free_space_tree) 2944 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
2944 return 0; 2945 return 0;
2945 2946
2946 if (root == root->fs_info->extent_root) 2947 if (root == root->fs_info->extent_root)
@@ -2971,7 +2972,6 @@ again:
2971 spin_unlock(&delayed_refs->lock); 2972 spin_unlock(&delayed_refs->lock);
2972 goto out; 2973 goto out;
2973 } 2974 }
2974 count = (unsigned long)-1;
2975 2975
2976 while (node) { 2976 while (node) {
2977 head = rb_entry(node, struct btrfs_delayed_ref_head, 2977 head = rb_entry(node, struct btrfs_delayed_ref_head,
@@ -3694,6 +3694,8 @@ again:
3694 goto again; 3694 goto again;
3695 } 3695 }
3696 spin_unlock(&cur_trans->dirty_bgs_lock); 3696 spin_unlock(&cur_trans->dirty_bgs_lock);
3697 } else if (ret < 0) {
3698 btrfs_cleanup_dirty_bgs(cur_trans, root);
3697 } 3699 }
3698 3700
3699 btrfs_free_path(path); 3701 btrfs_free_path(path);
@@ -4429,7 +4431,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
4429 if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) { 4431 if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
4430 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu", 4432 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4431 left, thresh, type); 4433 left, thresh, type);
4432 dump_space_info(info, 0, 0); 4434 dump_space_info(root->fs_info, info, 0, 0);
4433 } 4435 }
4434 4436
4435 if (left < thresh) { 4437 if (left < thresh) {
@@ -5186,7 +5188,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
5186 * which means we won't have fs_info->fs_root set, so don't do 5188 * which means we won't have fs_info->fs_root set, so don't do
5187 * the async reclaim as we will panic. 5189 * the async reclaim as we will panic.
5188 */ 5190 */
5189 if (!root->fs_info->log_root_recovering && 5191 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
5190 need_do_async_reclaim(space_info, root, used) && 5192 need_do_async_reclaim(space_info, root, used) &&
5191 !work_busy(&root->fs_info->async_reclaim_work)) { 5193 !work_busy(&root->fs_info->async_reclaim_work)) {
5192 trace_btrfs_trigger_flush(root->fs_info, 5194 trace_btrfs_trigger_flush(root->fs_info,
@@ -5792,7 +5794,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5792 int ret; 5794 int ret;
5793 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 5795 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5794 5796
5795 if (root->fs_info->quota_enabled) { 5797 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
5796 /* One for parent inode, two for dir entries */ 5798 /* One for parent inode, two for dir entries */
5797 num_bytes = 3 * root->nodesize; 5799 num_bytes = 3 * root->nodesize;
5798 ret = btrfs_qgroup_reserve_meta(root, num_bytes); 5800 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
@@ -5970,7 +5972,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5970 csum_bytes = BTRFS_I(inode)->csum_bytes; 5972 csum_bytes = BTRFS_I(inode)->csum_bytes;
5971 spin_unlock(&BTRFS_I(inode)->lock); 5973 spin_unlock(&BTRFS_I(inode)->lock);
5972 5974
5973 if (root->fs_info->quota_enabled) { 5975 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
5974 ret = btrfs_qgroup_reserve_meta(root, 5976 ret = btrfs_qgroup_reserve_meta(root,
5975 nr_extents * root->nodesize); 5977 nr_extents * root->nodesize);
5976 if (ret) 5978 if (ret)
@@ -6110,8 +6112,6 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
6110 * @start: start range we are writing to 6112 * @start: start range we are writing to
6111 * @len: how long the range we are writing to 6113 * @len: how long the range we are writing to
6112 * 6114 *
6113 * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
6114 *
6115 * This will do the following things 6115 * This will do the following things
6116 * 6116 *
6117 * o reserve space in data space info for num bytes 6117 * o reserve space in data space info for num bytes
@@ -6930,8 +6930,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6930 } 6930 }
6931 6931
6932 if (ret) { 6932 if (ret) {
6933 btrfs_err(info, "umm, got %d back from search, was looking for %llu", 6933 btrfs_err(info,
6934 ret, bytenr); 6934 "umm, got %d back from search, was looking for %llu",
6935 ret, bytenr);
6935 if (ret > 0) 6936 if (ret > 0)
6936 btrfs_print_leaf(extent_root, 6937 btrfs_print_leaf(extent_root,
6937 path->nodes[0]); 6938 path->nodes[0]);
@@ -6977,7 +6978,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6977 ret = btrfs_search_slot(trans, extent_root, &key, path, 6978 ret = btrfs_search_slot(trans, extent_root, &key, path,
6978 -1, 1); 6979 -1, 1);
6979 if (ret) { 6980 if (ret) {
6980 btrfs_err(info, "umm, got %d back from search, was looking for %llu", 6981 btrfs_err(info,
6982 "umm, got %d back from search, was looking for %llu",
6981 ret, bytenr); 6983 ret, bytenr);
6982 btrfs_print_leaf(extent_root, path->nodes[0]); 6984 btrfs_print_leaf(extent_root, path->nodes[0]);
6983 } 6985 }
@@ -7004,8 +7006,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
7004 7006
7005 refs = btrfs_extent_refs(leaf, ei); 7007 refs = btrfs_extent_refs(leaf, ei);
7006 if (refs < refs_to_drop) { 7008 if (refs < refs_to_drop) {
7007 btrfs_err(info, "trying to drop %d refs but we only have %Lu " 7009 btrfs_err(info,
7008 "for bytenr %Lu", refs_to_drop, refs, bytenr); 7010 "trying to drop %d refs but we only have %Lu for bytenr %Lu",
7011 refs_to_drop, refs, bytenr);
7009 ret = -EINVAL; 7012 ret = -EINVAL;
7010 btrfs_abort_transaction(trans, ret); 7013 btrfs_abort_transaction(trans, ret);
7011 goto out; 7014 goto out;
@@ -7901,23 +7904,24 @@ out:
7901 return ret; 7904 return ret;
7902} 7905}
7903 7906
7904static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 7907static void dump_space_info(struct btrfs_fs_info *fs_info,
7908 struct btrfs_space_info *info, u64 bytes,
7905 int dump_block_groups) 7909 int dump_block_groups)
7906{ 7910{
7907 struct btrfs_block_group_cache *cache; 7911 struct btrfs_block_group_cache *cache;
7908 int index = 0; 7912 int index = 0;
7909 7913
7910 spin_lock(&info->lock); 7914 spin_lock(&info->lock);
7911 printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n", 7915 btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
7912 info->flags, 7916 info->flags,
7913 info->total_bytes - info->bytes_used - info->bytes_pinned - 7917 info->total_bytes - info->bytes_used - info->bytes_pinned -
7914 info->bytes_reserved - info->bytes_readonly - 7918 info->bytes_reserved - info->bytes_readonly -
7915 info->bytes_may_use, (info->full) ? "" : "not "); 7919 info->bytes_may_use, (info->full) ? "" : "not ");
7916 printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, " 7920 btrfs_info(fs_info,
7917 "reserved=%llu, may_use=%llu, readonly=%llu\n", 7921 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
7918 info->total_bytes, info->bytes_used, info->bytes_pinned, 7922 info->total_bytes, info->bytes_used, info->bytes_pinned,
7919 info->bytes_reserved, info->bytes_may_use, 7923 info->bytes_reserved, info->bytes_may_use,
7920 info->bytes_readonly); 7924 info->bytes_readonly);
7921 spin_unlock(&info->lock); 7925 spin_unlock(&info->lock);
7922 7926
7923 if (!dump_block_groups) 7927 if (!dump_block_groups)
@@ -7927,12 +7931,11 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7927again: 7931again:
7928 list_for_each_entry(cache, &info->block_groups[index], list) { 7932 list_for_each_entry(cache, &info->block_groups[index], list) {
7929 spin_lock(&cache->lock); 7933 spin_lock(&cache->lock);
7930 printk(KERN_INFO "BTRFS: " 7934 btrfs_info(fs_info,
7931 "block group %llu has %llu bytes, " 7935 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
7932 "%llu used %llu pinned %llu reserved %s\n", 7936 cache->key.objectid, cache->key.offset,
7933 cache->key.objectid, cache->key.offset, 7937 btrfs_block_group_used(&cache->item), cache->pinned,
7934 btrfs_block_group_used(&cache->item), cache->pinned, 7938 cache->reserved, cache->ro ? "[readonly]" : "");
7935 cache->reserved, cache->ro ? "[readonly]" : "");
7936 btrfs_dump_free_space(cache, bytes); 7939 btrfs_dump_free_space(cache, bytes);
7937 spin_unlock(&cache->lock); 7940 spin_unlock(&cache->lock);
7938 } 7941 }
@@ -7946,6 +7949,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
7946 u64 empty_size, u64 hint_byte, 7949 u64 empty_size, u64 hint_byte,
7947 struct btrfs_key *ins, int is_data, int delalloc) 7950 struct btrfs_key *ins, int is_data, int delalloc)
7948{ 7951{
7952 struct btrfs_fs_info *fs_info = root->fs_info;
7949 bool final_tried = num_bytes == min_alloc_size; 7953 bool final_tried = num_bytes == min_alloc_size;
7950 u64 flags; 7954 u64 flags;
7951 int ret; 7955 int ret;
@@ -7956,8 +7960,7 @@ again:
7956 ret = find_free_extent(root, ram_bytes, num_bytes, empty_size, 7960 ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
7957 hint_byte, ins, flags, delalloc); 7961 hint_byte, ins, flags, delalloc);
7958 if (!ret && !is_data) { 7962 if (!ret && !is_data) {
7959 btrfs_dec_block_group_reservations(root->fs_info, 7963 btrfs_dec_block_group_reservations(fs_info, ins->objectid);
7960 ins->objectid);
7961 } else if (ret == -ENOSPC) { 7964 } else if (ret == -ENOSPC) {
7962 if (!final_tried && ins->offset) { 7965 if (!final_tried && ins->offset) {
7963 num_bytes = min(num_bytes >> 1, ins->offset); 7966 num_bytes = min(num_bytes >> 1, ins->offset);
@@ -7967,14 +7970,15 @@ again:
7967 if (num_bytes == min_alloc_size) 7970 if (num_bytes == min_alloc_size)
7968 final_tried = true; 7971 final_tried = true;
7969 goto again; 7972 goto again;
7970 } else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) { 7973 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
7971 struct btrfs_space_info *sinfo; 7974 struct btrfs_space_info *sinfo;
7972 7975
7973 sinfo = __find_space_info(root->fs_info, flags); 7976 sinfo = __find_space_info(fs_info, flags);
7974 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu", 7977 btrfs_err(root->fs_info,
7975 flags, num_bytes); 7978 "allocation failed flags %llu, wanted %llu",
7979 flags, num_bytes);
7976 if (sinfo) 7980 if (sinfo)
7977 dump_space_info(sinfo, num_bytes, 1); 7981 dump_space_info(fs_info, sinfo, num_bytes, 1);
7978 } 7982 }
7979 } 7983 }
7980 7984
@@ -8462,7 +8466,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8462 u64 refs; 8466 u64 refs;
8463 u64 flags; 8467 u64 flags;
8464 u32 nritems; 8468 u32 nritems;
8465 u32 blocksize;
8466 struct btrfs_key key; 8469 struct btrfs_key key;
8467 struct extent_buffer *eb; 8470 struct extent_buffer *eb;
8468 int ret; 8471 int ret;
@@ -8480,7 +8483,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8480 8483
8481 eb = path->nodes[wc->level]; 8484 eb = path->nodes[wc->level];
8482 nritems = btrfs_header_nritems(eb); 8485 nritems = btrfs_header_nritems(eb);
8483 blocksize = root->nodesize;
8484 8486
8485 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 8487 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8486 if (nread >= wc->reada_count) 8488 if (nread >= wc->reada_count)
@@ -8544,7 +8546,7 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
8544 u64 bytenr, num_bytes; 8546 u64 bytenr, num_bytes;
8545 8547
8546 /* We can be called directly from walk_up_proc() */ 8548 /* We can be called directly from walk_up_proc() */
8547 if (!root->fs_info->quota_enabled) 8549 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
8548 return 0; 8550 return 0;
8549 8551
8550 for (i = 0; i < nr; i++) { 8552 for (i = 0; i < nr; i++) {
@@ -8653,7 +8655,7 @@ static int account_shared_subtree(struct btrfs_trans_handle *trans,
8653 BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL); 8655 BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8654 BUG_ON(root_eb == NULL); 8656 BUG_ON(root_eb == NULL);
8655 8657
8656 if (!root->fs_info->quota_enabled) 8658 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
8657 return 0; 8659 return 0;
8658 8660
8659 if (!extent_buffer_uptodate(root_eb)) { 8661 if (!extent_buffer_uptodate(root_eb)) {
@@ -8884,14 +8886,13 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8884 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1, 8886 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8885 &wc->refs[level - 1], 8887 &wc->refs[level - 1],
8886 &wc->flags[level - 1]); 8888 &wc->flags[level - 1]);
8887 if (ret < 0) { 8889 if (ret < 0)
8888 btrfs_tree_unlock(next); 8890 goto out_unlock;
8889 return ret;
8890 }
8891 8891
8892 if (unlikely(wc->refs[level - 1] == 0)) { 8892 if (unlikely(wc->refs[level - 1] == 0)) {
8893 btrfs_err(root->fs_info, "Missing references."); 8893 btrfs_err(root->fs_info, "Missing references.");
8894 BUG(); 8894 ret = -EIO;
8895 goto out_unlock;
8895 } 8896 }
8896 *lookup_info = 0; 8897 *lookup_info = 0;
8897 8898
@@ -8943,7 +8944,12 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8943 } 8944 }
8944 8945
8945 level--; 8946 level--;
8946 BUG_ON(level != btrfs_header_level(next)); 8947 ASSERT(level == btrfs_header_level(next));
8948 if (level != btrfs_header_level(next)) {
8949 btrfs_err(root->fs_info, "mismatched level");
8950 ret = -EIO;
8951 goto out_unlock;
8952 }
8947 path->nodes[level] = next; 8953 path->nodes[level] = next;
8948 path->slots[level] = 0; 8954 path->slots[level] = 0;
8949 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 8955 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
@@ -8958,8 +8964,15 @@ skip:
8958 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 8964 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8959 parent = path->nodes[level]->start; 8965 parent = path->nodes[level]->start;
8960 } else { 8966 } else {
8961 BUG_ON(root->root_key.objectid != 8967 ASSERT(root->root_key.objectid ==
8962 btrfs_header_owner(path->nodes[level])); 8968 btrfs_header_owner(path->nodes[level]));
8969 if (root->root_key.objectid !=
8970 btrfs_header_owner(path->nodes[level])) {
8971 btrfs_err(root->fs_info,
8972 "mismatched block owner");
8973 ret = -EIO;
8974 goto out_unlock;
8975 }
8963 parent = 0; 8976 parent = 0;
8964 } 8977 }
8965 8978
@@ -8968,20 +8981,24 @@ skip:
8968 generation, level - 1); 8981 generation, level - 1);
8969 if (ret) { 8982 if (ret) {
8970 btrfs_err_rl(root->fs_info, 8983 btrfs_err_rl(root->fs_info,
8971 "Error " 8984 "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
8972 "%d accounting shared subtree. Quota " 8985 ret);
8973 "is out of sync, rescan required.",
8974 ret);
8975 } 8986 }
8976 } 8987 }
8977 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, 8988 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8978 root->root_key.objectid, level - 1, 0); 8989 root->root_key.objectid, level - 1, 0);
8979 BUG_ON(ret); /* -ENOMEM */ 8990 if (ret)
8991 goto out_unlock;
8980 } 8992 }
8993
8994 *lookup_info = 1;
8995 ret = 1;
8996
8997out_unlock:
8981 btrfs_tree_unlock(next); 8998 btrfs_tree_unlock(next);
8982 free_extent_buffer(next); 8999 free_extent_buffer(next);
8983 *lookup_info = 1; 9000
8984 return 1; 9001 return ret;
8985} 9002}
8986 9003
8987/* 9004/*
@@ -9061,10 +9078,8 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
9061 ret = account_leaf_items(trans, root, eb); 9078 ret = account_leaf_items(trans, root, eb);
9062 if (ret) { 9079 if (ret) {
9063 btrfs_err_rl(root->fs_info, 9080 btrfs_err_rl(root->fs_info,
9064 "error " 9081 "error %d accounting leaf items. Quota is out of sync, rescan required.",
9065 "%d accounting leaf items. Quota " 9082 ret);
9066 "is out of sync, rescan required.",
9067 ret);
9068 } 9083 }
9069 } 9084 }
9070 /* make block locked assertion in clean_tree_block happy */ 9085 /* make block locked assertion in clean_tree_block happy */
@@ -9180,9 +9195,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
9180 struct btrfs_block_rsv *block_rsv, int update_ref, 9195 struct btrfs_block_rsv *block_rsv, int update_ref,
9181 int for_reloc) 9196 int for_reloc)
9182{ 9197{
9198 struct btrfs_fs_info *fs_info = root->fs_info;
9183 struct btrfs_path *path; 9199 struct btrfs_path *path;
9184 struct btrfs_trans_handle *trans; 9200 struct btrfs_trans_handle *trans;
9185 struct btrfs_root *tree_root = root->fs_info->tree_root; 9201 struct btrfs_root *tree_root = fs_info->tree_root;
9186 struct btrfs_root_item *root_item = &root->root_item; 9202 struct btrfs_root_item *root_item = &root->root_item;
9187 struct walk_control *wc; 9203 struct walk_control *wc;
9188 struct btrfs_key key; 9204 struct btrfs_key key;
@@ -9191,7 +9207,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
9191 int level; 9207 int level;
9192 bool root_dropped = false; 9208 bool root_dropped = false;
9193 9209
9194 btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid); 9210 btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
9195 9211
9196 path = btrfs_alloc_path(); 9212 path = btrfs_alloc_path();
9197 if (!path) { 9213 if (!path) {
@@ -9320,7 +9336,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
9320 9336
9321 btrfs_end_transaction_throttle(trans, tree_root); 9337 btrfs_end_transaction_throttle(trans, tree_root);
9322 if (!for_reloc && btrfs_need_cleaner_sleep(root)) { 9338 if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
9323 pr_debug("BTRFS: drop snapshot early exit\n"); 9339 btrfs_debug(fs_info,
9340 "drop snapshot early exit");
9324 err = -EAGAIN; 9341 err = -EAGAIN;
9325 goto out_free; 9342 goto out_free;
9326 } 9343 }
@@ -9386,7 +9403,7 @@ out:
9386 if (!for_reloc && root_dropped == false) 9403 if (!for_reloc && root_dropped == false)
9387 btrfs_add_dead_root(root); 9404 btrfs_add_dead_root(root);
9388 if (err && err != -EAGAIN) 9405 if (err && err != -EAGAIN)
9389 btrfs_handle_fs_error(root->fs_info, err, NULL); 9406 btrfs_handle_fs_error(fs_info, err, NULL);
9390 return err; 9407 return err;
9391} 9408}
9392 9409
@@ -10020,7 +10037,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
10020 if (WARN_ON(space_info->bytes_pinned > 0 || 10037 if (WARN_ON(space_info->bytes_pinned > 0 ||
10021 space_info->bytes_reserved > 0 || 10038 space_info->bytes_reserved > 0 ||
10022 space_info->bytes_may_use > 0)) 10039 space_info->bytes_may_use > 0))
10023 dump_space_info(space_info, 0, 0); 10040 dump_space_info(info, space_info, 0, 0);
10024 list_del(&space_info->list); 10041 list_del(&space_info->list);
10025 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 10042 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
10026 struct kobject *kobj; 10043 struct kobject *kobj;
@@ -10069,7 +10086,8 @@ static void __link_block_group(struct btrfs_space_info *space_info,
10069 10086
10070 return; 10087 return;
10071out_err: 10088out_err:
10072 pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n"); 10089 btrfs_warn(cache->fs_info,
10090 "failed to add kobject for block cache, ignoring");
10073} 10091}
10074 10092
10075static struct btrfs_block_group_cache * 10093static struct btrfs_block_group_cache *
@@ -10127,6 +10145,11 @@ int btrfs_read_block_groups(struct btrfs_root *root)
10127 struct extent_buffer *leaf; 10145 struct extent_buffer *leaf;
10128 int need_clear = 0; 10146 int need_clear = 0;
10129 u64 cache_gen; 10147 u64 cache_gen;
10148 u64 feature;
10149 int mixed;
10150
10151 feature = btrfs_super_incompat_flags(info->super_copy);
10152 mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
10130 10153
10131 root = info->extent_root; 10154 root = info->extent_root;
10132 key.objectid = 0; 10155 key.objectid = 0;
@@ -10180,6 +10203,15 @@ int btrfs_read_block_groups(struct btrfs_root *root)
10180 btrfs_item_ptr_offset(leaf, path->slots[0]), 10203 btrfs_item_ptr_offset(leaf, path->slots[0]),
10181 sizeof(cache->item)); 10204 sizeof(cache->item));
10182 cache->flags = btrfs_block_group_flags(&cache->item); 10205 cache->flags = btrfs_block_group_flags(&cache->item);
10206 if (!mixed &&
10207 ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
10208 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
10209 btrfs_err(info,
10210"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
10211 cache->key.objectid);
10212 ret = -EINVAL;
10213 goto error;
10214 }
10183 10215
10184 key.objectid = found_key.objectid + found_key.offset; 10216 key.objectid = found_key.objectid + found_key.offset;
10185 btrfs_release_path(path); 10217 btrfs_release_path(path);
@@ -10789,7 +10821,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10789 struct btrfs_trans_handle *trans; 10821 struct btrfs_trans_handle *trans;
10790 int ret = 0; 10822 int ret = 0;
10791 10823
10792 if (!fs_info->open) 10824 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
10793 return; 10825 return;
10794 10826
10795 spin_lock(&fs_info->unused_bgs_lock); 10827 spin_lock(&fs_info->unused_bgs_lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 44fe66b53c8b..ee40384c394d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -20,6 +20,7 @@
20#include "locking.h" 20#include "locking.h"
21#include "rcu-string.h" 21#include "rcu-string.h"
22#include "backref.h" 22#include "backref.h"
23#include "transaction.h"
23 24
24static struct kmem_cache *extent_state_cache; 25static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache; 26static struct kmem_cache *extent_buffer_cache;
@@ -74,8 +75,7 @@ void btrfs_leak_debug_check(void)
74 75
75 while (!list_empty(&buffers)) { 76 while (!list_empty(&buffers)) {
76 eb = list_entry(buffers.next, struct extent_buffer, leak_list); 77 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
77 printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu " 78 pr_err("BTRFS: buffer leak start %llu len %lu refs %d\n",
78 "refs %d\n",
79 eb->start, eb->len, atomic_read(&eb->refs)); 79 eb->start, eb->len, atomic_read(&eb->refs));
80 list_del(&eb->leak_list); 80 list_del(&eb->leak_list);
81 kmem_cache_free(extent_buffer_cache, eb); 81 kmem_cache_free(extent_buffer_cache, eb);
@@ -460,8 +460,7 @@ static int insert_state(struct extent_io_tree *tree,
460 if (node) { 460 if (node) {
461 struct extent_state *found; 461 struct extent_state *found;
462 found = rb_entry(node, struct extent_state, rb_node); 462 found = rb_entry(node, struct extent_state, rb_node);
463 printk(KERN_ERR "BTRFS: found node %llu %llu on insert of " 463 pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
464 "%llu %llu\n",
465 found->start, found->end, start, end); 464 found->start, found->end, start, end);
466 return -EEXIST; 465 return -EEXIST;
467 } 466 }
@@ -572,9 +571,8 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
572 571
573static void extent_io_tree_panic(struct extent_io_tree *tree, int err) 572static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
574{ 573{
575 btrfs_panic(tree_fs_info(tree), err, "Locking error: " 574 btrfs_panic(tree_fs_info(tree), err,
576 "Extent tree was modified by another " 575 "Locking error: Extent tree was modified by another thread while locked.");
577 "thread while locked.");
578} 576}
579 577
580/* 578/*
@@ -1729,7 +1727,7 @@ out_failed:
1729} 1727}
1730 1728
1731void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, 1729void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1732 struct page *locked_page, 1730 u64 delalloc_end, struct page *locked_page,
1733 unsigned clear_bits, 1731 unsigned clear_bits,
1734 unsigned long page_ops) 1732 unsigned long page_ops)
1735{ 1733{
@@ -2122,8 +2120,9 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page,
2122 2120
2123 if (failrec->in_validation) { 2121 if (failrec->in_validation) {
2124 /* there was no real error, just free the record */ 2122 /* there was no real error, just free the record */
2125 pr_debug("clean_io_failure: freeing dummy error at %llu\n", 2123 btrfs_debug(fs_info,
2126 failrec->start); 2124 "clean_io_failure: freeing dummy error at %llu",
2125 failrec->start);
2127 goto out; 2126 goto out;
2128 } 2127 }
2129 if (fs_info->sb->s_flags & MS_RDONLY) 2128 if (fs_info->sb->s_flags & MS_RDONLY)
@@ -2189,6 +2188,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
2189int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, 2188int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2190 struct io_failure_record **failrec_ret) 2189 struct io_failure_record **failrec_ret)
2191{ 2190{
2191 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2192 struct io_failure_record *failrec; 2192 struct io_failure_record *failrec;
2193 struct extent_map *em; 2193 struct extent_map *em;
2194 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 2194 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
@@ -2236,8 +2236,9 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2236 em->compress_type); 2236 em->compress_type);
2237 } 2237 }
2238 2238
2239 pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n", 2239 btrfs_debug(fs_info,
2240 logical, start, failrec->len); 2240 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2241 logical, start, failrec->len);
2241 2242
2242 failrec->logical = logical; 2243 failrec->logical = logical;
2243 free_extent_map(em); 2244 free_extent_map(em);
@@ -2255,9 +2256,10 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2255 return ret; 2256 return ret;
2256 } 2257 }
2257 } else { 2258 } else {
2258 pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n", 2259 btrfs_debug(fs_info,
2259 failrec->logical, failrec->start, failrec->len, 2260 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2260 failrec->in_validation); 2261 failrec->logical, failrec->start, failrec->len,
2262 failrec->in_validation);
2261 /* 2263 /*
2262 * when data can be on disk more than twice, add to failrec here 2264 * when data can be on disk more than twice, add to failrec here
2263 * (e.g. with a list for failed_mirror) to make 2265 * (e.g. with a list for failed_mirror) to make
@@ -2273,18 +2275,19 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2273int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio, 2275int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2274 struct io_failure_record *failrec, int failed_mirror) 2276 struct io_failure_record *failrec, int failed_mirror)
2275{ 2277{
2278 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2276 int num_copies; 2279 int num_copies;
2277 2280
2278 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info, 2281 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2279 failrec->logical, failrec->len);
2280 if (num_copies == 1) { 2282 if (num_copies == 1) {
2281 /* 2283 /*
2282 * we only have a single copy of the data, so don't bother with 2284 * we only have a single copy of the data, so don't bother with
2283 * all the retry and error correction code that follows. no 2285 * all the retry and error correction code that follows. no
2284 * matter what the error is, it is very likely to persist. 2286 * matter what the error is, it is very likely to persist.
2285 */ 2287 */
2286 pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n", 2288 btrfs_debug(fs_info,
2287 num_copies, failrec->this_mirror, failed_mirror); 2289 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2290 num_copies, failrec->this_mirror, failed_mirror);
2288 return 0; 2291 return 0;
2289 } 2292 }
2290 2293
@@ -2323,8 +2326,9 @@ int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2323 } 2326 }
2324 2327
2325 if (failrec->this_mirror > num_copies) { 2328 if (failrec->this_mirror > num_copies) {
2326 pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n", 2329 btrfs_debug(fs_info,
2327 num_copies, failrec->this_mirror, failed_mirror); 2330 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2331 num_copies, failrec->this_mirror, failed_mirror);
2328 return 0; 2332 return 0;
2329 } 2333 }
2330 2334
@@ -2415,8 +2419,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2415 } 2419 }
2416 bio_set_op_attrs(bio, REQ_OP_READ, read_mode); 2420 bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
2417 2421
2418 pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n", 2422 btrfs_debug(btrfs_sb(inode->i_sb),
2419 read_mode, failrec->this_mirror, failrec->in_validation); 2423 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2424 read_mode, failrec->this_mirror, failrec->in_validation);
2420 2425
2421 ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, 2426 ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
2422 failrec->bio_flags, 0); 2427 failrec->bio_flags, 0);
@@ -2484,8 +2489,7 @@ static void end_bio_extent_writepage(struct bio *bio)
2484 bvec->bv_offset, bvec->bv_len); 2489 bvec->bv_offset, bvec->bv_len);
2485 else 2490 else
2486 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info, 2491 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2487 "incomplete page write in btrfs with offset %u and " 2492 "incomplete page write in btrfs with offset %u and length %u",
2488 "length %u",
2489 bvec->bv_offset, bvec->bv_len); 2493 bvec->bv_offset, bvec->bv_len);
2490 } 2494 }
2491 2495
@@ -2541,10 +2545,12 @@ static void end_bio_extent_readpage(struct bio *bio)
2541 bio_for_each_segment_all(bvec, bio, i) { 2545 bio_for_each_segment_all(bvec, bio, i) {
2542 struct page *page = bvec->bv_page; 2546 struct page *page = bvec->bv_page;
2543 struct inode *inode = page->mapping->host; 2547 struct inode *inode = page->mapping->host;
2548 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2544 2549
2545 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " 2550 btrfs_debug(fs_info,
2546 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, 2551 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2547 bio->bi_error, io_bio->mirror_num); 2552 (u64)bio->bi_iter.bi_sector, bio->bi_error,
2553 io_bio->mirror_num);
2548 tree = &BTRFS_I(inode)->io_tree; 2554 tree = &BTRFS_I(inode)->io_tree;
2549 2555
2550 /* We always issue full-page reads, but if some block 2556 /* We always issue full-page reads, but if some block
@@ -2554,13 +2560,12 @@ static void end_bio_extent_readpage(struct bio *bio)
2554 * if they don't add up to a full page. */ 2560 * if they don't add up to a full page. */
2555 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { 2561 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2556 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) 2562 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2557 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, 2563 btrfs_err(fs_info,
2558 "partial page read in btrfs with offset %u and length %u", 2564 "partial page read in btrfs with offset %u and length %u",
2559 bvec->bv_offset, bvec->bv_len); 2565 bvec->bv_offset, bvec->bv_len);
2560 else 2566 else
2561 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info, 2567 btrfs_info(fs_info,
2562 "incomplete page read in btrfs with offset %u and " 2568 "incomplete page read in btrfs with offset %u and length %u",
2563 "length %u",
2564 bvec->bv_offset, bvec->bv_len); 2569 bvec->bv_offset, bvec->bv_len);
2565 } 2570 }
2566 2571
@@ -3624,7 +3629,6 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
3624static void set_btree_ioerr(struct page *page) 3629static void set_btree_ioerr(struct page *page)
3625{ 3630{
3626 struct extent_buffer *eb = (struct extent_buffer *)page->private; 3631 struct extent_buffer *eb = (struct extent_buffer *)page->private;
3627 struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
3628 3632
3629 SetPageError(page); 3633 SetPageError(page);
3630 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) 3634 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
@@ -3670,13 +3674,13 @@ static void set_btree_ioerr(struct page *page)
3670 */ 3674 */
3671 switch (eb->log_index) { 3675 switch (eb->log_index) {
3672 case -1: 3676 case -1:
3673 set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags); 3677 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3674 break; 3678 break;
3675 case 0: 3679 case 0:
3676 set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags); 3680 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3677 break; 3681 break;
3678 case 1: 3682 case 1:
3679 set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags); 3683 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3680 break; 3684 break;
3681 default: 3685 default:
3682 BUG(); /* unexpected, logic error */ 3686 BUG(); /* unexpected, logic error */
@@ -3721,8 +3725,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3721 struct block_device *bdev = fs_info->fs_devices->latest_bdev; 3725 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3722 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree; 3726 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3723 u64 offset = eb->start; 3727 u64 offset = eb->start;
3728 u32 nritems;
3724 unsigned long i, num_pages; 3729 unsigned long i, num_pages;
3725 unsigned long bio_flags = 0; 3730 unsigned long bio_flags = 0;
3731 unsigned long start, end;
3726 int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META; 3732 int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
3727 int ret = 0; 3733 int ret = 0;
3728 3734
@@ -3732,6 +3738,23 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3732 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID) 3738 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3733 bio_flags = EXTENT_BIO_TREE_LOG; 3739 bio_flags = EXTENT_BIO_TREE_LOG;
3734 3740
3741 /* set btree blocks beyond nritems with 0 to avoid stale content. */
3742 nritems = btrfs_header_nritems(eb);
3743 if (btrfs_header_level(eb) > 0) {
3744 end = btrfs_node_key_ptr_offset(nritems);
3745
3746 memset_extent_buffer(eb, 0, end, eb->len - end);
3747 } else {
3748 /*
3749 * leaf:
3750 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3751 */
3752 start = btrfs_item_nr_offset(nritems);
3753 end = btrfs_leaf_data(eb) +
3754 leaf_data_end(fs_info->tree_root, eb);
3755 memset_extent_buffer(eb, 0, start, end - start);
3756 }
3757
3735 for (i = 0; i < num_pages; i++) { 3758 for (i = 0; i < num_pages; i++) {
3736 struct page *p = eb->pages[i]; 3759 struct page *p = eb->pages[i];
3737 3760
@@ -4487,21 +4510,36 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4487 flags |= (FIEMAP_EXTENT_DELALLOC | 4510 flags |= (FIEMAP_EXTENT_DELALLOC |
4488 FIEMAP_EXTENT_UNKNOWN); 4511 FIEMAP_EXTENT_UNKNOWN);
4489 } else if (fieinfo->fi_extents_max) { 4512 } else if (fieinfo->fi_extents_max) {
4513 struct btrfs_trans_handle *trans;
4514
4490 u64 bytenr = em->block_start - 4515 u64 bytenr = em->block_start -
4491 (em->start - em->orig_start); 4516 (em->start - em->orig_start);
4492 4517
4493 disko = em->block_start + offset_in_extent; 4518 disko = em->block_start + offset_in_extent;
4494 4519
4495 /* 4520 /*
4521 * We need a trans handle to get delayed refs
4522 */
4523 trans = btrfs_join_transaction(root);
4524 /*
4525 * It's OK if we can't start a trans we can still check
4526 * from commit_root
4527 */
4528 if (IS_ERR(trans))
4529 trans = NULL;
4530
4531 /*
4496 * As btrfs supports shared space, this information 4532 * As btrfs supports shared space, this information
4497 * can be exported to userspace tools via 4533 * can be exported to userspace tools via
4498 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0 4534 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4499 * then we're just getting a count and we can skip the 4535 * then we're just getting a count and we can skip the
4500 * lookup stuff. 4536 * lookup stuff.
4501 */ 4537 */
4502 ret = btrfs_check_shared(NULL, root->fs_info, 4538 ret = btrfs_check_shared(trans, root->fs_info,
4503 root->objectid, 4539 root->objectid,
4504 btrfs_ino(inode), bytenr); 4540 btrfs_ino(inode), bytenr);
4541 if (trans)
4542 btrfs_end_transaction(trans, root);
4505 if (ret < 0) 4543 if (ret < 0)
4506 goto out_free; 4544 goto out_free;
4507 if (ret) 4545 if (ret)
@@ -5173,11 +5211,10 @@ int extent_buffer_uptodate(struct extent_buffer *eb)
5173} 5211}
5174 5212
5175int read_extent_buffer_pages(struct extent_io_tree *tree, 5213int read_extent_buffer_pages(struct extent_io_tree *tree,
5176 struct extent_buffer *eb, u64 start, int wait, 5214 struct extent_buffer *eb, int wait,
5177 get_extent_t *get_extent, int mirror_num) 5215 get_extent_t *get_extent, int mirror_num)
5178{ 5216{
5179 unsigned long i; 5217 unsigned long i;
5180 unsigned long start_i;
5181 struct page *page; 5218 struct page *page;
5182 int err; 5219 int err;
5183 int ret = 0; 5220 int ret = 0;
@@ -5191,16 +5228,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
5191 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 5228 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5192 return 0; 5229 return 0;
5193 5230
5194 if (start) {
5195 WARN_ON(start < eb->start);
5196 start_i = (start >> PAGE_SHIFT) -
5197 (eb->start >> PAGE_SHIFT);
5198 } else {
5199 start_i = 0;
5200 }
5201
5202 num_pages = num_extent_pages(eb->start, eb->len); 5231 num_pages = num_extent_pages(eb->start, eb->len);
5203 for (i = start_i; i < num_pages; i++) { 5232 for (i = 0; i < num_pages; i++) {
5204 page = eb->pages[i]; 5233 page = eb->pages[i];
5205 if (wait == WAIT_NONE) { 5234 if (wait == WAIT_NONE) {
5206 if (!trylock_page(page)) 5235 if (!trylock_page(page))
@@ -5209,21 +5238,29 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
5209 lock_page(page); 5238 lock_page(page);
5210 } 5239 }
5211 locked_pages++; 5240 locked_pages++;
5241 }
5242 /*
5243 * We need to firstly lock all pages to make sure that
5244 * the uptodate bit of our pages won't be affected by
5245 * clear_extent_buffer_uptodate().
5246 */
5247 for (i = 0; i < num_pages; i++) {
5248 page = eb->pages[i];
5212 if (!PageUptodate(page)) { 5249 if (!PageUptodate(page)) {
5213 num_reads++; 5250 num_reads++;
5214 all_uptodate = 0; 5251 all_uptodate = 0;
5215 } 5252 }
5216 } 5253 }
5254
5217 if (all_uptodate) { 5255 if (all_uptodate) {
5218 if (start_i == 0) 5256 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5219 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5220 goto unlock_exit; 5257 goto unlock_exit;
5221 } 5258 }
5222 5259
5223 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); 5260 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5224 eb->read_mirror = 0; 5261 eb->read_mirror = 0;
5225 atomic_set(&eb->io_pages, num_reads); 5262 atomic_set(&eb->io_pages, num_reads);
5226 for (i = start_i; i < num_pages; i++) { 5263 for (i = 0; i < num_pages; i++) {
5227 page = eb->pages[i]; 5264 page = eb->pages[i];
5228 5265
5229 if (!PageUptodate(page)) { 5266 if (!PageUptodate(page)) {
@@ -5264,7 +5301,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
5264 if (ret || wait != WAIT_COMPLETE) 5301 if (ret || wait != WAIT_COMPLETE)
5265 return ret; 5302 return ret;
5266 5303
5267 for (i = start_i; i < num_pages; i++) { 5304 for (i = 0; i < num_pages; i++) {
5268 page = eb->pages[i]; 5305 page = eb->pages[i];
5269 wait_on_page_locked(page); 5306 wait_on_page_locked(page);
5270 if (!PageUptodate(page)) 5307 if (!PageUptodate(page))
@@ -5274,12 +5311,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
5274 return ret; 5311 return ret;
5275 5312
5276unlock_exit: 5313unlock_exit:
5277 i = start_i;
5278 while (locked_pages > 0) { 5314 while (locked_pages > 0) {
5279 page = eb->pages[i];
5280 i++;
5281 unlock_page(page);
5282 locked_pages--; 5315 locked_pages--;
5316 page = eb->pages[locked_pages];
5317 unlock_page(page);
5283 } 5318 }
5284 return ret; 5319 return ret;
5285} 5320}
@@ -5382,8 +5417,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5382 } 5417 }
5383 5418
5384 if (start + min_len > eb->len) { 5419 if (start + min_len > eb->len) {
5385 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, " 5420 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5386 "wanted %lu %lu\n",
5387 eb->start, eb->len, start, min_len); 5421 eb->start, eb->len, start, min_len);
5388 return -EINVAL; 5422 return -EINVAL;
5389 } 5423 }
@@ -5713,14 +5747,14 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5713 5747
5714 if (src_offset + len > dst->len) { 5748 if (src_offset + len > dst->len) {
5715 btrfs_err(dst->fs_info, 5749 btrfs_err(dst->fs_info,
5716 "memmove bogus src_offset %lu move " 5750 "memmove bogus src_offset %lu move len %lu dst len %lu",
5717 "len %lu dst len %lu", src_offset, len, dst->len); 5751 src_offset, len, dst->len);
5718 BUG_ON(1); 5752 BUG_ON(1);
5719 } 5753 }
5720 if (dst_offset + len > dst->len) { 5754 if (dst_offset + len > dst->len) {
5721 btrfs_err(dst->fs_info, 5755 btrfs_err(dst->fs_info,
5722 "memmove bogus dst_offset %lu move " 5756 "memmove bogus dst_offset %lu move len %lu dst len %lu",
5723 "len %lu dst len %lu", dst_offset, len, dst->len); 5757 dst_offset, len, dst->len);
5724 BUG_ON(1); 5758 BUG_ON(1);
5725 } 5759 }
5726 5760
@@ -5760,13 +5794,15 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5760 unsigned long src_i; 5794 unsigned long src_i;
5761 5795
5762 if (src_offset + len > dst->len) { 5796 if (src_offset + len > dst->len) {
5763 btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move " 5797 btrfs_err(dst->fs_info,
5764 "len %lu len %lu", src_offset, len, dst->len); 5798 "memmove bogus src_offset %lu move len %lu len %lu",
5799 src_offset, len, dst->len);
5765 BUG_ON(1); 5800 BUG_ON(1);
5766 } 5801 }
5767 if (dst_offset + len > dst->len) { 5802 if (dst_offset + len > dst->len) {
5768 btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move " 5803 btrfs_err(dst->fs_info,
5769 "len %lu len %lu", dst_offset, len, dst->len); 5804 "memmove bogus dst_offset %lu move len %lu len %lu",
5805 dst_offset, len, dst->len);
5770 BUG_ON(1); 5806 BUG_ON(1);
5771 } 5807 }
5772 if (dst_offset < src_offset) { 5808 if (dst_offset < src_offset) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 28cd88fccc7e..4a094f1dc7ef 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -359,7 +359,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
359#define WAIT_COMPLETE 1 359#define WAIT_COMPLETE 1
360#define WAIT_PAGE_LOCK 2 360#define WAIT_PAGE_LOCK 2
361int read_extent_buffer_pages(struct extent_io_tree *tree, 361int read_extent_buffer_pages(struct extent_io_tree *tree,
362 struct extent_buffer *eb, u64 start, int wait, 362 struct extent_buffer *eb, int wait,
363 get_extent_t *get_extent, int mirror_num); 363 get_extent_t *get_extent, int mirror_num);
364void wait_on_extent_buffer_writeback(struct extent_buffer *eb); 364void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
365 365
@@ -413,7 +413,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
413void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); 413void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
414void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); 414void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
415void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, 415void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
416 struct page *locked_page, 416 u64 delalloc_end, struct page *locked_page,
417 unsigned bits_to_clear, 417 unsigned bits_to_clear,
418 unsigned long page_ops); 418 unsigned long page_ops);
419struct bio * 419struct bio *
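The two prototype changes above ripple through the callers that follow: read_extent_buffer_pages() no longer takes a start offset (the whole buffer is always read), and extent_clear_unlock_delalloc() gains a delalloc_end argument right after end. A caller-side sketch under those assumptions, with placeholder variable names; the real call sites appear in the inode.c hunks below and elsewhere in this series:

    /* The old u64 start argument (previously 0 or eb->start) is gone. */
    ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
                                   btree_get_extent, mirror_num);

    /* The end of the delalloc range is now passed right after 'end';
     * several sites in inode.c simply pass 'end' again.
     */
    extent_clear_unlock_delalloc(inode, start, end, delalloc_end, locked_page,
                                 EXTENT_LOCKED | EXTENT_DELALLOC,
                                 PAGE_UNLOCK | PAGE_CLEAR_DIRTY);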
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 36f4589e349c..3a14c87d9c92 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -503,7 +503,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
503 503
504 end_of_last_block = start_pos + num_bytes - 1; 504 end_of_last_block = start_pos + num_bytes - 1;
505 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, 505 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
506 cached); 506 cached, 0);
507 if (err) 507 if (err)
508 return err; 508 return err;
509 509
@@ -1110,13 +1110,25 @@ again:
1110 1110
1111 leaf = path->nodes[0]; 1111 leaf = path->nodes[0];
1112 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 1112 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1113 BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY); 1113 if (key.objectid != ino ||
1114 key.type != BTRFS_EXTENT_DATA_KEY) {
1115 ret = -EINVAL;
1116 btrfs_abort_transaction(trans, ret);
1117 goto out;
1118 }
1114 fi = btrfs_item_ptr(leaf, path->slots[0], 1119 fi = btrfs_item_ptr(leaf, path->slots[0],
1115 struct btrfs_file_extent_item); 1120 struct btrfs_file_extent_item);
1116 BUG_ON(btrfs_file_extent_type(leaf, fi) != 1121 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1117 BTRFS_FILE_EXTENT_PREALLOC); 1122 ret = -EINVAL;
1123 btrfs_abort_transaction(trans, ret);
1124 goto out;
1125 }
1118 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 1126 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1119 BUG_ON(key.offset > start || extent_end < end); 1127 if (key.offset > start || extent_end < end) {
1128 ret = -EINVAL;
1129 btrfs_abort_transaction(trans, ret);
1130 goto out;
1131 }
1120 1132
1121 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1133 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1122 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1134 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
@@ -1213,12 +1225,19 @@ again:
1213 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, 1225 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
1214 root->root_key.objectid, 1226 root->root_key.objectid,
1215 ino, orig_offset); 1227 ino, orig_offset);
1216 BUG_ON(ret); /* -ENOMEM */ 1228 if (ret) {
1229 btrfs_abort_transaction(trans, ret);
1230 goto out;
1231 }
1217 1232
1218 if (split == start) { 1233 if (split == start) {
1219 key.offset = start; 1234 key.offset = start;
1220 } else { 1235 } else {
1221 BUG_ON(start != key.offset); 1236 if (start != key.offset) {
1237 ret = -EINVAL;
1238 btrfs_abort_transaction(trans, ret);
1239 goto out;
1240 }
1222 path->slots[0]--; 1241 path->slots[0]--;
1223 extent_end = end; 1242 extent_end = end;
1224 } 1243 }
@@ -1240,7 +1259,10 @@ again:
1240 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1259 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1241 0, root->root_key.objectid, 1260 0, root->root_key.objectid,
1242 ino, orig_offset); 1261 ino, orig_offset);
1243 BUG_ON(ret); /* -ENOMEM */ 1262 if (ret) {
1263 btrfs_abort_transaction(trans, ret);
1264 goto out;
1265 }
1244 } 1266 }
1245 other_start = 0; 1267 other_start = 0;
1246 other_end = start; 1268 other_end = start;
@@ -1257,7 +1279,10 @@ again:
1257 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1279 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1258 0, root->root_key.objectid, 1280 0, root->root_key.objectid,
1259 ino, orig_offset); 1281 ino, orig_offset);
1260 BUG_ON(ret); /* -ENOMEM */ 1282 if (ret) {
1283 btrfs_abort_transaction(trans, ret);
1284 goto out;
1285 }
1261 } 1286 }
1262 if (del_nr == 0) { 1287 if (del_nr == 0) {
1263 fi = btrfs_item_ptr(leaf, path->slots[0], 1288 fi = btrfs_item_ptr(leaf, path->slots[0],
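The file.c hunks above replace BUG_ON() on transaction-time failures with the abort-and-unwind pattern used elsewhere in btrfs. A stripped-down sketch of that pattern, with a placeholder operation standing in for btrfs_inc_extent_ref()/btrfs_free_extent():

    ret = some_extent_operation(trans, root);   /* placeholder call */
    if (ret) {
            /* mark the transaction aborted instead of crashing the box */
            btrfs_abort_transaction(trans, ret);
            goto out;
    }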
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d571bd2b697b..e4b48f377d3a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -716,8 +716,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
716 716
717 if (BTRFS_I(inode)->generation != generation) { 717 if (BTRFS_I(inode)->generation != generation) {
718 btrfs_err(root->fs_info, 718 btrfs_err(root->fs_info,
719 "free space inode generation (%llu) " 719 "free space inode generation (%llu) did not match free space cache generation (%llu)",
720 "did not match free space cache generation (%llu)",
721 BTRFS_I(inode)->generation, generation); 720 BTRFS_I(inode)->generation, generation);
722 return 0; 721 return 0;
723 } 722 }
@@ -879,8 +878,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
879 878
880 if (!matched) { 879 if (!matched) {
881 __btrfs_remove_free_space_cache(ctl); 880 __btrfs_remove_free_space_cache(ctl);
882 btrfs_warn(fs_info, "block group %llu has wrong amount of free space", 881 btrfs_warn(fs_info,
883 block_group->key.objectid); 882 "block group %llu has wrong amount of free space",
883 block_group->key.objectid);
884 ret = -1; 884 ret = -1;
885 } 885 }
886out: 886out:
@@ -891,8 +891,9 @@ out:
891 spin_unlock(&block_group->lock); 891 spin_unlock(&block_group->lock);
892 ret = 0; 892 ret = 0;
893 893
894 btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now", 894 btrfs_warn(fs_info,
895 block_group->key.objectid); 895 "failed to load free space cache for block group %llu, rebuilding it now",
896 block_group->key.objectid);
896 } 897 }
897 898
898 iput(inode); 899 iput(inode);
@@ -2298,7 +2299,8 @@ static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2298 } 2299 }
2299} 2300}
2300 2301
2301int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, 2302int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
2303 struct btrfs_free_space_ctl *ctl,
2302 u64 offset, u64 bytes) 2304 u64 offset, u64 bytes)
2303{ 2305{
2304 struct btrfs_free_space *info; 2306 struct btrfs_free_space *info;
@@ -2345,7 +2347,7 @@ out:
2345 spin_unlock(&ctl->tree_lock); 2347 spin_unlock(&ctl->tree_lock);
2346 2348
2347 if (ret) { 2349 if (ret) {
2348 printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret); 2350 btrfs_crit(fs_info, "unable to add free space :%d", ret);
2349 ASSERT(ret != -EEXIST); 2351 ASSERT(ret != -EEXIST);
2350 } 2352 }
2351 2353
@@ -2621,7 +2623,8 @@ out:
2621 spin_unlock(&ctl->tree_lock); 2623 spin_unlock(&ctl->tree_lock);
2622 2624
2623 if (align_gap_len) 2625 if (align_gap_len)
2624 __btrfs_add_free_space(ctl, align_gap, align_gap_len); 2626 __btrfs_add_free_space(block_group->fs_info, ctl,
2627 align_gap, align_gap_len);
2625 return ret; 2628 return ret;
2626} 2629}
2627 2630
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 3af651c2bbc7..363fdd955e5d 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -89,13 +89,15 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
89 struct inode *inode); 89 struct inode *inode);
90 90
91void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); 91void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
92int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, 92int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
93 struct btrfs_free_space_ctl *ctl,
93 u64 bytenr, u64 size); 94 u64 bytenr, u64 size);
94static inline int 95static inline int
95btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 96btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
96 u64 bytenr, u64 size) 97 u64 bytenr, u64 size)
97{ 98{
98 return __btrfs_add_free_space(block_group->free_space_ctl, 99 return __btrfs_add_free_space(block_group->fs_info,
100 block_group->free_space_ctl,
99 bytenr, size); 101 bytenr, size);
100} 102}
101int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 103int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
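__btrfs_add_free_space() now takes the fs_info as its first argument so failures can be reported through btrfs_crit() instead of a bare printk(). A sketch of an updated call site, mirroring the inline wrapper above; the inode-map.c hunks below show the remaining conversions:

    /* fs_info is now threaded through explicitly; offset/bytes are placeholders. */
    ret = __btrfs_add_free_space(block_group->fs_info,
                                 block_group->free_space_ctl,
                                 offset, bytes);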
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 87e7e3d3e676..e4a42a8e4f84 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -107,7 +107,7 @@ search_free_space_info(struct btrfs_trans_handle *trans,
107 if (ret < 0) 107 if (ret < 0)
108 return ERR_PTR(ret); 108 return ERR_PTR(ret);
109 if (ret != 0) { 109 if (ret != 0) {
110 btrfs_warn(fs_info, "missing free space info for %llu\n", 110 btrfs_warn(fs_info, "missing free space info for %llu",
111 block_group->key.objectid); 111 block_group->key.objectid);
112 ASSERT(0); 112 ASSERT(0);
113 return ERR_PTR(-ENOENT); 113 return ERR_PTR(-ENOENT);
@@ -261,7 +261,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
261 btrfs_release_path(path); 261 btrfs_release_path(path);
262 262
263 if (extent_count != expected_extent_count) { 263 if (extent_count != expected_extent_count) {
264 btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", 264 btrfs_err(fs_info,
265 "incorrect extent count for %llu; counted %u, expected %u",
265 block_group->key.objectid, extent_count, 266 block_group->key.objectid, extent_count,
266 expected_extent_count); 267 expected_extent_count);
267 ASSERT(0); 268 ASSERT(0);
@@ -442,7 +443,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
442 } 443 }
443 444
444 if (extent_count != expected_extent_count) { 445 if (extent_count != expected_extent_count) {
445 btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", 446 btrfs_err(fs_info,
447 "incorrect extent count for %llu; counted %u, expected %u",
446 block_group->key.objectid, extent_count, 448 block_group->key.objectid, extent_count,
447 expected_extent_count); 449 expected_extent_count);
448 ASSERT(0); 450 ASSERT(0);
@@ -1163,7 +1165,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
1163 if (IS_ERR(trans)) 1165 if (IS_ERR(trans))
1164 return PTR_ERR(trans); 1166 return PTR_ERR(trans);
1165 1167
1166 fs_info->creating_free_space_tree = 1; 1168 set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1167 free_space_root = btrfs_create_tree(trans, fs_info, 1169 free_space_root = btrfs_create_tree(trans, fs_info,
1168 BTRFS_FREE_SPACE_TREE_OBJECTID); 1170 BTRFS_FREE_SPACE_TREE_OBJECTID);
1169 if (IS_ERR(free_space_root)) { 1171 if (IS_ERR(free_space_root)) {
@@ -1183,7 +1185,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
1183 } 1185 }
1184 1186
1185 btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); 1187 btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1186 fs_info->creating_free_space_tree = 0; 1188 clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1187 1189
1188 ret = btrfs_commit_transaction(trans, tree_root); 1190 ret = btrfs_commit_transaction(trans, tree_root);
1189 if (ret) 1191 if (ret)
@@ -1192,7 +1194,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
1192 return 0; 1194 return 0;
1193 1195
1194abort: 1196abort:
1195 fs_info->creating_free_space_tree = 0; 1197 clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1196 btrfs_abort_transaction(trans, ret); 1198 btrfs_abort_transaction(trans, ret);
1197 btrfs_end_transaction(trans, tree_root); 1199 btrfs_end_transaction(trans, tree_root);
1198 return ret; 1200 return ret;
@@ -1480,7 +1482,8 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
1480 } 1482 }
1481 1483
1482 if (extent_count != expected_extent_count) { 1484 if (extent_count != expected_extent_count) {
1483 btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", 1485 btrfs_err(fs_info,
1486 "incorrect extent count for %llu; counted %u, expected %u",
1484 block_group->key.objectid, extent_count, 1487 block_group->key.objectid, extent_count,
1485 expected_extent_count); 1488 expected_extent_count);
1486 ASSERT(0); 1489 ASSERT(0);
@@ -1542,7 +1545,8 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
1542 } 1545 }
1543 1546
1544 if (extent_count != expected_extent_count) { 1547 if (extent_count != expected_extent_count) {
1545 btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", 1548 btrfs_err(fs_info,
1549 "incorrect extent count for %llu; counted %u, expected %u",
1546 block_group->key.objectid, extent_count, 1550 block_group->key.objectid, extent_count,
1547 expected_extent_count); 1551 expected_extent_count);
1548 ASSERT(0); 1552 ASSERT(0);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 359ee861b5a4..d27014b8bf72 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -104,7 +104,7 @@ again:
104 break; 104 break;
105 105
106 if (last != (u64)-1 && last + 1 != key.objectid) { 106 if (last != (u64)-1 && last + 1 != key.objectid) {
107 __btrfs_add_free_space(ctl, last + 1, 107 __btrfs_add_free_space(fs_info, ctl, last + 1,
108 key.objectid - last - 1); 108 key.objectid - last - 1);
109 wake_up(&root->ino_cache_wait); 109 wake_up(&root->ino_cache_wait);
110 } 110 }
@@ -115,7 +115,7 @@ next:
115 } 115 }
116 116
117 if (last < root->highest_objectid - 1) { 117 if (last < root->highest_objectid - 1) {
118 __btrfs_add_free_space(ctl, last + 1, 118 __btrfs_add_free_space(fs_info, ctl, last + 1,
119 root->highest_objectid - last - 1); 119 root->highest_objectid - last - 1);
120 } 120 }
121 121
@@ -136,12 +136,13 @@ out:
136 136
137static void start_caching(struct btrfs_root *root) 137static void start_caching(struct btrfs_root *root)
138{ 138{
139 struct btrfs_fs_info *fs_info = root->fs_info;
139 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 140 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
140 struct task_struct *tsk; 141 struct task_struct *tsk;
141 int ret; 142 int ret;
142 u64 objectid; 143 u64 objectid;
143 144
144 if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) 145 if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
145 return; 146 return;
146 147
147 spin_lock(&root->ino_cache_lock); 148 spin_lock(&root->ino_cache_lock);
@@ -153,7 +154,7 @@ static void start_caching(struct btrfs_root *root)
153 root->ino_cache_state = BTRFS_CACHE_STARTED; 154 root->ino_cache_state = BTRFS_CACHE_STARTED;
154 spin_unlock(&root->ino_cache_lock); 155 spin_unlock(&root->ino_cache_lock);
155 156
156 ret = load_free_ino_cache(root->fs_info, root); 157 ret = load_free_ino_cache(fs_info, root);
157 if (ret == 1) { 158 if (ret == 1) {
158 spin_lock(&root->ino_cache_lock); 159 spin_lock(&root->ino_cache_lock);
159 root->ino_cache_state = BTRFS_CACHE_FINISHED; 160 root->ino_cache_state = BTRFS_CACHE_FINISHED;
@@ -170,15 +171,15 @@ static void start_caching(struct btrfs_root *root)
170 */ 171 */
171 ret = btrfs_find_free_objectid(root, &objectid); 172 ret = btrfs_find_free_objectid(root, &objectid);
172 if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) { 173 if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
173 __btrfs_add_free_space(ctl, objectid, 174 __btrfs_add_free_space(fs_info, ctl, objectid,
174 BTRFS_LAST_FREE_OBJECTID - objectid + 1); 175 BTRFS_LAST_FREE_OBJECTID - objectid + 1);
175 } 176 }
176 177
177 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu", 178 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
178 root->root_key.objectid); 179 root->root_key.objectid);
179 if (IS_ERR(tsk)) { 180 if (IS_ERR(tsk)) {
180 btrfs_warn(root->fs_info, "failed to start inode caching task"); 181 btrfs_warn(fs_info, "failed to start inode caching task");
181 btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE, 182 btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
182 "disabling inode map caching"); 183 "disabling inode map caching");
183 } 184 }
184} 185}
@@ -209,28 +210,29 @@ again:
209 210
210void btrfs_return_ino(struct btrfs_root *root, u64 objectid) 211void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
211{ 212{
213 struct btrfs_fs_info *fs_info = root->fs_info;
212 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; 214 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
213 215
214 if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) 216 if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
215 return; 217 return;
216again: 218again:
217 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) { 219 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
218 __btrfs_add_free_space(pinned, objectid, 1); 220 __btrfs_add_free_space(fs_info, pinned, objectid, 1);
219 } else { 221 } else {
220 down_write(&root->fs_info->commit_root_sem); 222 down_write(&fs_info->commit_root_sem);
221 spin_lock(&root->ino_cache_lock); 223 spin_lock(&root->ino_cache_lock);
222 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) { 224 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
223 spin_unlock(&root->ino_cache_lock); 225 spin_unlock(&root->ino_cache_lock);
224 up_write(&root->fs_info->commit_root_sem); 226 up_write(&fs_info->commit_root_sem);
225 goto again; 227 goto again;
226 } 228 }
227 spin_unlock(&root->ino_cache_lock); 229 spin_unlock(&root->ino_cache_lock);
228 230
229 start_caching(root); 231 start_caching(root);
230 232
231 __btrfs_add_free_space(pinned, objectid, 1); 233 __btrfs_add_free_space(fs_info, pinned, objectid, 1);
232 234
233 up_write(&root->fs_info->commit_root_sem); 235 up_write(&fs_info->commit_root_sem);
234 } 236 }
235} 237}
236 238
@@ -277,7 +279,8 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
277 rb_erase(&info->offset_index, rbroot); 279 rb_erase(&info->offset_index, rbroot);
278 spin_unlock(rbroot_lock); 280 spin_unlock(rbroot_lock);
279 if (add_to_ctl) 281 if (add_to_ctl)
280 __btrfs_add_free_space(ctl, info->offset, count); 282 __btrfs_add_free_space(root->fs_info, ctl,
283 info->offset, count);
281 kmem_cache_free(btrfs_free_space_cachep, info); 284 kmem_cache_free(btrfs_free_space_cachep, info);
282 } 285 }
283} 286}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 22a7ca43c7cd..2b790bda7998 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -560,8 +560,9 @@ cont:
560 * we don't need to create any more async work items. 560 * we don't need to create any more async work items.
561 * Unlock and free up our temp pages. 561 * Unlock and free up our temp pages.
562 */ 562 */
563 extent_clear_unlock_delalloc(inode, start, end, NULL, 563 extent_clear_unlock_delalloc(inode, start, end, end,
564 clear_flags, PAGE_UNLOCK | 564 NULL, clear_flags,
565 PAGE_UNLOCK |
565 PAGE_CLEAR_DIRTY | 566 PAGE_CLEAR_DIRTY |
566 PAGE_SET_WRITEBACK | 567 PAGE_SET_WRITEBACK |
567 page_error_op | 568 page_error_op |
@@ -837,6 +838,8 @@ retry:
837 extent_clear_unlock_delalloc(inode, async_extent->start, 838 extent_clear_unlock_delalloc(inode, async_extent->start,
838 async_extent->start + 839 async_extent->start +
839 async_extent->ram_size - 1, 840 async_extent->ram_size - 1,
841 async_extent->start +
842 async_extent->ram_size - 1,
840 NULL, EXTENT_LOCKED | EXTENT_DELALLOC, 843 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
841 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 844 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
842 PAGE_SET_WRITEBACK); 845 PAGE_SET_WRITEBACK);
@@ -856,7 +859,8 @@ retry:
856 tree->ops->writepage_end_io_hook(p, start, end, 859 tree->ops->writepage_end_io_hook(p, start, end,
857 NULL, 0); 860 NULL, 0);
858 p->mapping = NULL; 861 p->mapping = NULL;
859 extent_clear_unlock_delalloc(inode, start, end, NULL, 0, 862 extent_clear_unlock_delalloc(inode, start, end, end,
863 NULL, 0,
860 PAGE_END_WRITEBACK | 864 PAGE_END_WRITEBACK |
861 PAGE_SET_ERROR); 865 PAGE_SET_ERROR);
862 free_async_extent_pages(async_extent); 866 free_async_extent_pages(async_extent);
@@ -873,6 +877,8 @@ out_free:
873 extent_clear_unlock_delalloc(inode, async_extent->start, 877 extent_clear_unlock_delalloc(inode, async_extent->start,
874 async_extent->start + 878 async_extent->start +
875 async_extent->ram_size - 1, 879 async_extent->ram_size - 1,
880 async_extent->start +
881 async_extent->ram_size - 1,
876 NULL, EXTENT_LOCKED | EXTENT_DELALLOC | 882 NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
877 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 883 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
878 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 884 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
@@ -966,7 +972,8 @@ static noinline int cow_file_range(struct inode *inode,
966 ret = cow_file_range_inline(root, inode, start, end, 0, 0, 972 ret = cow_file_range_inline(root, inode, start, end, 0, 0,
967 NULL); 973 NULL);
968 if (ret == 0) { 974 if (ret == 0) {
969 extent_clear_unlock_delalloc(inode, start, end, NULL, 975 extent_clear_unlock_delalloc(inode, start, end,
976 delalloc_end, NULL,
970 EXTENT_LOCKED | EXTENT_DELALLOC | 977 EXTENT_LOCKED | EXTENT_DELALLOC |
971 EXTENT_DEFRAG, PAGE_UNLOCK | 978 EXTENT_DEFRAG, PAGE_UNLOCK |
972 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | 979 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
@@ -1062,7 +1069,8 @@ static noinline int cow_file_range(struct inode *inode,
1062 op |= PAGE_SET_PRIVATE2; 1069 op |= PAGE_SET_PRIVATE2;
1063 1070
1064 extent_clear_unlock_delalloc(inode, start, 1071 extent_clear_unlock_delalloc(inode, start,
1065 start + ram_size - 1, locked_page, 1072 start + ram_size - 1,
1073 delalloc_end, locked_page,
1066 EXTENT_LOCKED | EXTENT_DELALLOC, 1074 EXTENT_LOCKED | EXTENT_DELALLOC,
1067 op); 1075 op);
1068 disk_num_bytes -= cur_alloc_size; 1076 disk_num_bytes -= cur_alloc_size;
@@ -1079,7 +1087,8 @@ out_reserve:
1079 btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); 1087 btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
1080 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 1088 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
1081out_unlock: 1089out_unlock:
1082 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1090 extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
1091 locked_page,
1083 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 1092 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
1084 EXTENT_DELALLOC | EXTENT_DEFRAG, 1093 EXTENT_DELALLOC | EXTENT_DEFRAG,
1085 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 1094 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
@@ -1258,7 +1267,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1258 1267
1259 path = btrfs_alloc_path(); 1268 path = btrfs_alloc_path();
1260 if (!path) { 1269 if (!path) {
1261 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1270 extent_clear_unlock_delalloc(inode, start, end, end,
1271 locked_page,
1262 EXTENT_LOCKED | EXTENT_DELALLOC | 1272 EXTENT_LOCKED | EXTENT_DELALLOC |
1263 EXTENT_DO_ACCOUNTING | 1273 EXTENT_DO_ACCOUNTING |
1264 EXTENT_DEFRAG, PAGE_UNLOCK | 1274 EXTENT_DEFRAG, PAGE_UNLOCK |
@@ -1276,7 +1286,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1276 trans = btrfs_join_transaction(root); 1286 trans = btrfs_join_transaction(root);
1277 1287
1278 if (IS_ERR(trans)) { 1288 if (IS_ERR(trans)) {
1279 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1289 extent_clear_unlock_delalloc(inode, start, end, end,
1290 locked_page,
1280 EXTENT_LOCKED | EXTENT_DELALLOC | 1291 EXTENT_LOCKED | EXTENT_DELALLOC |
1281 EXTENT_DO_ACCOUNTING | 1292 EXTENT_DO_ACCOUNTING |
1282 EXTENT_DEFRAG, PAGE_UNLOCK | 1293 EXTENT_DEFRAG, PAGE_UNLOCK |
@@ -1490,7 +1501,7 @@ out_check:
1490 } 1501 }
1491 1502
1492 extent_clear_unlock_delalloc(inode, cur_offset, 1503 extent_clear_unlock_delalloc(inode, cur_offset,
1493 cur_offset + num_bytes - 1, 1504 cur_offset + num_bytes - 1, end,
1494 locked_page, EXTENT_LOCKED | 1505 locked_page, EXTENT_LOCKED |
1495 EXTENT_DELALLOC | 1506 EXTENT_DELALLOC |
1496 EXTENT_CLEAR_DATA_RESV, 1507 EXTENT_CLEAR_DATA_RESV,
@@ -1522,7 +1533,7 @@ error:
1522 ret = err; 1533 ret = err;
1523 1534
1524 if (ret && cur_offset < end) 1535 if (ret && cur_offset < end)
1525 extent_clear_unlock_delalloc(inode, cur_offset, end, 1536 extent_clear_unlock_delalloc(inode, cur_offset, end, end,
1526 locked_page, EXTENT_LOCKED | 1537 locked_page, EXTENT_LOCKED |
1527 EXTENT_DELALLOC | EXTENT_DEFRAG | 1538 EXTENT_DELALLOC | EXTENT_DEFRAG |
1528 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 1539 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
@@ -1988,7 +1999,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1988} 1999}
1989 2000
1990int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 2001int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1991 struct extent_state **cached_state) 2002 struct extent_state **cached_state, int dedupe)
1992{ 2003{
1993 WARN_ON((end & (PAGE_SIZE - 1)) == 0); 2004 WARN_ON((end & (PAGE_SIZE - 1)) == 0);
1994 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 2005 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
@@ -2052,7 +2063,8 @@ again:
2052 goto out; 2063 goto out;
2053 } 2064 }
2054 2065
2055 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); 2066 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
2067 0);
2056 ClearPageChecked(page); 2068 ClearPageChecked(page);
2057 set_page_dirty(page); 2069 set_page_dirty(page);
2058out: 2070out:
@@ -2309,7 +2321,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2309 if (PTR_ERR(root) == -ENOENT) 2321 if (PTR_ERR(root) == -ENOENT)
2310 return 0; 2322 return 0;
2311 WARN_ON(1); 2323 WARN_ON(1);
2312 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n", 2324 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2313 inum, offset, root_id); 2325 inum, offset, root_id);
2314 return PTR_ERR(root); 2326 return PTR_ERR(root);
2315 } 2327 }
@@ -3936,7 +3948,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3936 */ 3948 */
3937 if (!btrfs_is_free_space_inode(inode) 3949 if (!btrfs_is_free_space_inode(inode)
3938 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 3950 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3939 && !root->fs_info->log_root_recovering) { 3951 && !test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
3940 btrfs_update_root_times(trans, root); 3952 btrfs_update_root_times(trans, root);
3941 3953
3942 ret = btrfs_delayed_update_inode(trans, root, inode); 3954 ret = btrfs_delayed_update_inode(trans, root, inode);
@@ -4757,7 +4769,7 @@ again:
4757 0, 0, &cached_state, GFP_NOFS); 4769 0, 0, &cached_state, GFP_NOFS);
4758 4770
4759 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 4771 ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
4760 &cached_state); 4772 &cached_state, 0);
4761 if (ret) { 4773 if (ret) {
4762 unlock_extent_cached(io_tree, block_start, block_end, 4774 unlock_extent_cached(io_tree, block_start, block_end,
4763 &cached_state, GFP_NOFS); 4775 &cached_state, GFP_NOFS);
@@ -5223,7 +5235,7 @@ void btrfs_evict_inode(struct inode *inode)
5223 5235
5224 btrfs_free_io_failure_record(inode, 0, (u64)-1); 5236 btrfs_free_io_failure_record(inode, 0, (u64)-1);
5225 5237
5226 if (root->fs_info->log_root_recovering) { 5238 if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
5227 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 5239 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5228 &BTRFS_I(inode)->runtime_flags)); 5240 &BTRFS_I(inode)->runtime_flags));
5229 goto no_delete; 5241 goto no_delete;
@@ -7012,8 +7024,9 @@ not_found_em:
7012insert: 7024insert:
7013 btrfs_release_path(path); 7025 btrfs_release_path(path);
7014 if (em->start > start || extent_map_end(em) <= start) { 7026 if (em->start > start || extent_map_end(em) <= start) {
7015 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]", 7027 btrfs_err(root->fs_info,
7016 em->start, em->len, start, len); 7028 "bad extent! em: [%llu %llu] passed [%llu %llu]",
7029 em->start, em->len, start, len);
7017 err = -EIO; 7030 err = -EIO;
7018 goto out; 7031 goto out;
7019 } 7032 }
@@ -7865,18 +7878,19 @@ static int btrfs_check_dio_repairable(struct inode *inode,
7865 struct io_failure_record *failrec, 7878 struct io_failure_record *failrec,
7866 int failed_mirror) 7879 int failed_mirror)
7867{ 7880{
7881 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7868 int num_copies; 7882 int num_copies;
7869 7883
7870 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info, 7884 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
7871 failrec->logical, failrec->len);
7872 if (num_copies == 1) { 7885 if (num_copies == 1) {
7873 /* 7886 /*
7874 * we only have a single copy of the data, so don't bother with 7887 * we only have a single copy of the data, so don't bother with
7875 * all the retry and error correction code that follows. no 7888 * all the retry and error correction code that follows. no
7876 * matter what the error is, it is very likely to persist. 7889 * matter what the error is, it is very likely to persist.
7877 */ 7890 */
7878 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n", 7891 btrfs_debug(fs_info,
7879 num_copies, failrec->this_mirror, failed_mirror); 7892 "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
7893 num_copies, failrec->this_mirror, failed_mirror);
7880 return 0; 7894 return 0;
7881 } 7895 }
7882 7896
@@ -7886,8 +7900,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
7886 failrec->this_mirror++; 7900 failrec->this_mirror++;
7887 7901
7888 if (failrec->this_mirror > num_copies) { 7902 if (failrec->this_mirror > num_copies) {
7889 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n", 7903 btrfs_debug(fs_info,
7890 num_copies, failrec->this_mirror, failed_mirror); 7904 "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
7905 num_copies, failrec->this_mirror, failed_mirror);
7891 return 0; 7906 return 0;
7892 } 7907 }
7893 7908
@@ -9055,7 +9070,7 @@ again:
9055 0, 0, &cached_state, GFP_NOFS); 9070 0, 0, &cached_state, GFP_NOFS);
9056 9071
9057 ret = btrfs_set_extent_delalloc(inode, page_start, end, 9072 ret = btrfs_set_extent_delalloc(inode, page_start, end,
9058 &cached_state); 9073 &cached_state, 0);
9059 if (ret) { 9074 if (ret) {
9060 unlock_extent_cached(io_tree, page_start, page_end, 9075 unlock_extent_cached(io_tree, page_start, page_end,
9061 &cached_state, GFP_NOFS); 9076 &cached_state, GFP_NOFS);
@@ -9377,8 +9392,9 @@ void btrfs_destroy_inode(struct inode *inode)
9377 if (!ordered) 9392 if (!ordered)
9378 break; 9393 break;
9379 else { 9394 else {
9380 btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup", 9395 btrfs_err(root->fs_info,
9381 ordered->file_offset, ordered->len); 9396 "found ordered extent %llu %llu on inode cleanup",
9397 ordered->file_offset, ordered->len);
9382 btrfs_remove_ordered_extent(inode, ordered); 9398 btrfs_remove_ordered_extent(inode, ordered);
9383 btrfs_put_ordered_extent(ordered); 9399 btrfs_put_ordered_extent(ordered);
9384 btrfs_put_ordered_extent(ordered); 9400 btrfs_put_ordered_extent(ordered);
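btrfs_set_extent_delalloc() grows a trailing dedupe argument in this series; every existing caller in the hunks above passes 0 for it. A one-line caller sketch with placeholder range variables:

    ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
                                    &cached_state, 0 /* dedupe */);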
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b182197f7091..18e1aa0f85f5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1903,8 +1903,9 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
1903 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 1903 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1904 if (key.objectid == root->root_key.objectid) { 1904 if (key.objectid == root->root_key.objectid) {
1905 ret = -EPERM; 1905 ret = -EPERM;
1906 btrfs_err(root->fs_info, "deleting default subvolume " 1906 btrfs_err(root->fs_info,
1907 "%llu is not allowed", key.objectid); 1907 "deleting default subvolume %llu is not allowed",
1908 key.objectid);
1908 goto out; 1909 goto out;
1909 } 1910 }
1910 btrfs_release_path(path); 1911 btrfs_release_path(path);
@@ -4097,8 +4098,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4097 if (IS_ERR_OR_NULL(di)) { 4098 if (IS_ERR_OR_NULL(di)) {
4098 btrfs_free_path(path); 4099 btrfs_free_path(path);
4099 btrfs_end_transaction(trans, root); 4100 btrfs_end_transaction(trans, root);
4100 btrfs_err(new_root->fs_info, "Umm, you don't have the default dir" 4101 btrfs_err(new_root->fs_info,
4101 "item, this isn't going to work"); 4102 "Umm, you don't have the default diritem, this isn't going to work");
4102 ret = -ENOENT; 4103 ret = -ENOENT;
4103 goto out; 4104 goto out;
4104 } 4105 }
@@ -5307,8 +5308,9 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
5307 return -EFAULT; 5308 return -EFAULT;
5308 5309
5309 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) { 5310 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
5310 btrfs_err(root->fs_info, "unable to set label with more than %d bytes", 5311 btrfs_err(root->fs_info,
5311 BTRFS_LABEL_SIZE - 1); 5312 "unable to set label with more than %d bytes",
5313 BTRFS_LABEL_SIZE - 1);
5312 return -EINVAL; 5314 return -EINVAL;
5313 } 5315 }
5314 5316
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 1adfbe7be6b8..48655da0f4ca 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -141,7 +141,7 @@ static int lzo_compress_pages(struct list_head *ws,
141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, 141 ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
142 &out_len, workspace->mem); 142 &out_len, workspace->mem);
143 if (ret != LZO_E_OK) { 143 if (ret != LZO_E_OK) {
144 printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n", 144 pr_debug("BTRFS: deflate in loop returned %d\n",
145 ret); 145 ret);
146 ret = -EIO; 146 ret = -EIO;
147 goto out; 147 goto out;
@@ -356,7 +356,7 @@ cont:
356 if (need_unmap) 356 if (need_unmap)
357 kunmap(pages_in[page_in_index - 1]); 357 kunmap(pages_in[page_in_index - 1]);
358 if (ret != LZO_E_OK) { 358 if (ret != LZO_E_OK) {
359 printk(KERN_WARNING "BTRFS: decompress failed\n"); 359 pr_warn("BTRFS: decompress failed\n");
360 ret = -EIO; 360 ret = -EIO;
361 break; 361 break;
362 } 362 }
@@ -402,7 +402,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
402 out_len = PAGE_SIZE; 402 out_len = PAGE_SIZE;
403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); 403 ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
404 if (ret != LZO_E_OK) { 404 if (ret != LZO_E_OK) {
405 printk(KERN_WARNING "BTRFS: decompress failed!\n"); 405 pr_warn("BTRFS: decompress failed!\n");
406 ret = -EIO; 406 ret = -EIO;
407 goto out; 407 goto out;
408 } 408 }
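
The lzo.c hunks above are part of the series-wide switch from printk(KERN_DEBUG ...) / printk(KERN_WARNING ...) to the pr_debug() / pr_warn() shorthands. The pr_* helpers prepend whatever the file defines as pr_fmt(), and pr_debug() in particular compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled. Below is a minimal sketch of the idiom; the "demo: " prefix and the function are made up for illustration and are not part of this patch.

#define pr_fmt(fmt) "demo: " fmt        /* must be defined before the printk.h include */

#include <linux/printk.h>

static void demo_log_compress_result(int ret)
{
        /* equivalent to printk(KERN_DEBUG "demo: compress returned %d\n", ret) */
        pr_debug("compress returned %d\n", ret);

        /* equivalent to printk(KERN_WARNING "demo: decompress failed\n") */
        pr_warn("decompress failed\n");
}

When a file defines no pr_fmt(), the helpers add no prefix of their own, which is consistent with the "BTRFS: " tag staying inside the format strings above.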
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 3b78d38173b3..b2d1e95de7be 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -67,8 +67,8 @@ static void ordered_data_tree_panic(struct inode *inode, int errno,
67 u64 offset) 67 u64 offset)
68{ 68{
69 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 69 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
70 btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset " 70 btrfs_panic(fs_info, errno,
71 "%llu", offset); 71 "Inconsistency in ordered tree at offset %llu", offset);
72} 72}
73 73
74/* 74/*
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 147dc6ca5de1..438575ea8d25 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -24,12 +24,11 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
24{ 24{
25 int num_stripes = btrfs_chunk_num_stripes(eb, chunk); 25 int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
26 int i; 26 int i;
27 printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu " 27 pr_info("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n",
28 "num_stripes %d\n",
29 btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk), 28 btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk),
30 btrfs_chunk_type(eb, chunk), num_stripes); 29 btrfs_chunk_type(eb, chunk), num_stripes);
31 for (i = 0 ; i < num_stripes ; i++) { 30 for (i = 0 ; i < num_stripes ; i++) {
32 printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i, 31 pr_info("\t\t\tstripe %d devid %llu offset %llu\n", i,
33 btrfs_stripe_devid_nr(eb, chunk, i), 32 btrfs_stripe_devid_nr(eb, chunk, i),
34 btrfs_stripe_offset_nr(eb, chunk, i)); 33 btrfs_stripe_offset_nr(eb, chunk, i));
35 } 34 }
@@ -37,8 +36,7 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
37static void print_dev_item(struct extent_buffer *eb, 36static void print_dev_item(struct extent_buffer *eb,
38 struct btrfs_dev_item *dev_item) 37 struct btrfs_dev_item *dev_item)
39{ 38{
40 printk(KERN_INFO "\t\tdev item devid %llu " 39 pr_info("\t\tdev item devid %llu total_bytes %llu bytes used %llu\n",
41 "total_bytes %llu bytes used %llu\n",
42 btrfs_device_id(eb, dev_item), 40 btrfs_device_id(eb, dev_item),
43 btrfs_device_total_bytes(eb, dev_item), 41 btrfs_device_total_bytes(eb, dev_item),
44 btrfs_device_bytes_used(eb, dev_item)); 42 btrfs_device_bytes_used(eb, dev_item));
@@ -46,8 +44,7 @@ static void print_dev_item(struct extent_buffer *eb,
46static void print_extent_data_ref(struct extent_buffer *eb, 44static void print_extent_data_ref(struct extent_buffer *eb,
47 struct btrfs_extent_data_ref *ref) 45 struct btrfs_extent_data_ref *ref)
48{ 46{
49 printk(KERN_INFO "\t\textent data backref root %llu " 47 pr_info("\t\textent data backref root %llu objectid %llu offset %llu count %u\n",
50 "objectid %llu offset %llu count %u\n",
51 btrfs_extent_data_ref_root(eb, ref), 48 btrfs_extent_data_ref_root(eb, ref),
52 btrfs_extent_data_ref_objectid(eb, ref), 49 btrfs_extent_data_ref_objectid(eb, ref),
53 btrfs_extent_data_ref_offset(eb, ref), 50 btrfs_extent_data_ref_offset(eb, ref),
@@ -72,7 +69,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
72 struct btrfs_extent_item_v0 *ei0; 69 struct btrfs_extent_item_v0 *ei0;
73 BUG_ON(item_size != sizeof(*ei0)); 70 BUG_ON(item_size != sizeof(*ei0));
74 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0); 71 ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
75 printk(KERN_INFO "\t\textent refs %u\n", 72 pr_info("\t\textent refs %u\n",
76 btrfs_extent_refs_v0(eb, ei0)); 73 btrfs_extent_refs_v0(eb, ei0));
77 return; 74 return;
78#else 75#else
@@ -83,7 +80,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
83 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item); 80 ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
84 flags = btrfs_extent_flags(eb, ei); 81 flags = btrfs_extent_flags(eb, ei);
85 82
86 printk(KERN_INFO "\t\textent refs %llu gen %llu flags %llu\n", 83 pr_info("\t\textent refs %llu gen %llu flags %llu\n",
87 btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei), 84 btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
88 flags); 85 flags);
89 86
@@ -92,8 +89,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
92 struct btrfs_tree_block_info *info; 89 struct btrfs_tree_block_info *info;
93 info = (struct btrfs_tree_block_info *)(ei + 1); 90 info = (struct btrfs_tree_block_info *)(ei + 1);
94 btrfs_tree_block_key(eb, info, &key); 91 btrfs_tree_block_key(eb, info, &key);
95 printk(KERN_INFO "\t\ttree block key (%llu %u %llu) " 92 pr_info("\t\ttree block key (%llu %u %llu) level %d\n",
96 "level %d\n",
97 btrfs_disk_key_objectid(&key), key.type, 93 btrfs_disk_key_objectid(&key), key.type,
98 btrfs_disk_key_offset(&key), 94 btrfs_disk_key_offset(&key),
99 btrfs_tree_block_level(eb, info)); 95 btrfs_tree_block_level(eb, info));
@@ -110,12 +106,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
110 offset = btrfs_extent_inline_ref_offset(eb, iref); 106 offset = btrfs_extent_inline_ref_offset(eb, iref);
111 switch (type) { 107 switch (type) {
112 case BTRFS_TREE_BLOCK_REF_KEY: 108 case BTRFS_TREE_BLOCK_REF_KEY:
113 printk(KERN_INFO "\t\ttree block backref " 109 pr_info("\t\ttree block backref root %llu\n", offset);
114 "root %llu\n", offset);
115 break; 110 break;
116 case BTRFS_SHARED_BLOCK_REF_KEY: 111 case BTRFS_SHARED_BLOCK_REF_KEY:
117 printk(KERN_INFO "\t\tshared block backref " 112 pr_info("\t\tshared block backref parent %llu\n", offset);
118 "parent %llu\n", offset);
119 break; 113 break;
120 case BTRFS_EXTENT_DATA_REF_KEY: 114 case BTRFS_EXTENT_DATA_REF_KEY:
121 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 115 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -123,8 +117,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
123 break; 117 break;
124 case BTRFS_SHARED_DATA_REF_KEY: 118 case BTRFS_SHARED_DATA_REF_KEY:
125 sref = (struct btrfs_shared_data_ref *)(iref + 1); 119 sref = (struct btrfs_shared_data_ref *)(iref + 1);
126 printk(KERN_INFO "\t\tshared data backref " 120 pr_info("\t\tshared data backref parent %llu count %u\n",
127 "parent %llu count %u\n",
128 offset, btrfs_shared_data_ref_count(eb, sref)); 121 offset, btrfs_shared_data_ref_count(eb, sref));
129 break; 122 break;
130 default: 123 default:
@@ -141,8 +134,7 @@ static void print_extent_ref_v0(struct extent_buffer *eb, int slot)
141 struct btrfs_extent_ref_v0 *ref0; 134 struct btrfs_extent_ref_v0 *ref0;
142 135
143 ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0); 136 ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
144 printk("\t\textent back ref root %llu gen %llu " 137 printk("\t\textent back ref root %llu gen %llu owner %llu num_refs %lu\n",
145 "owner %llu num_refs %lu\n",
146 btrfs_ref_root_v0(eb, ref0), 138 btrfs_ref_root_v0(eb, ref0),
147 btrfs_ref_generation_v0(eb, ref0), 139 btrfs_ref_generation_v0(eb, ref0),
148 btrfs_ref_objectid_v0(eb, ref0), 140 btrfs_ref_objectid_v0(eb, ref0),
@@ -162,7 +154,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
162 __le64 subvol_id; 154 __le64 subvol_id;
163 155
164 read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id)); 156 read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id));
165 printk(KERN_INFO "\t\tsubvol_id %llu\n", 157 pr_info("\t\tsubvol_id %llu\n",
166 (unsigned long long)le64_to_cpu(subvol_id)); 158 (unsigned long long)le64_to_cpu(subvol_id));
167 item_size -= sizeof(u64); 159 item_size -= sizeof(u64);
168 offset += sizeof(u64); 160 offset += sizeof(u64);
@@ -196,15 +188,13 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
196 item = btrfs_item_nr(i); 188 item = btrfs_item_nr(i);
197 btrfs_item_key_to_cpu(l, &key, i); 189 btrfs_item_key_to_cpu(l, &key, i);
198 type = key.type; 190 type = key.type;
199 printk(KERN_INFO "\titem %d key (%llu %u %llu) itemoff %d " 191 pr_info("\titem %d key (%llu %u %llu) itemoff %d itemsize %d\n",
200 "itemsize %d\n",
201 i, key.objectid, type, key.offset, 192 i, key.objectid, type, key.offset,
202 btrfs_item_offset(l, item), btrfs_item_size(l, item)); 193 btrfs_item_offset(l, item), btrfs_item_size(l, item));
203 switch (type) { 194 switch (type) {
204 case BTRFS_INODE_ITEM_KEY: 195 case BTRFS_INODE_ITEM_KEY:
205 ii = btrfs_item_ptr(l, i, struct btrfs_inode_item); 196 ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
206 printk(KERN_INFO "\t\tinode generation %llu size %llu " 197 pr_info("\t\tinode generation %llu size %llu mode %o\n",
207 "mode %o\n",
208 btrfs_inode_generation(l, ii), 198 btrfs_inode_generation(l, ii),
209 btrfs_inode_size(l, ii), 199 btrfs_inode_size(l, ii),
210 btrfs_inode_mode(l, ii)); 200 btrfs_inode_mode(l, ii));
@@ -212,13 +202,13 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
212 case BTRFS_DIR_ITEM_KEY: 202 case BTRFS_DIR_ITEM_KEY:
213 di = btrfs_item_ptr(l, i, struct btrfs_dir_item); 203 di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
214 btrfs_dir_item_key_to_cpu(l, di, &found_key); 204 btrfs_dir_item_key_to_cpu(l, di, &found_key);
215 printk(KERN_INFO "\t\tdir oid %llu type %u\n", 205 pr_info("\t\tdir oid %llu type %u\n",
216 found_key.objectid, 206 found_key.objectid,
217 btrfs_dir_type(l, di)); 207 btrfs_dir_type(l, di));
218 break; 208 break;
219 case BTRFS_ROOT_ITEM_KEY: 209 case BTRFS_ROOT_ITEM_KEY:
220 ri = btrfs_item_ptr(l, i, struct btrfs_root_item); 210 ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
221 printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n", 211 pr_info("\t\troot data bytenr %llu refs %u\n",
222 btrfs_disk_root_bytenr(l, ri), 212 btrfs_disk_root_bytenr(l, ri),
223 btrfs_disk_root_refs(l, ri)); 213 btrfs_disk_root_refs(l, ri));
224 break; 214 break;
@@ -227,10 +217,10 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
227 print_extent_item(l, i, type); 217 print_extent_item(l, i, type);
228 break; 218 break;
229 case BTRFS_TREE_BLOCK_REF_KEY: 219 case BTRFS_TREE_BLOCK_REF_KEY:
230 printk(KERN_INFO "\t\ttree block backref\n"); 220 pr_info("\t\ttree block backref\n");
231 break; 221 break;
232 case BTRFS_SHARED_BLOCK_REF_KEY: 222 case BTRFS_SHARED_BLOCK_REF_KEY:
233 printk(KERN_INFO "\t\tshared block backref\n"); 223 pr_info("\t\tshared block backref\n");
234 break; 224 break;
235 case BTRFS_EXTENT_DATA_REF_KEY: 225 case BTRFS_EXTENT_DATA_REF_KEY:
236 dref = btrfs_item_ptr(l, i, 226 dref = btrfs_item_ptr(l, i,
@@ -240,7 +230,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
240 case BTRFS_SHARED_DATA_REF_KEY: 230 case BTRFS_SHARED_DATA_REF_KEY:
241 sref = btrfs_item_ptr(l, i, 231 sref = btrfs_item_ptr(l, i,
242 struct btrfs_shared_data_ref); 232 struct btrfs_shared_data_ref);
243 printk(KERN_INFO "\t\tshared data backref count %u\n", 233 pr_info("\t\tshared data backref count %u\n",
244 btrfs_shared_data_ref_count(l, sref)); 234 btrfs_shared_data_ref_count(l, sref));
245 break; 235 break;
246 case BTRFS_EXTENT_DATA_KEY: 236 case BTRFS_EXTENT_DATA_KEY:
@@ -248,17 +238,14 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
248 struct btrfs_file_extent_item); 238 struct btrfs_file_extent_item);
249 if (btrfs_file_extent_type(l, fi) == 239 if (btrfs_file_extent_type(l, fi) ==
250 BTRFS_FILE_EXTENT_INLINE) { 240 BTRFS_FILE_EXTENT_INLINE) {
251 printk(KERN_INFO "\t\tinline extent data " 241 pr_info("\t\tinline extent data size %u\n",
252 "size %u\n",
253 btrfs_file_extent_inline_len(l, i, fi)); 242 btrfs_file_extent_inline_len(l, i, fi));
254 break; 243 break;
255 } 244 }
256 printk(KERN_INFO "\t\textent data disk bytenr %llu " 245 pr_info("\t\textent data disk bytenr %llu nr %llu\n",
257 "nr %llu\n",
258 btrfs_file_extent_disk_bytenr(l, fi), 246 btrfs_file_extent_disk_bytenr(l, fi),
259 btrfs_file_extent_disk_num_bytes(l, fi)); 247 btrfs_file_extent_disk_num_bytes(l, fi));
260 printk(KERN_INFO "\t\textent data offset %llu " 248 pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
261 "nr %llu ram %llu\n",
262 btrfs_file_extent_offset(l, fi), 249 btrfs_file_extent_offset(l, fi),
263 btrfs_file_extent_num_bytes(l, fi), 250 btrfs_file_extent_num_bytes(l, fi),
264 btrfs_file_extent_ram_bytes(l, fi)); 251 btrfs_file_extent_ram_bytes(l, fi));
@@ -273,7 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
273 case BTRFS_BLOCK_GROUP_ITEM_KEY: 260 case BTRFS_BLOCK_GROUP_ITEM_KEY:
274 bi = btrfs_item_ptr(l, i, 261 bi = btrfs_item_ptr(l, i,
275 struct btrfs_block_group_item); 262 struct btrfs_block_group_item);
276 printk(KERN_INFO "\t\tblock group used %llu\n", 263 pr_info("\t\tblock group used %llu\n",
277 btrfs_disk_block_group_used(l, bi)); 264 btrfs_disk_block_group_used(l, bi));
278 break; 265 break;
279 case BTRFS_CHUNK_ITEM_KEY: 266 case BTRFS_CHUNK_ITEM_KEY:
@@ -287,38 +274,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
287 case BTRFS_DEV_EXTENT_KEY: 274 case BTRFS_DEV_EXTENT_KEY:
288 dev_extent = btrfs_item_ptr(l, i, 275 dev_extent = btrfs_item_ptr(l, i,
289 struct btrfs_dev_extent); 276 struct btrfs_dev_extent);
290 printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n" 277 pr_info("\t\tdev extent chunk_tree %llu\n\t\tchunk objectid %llu chunk offset %llu length %llu\n",
291 "\t\tchunk objectid %llu chunk offset %llu "
292 "length %llu\n",
293 btrfs_dev_extent_chunk_tree(l, dev_extent), 278 btrfs_dev_extent_chunk_tree(l, dev_extent),
294 btrfs_dev_extent_chunk_objectid(l, dev_extent), 279 btrfs_dev_extent_chunk_objectid(l, dev_extent),
295 btrfs_dev_extent_chunk_offset(l, dev_extent), 280 btrfs_dev_extent_chunk_offset(l, dev_extent),
296 btrfs_dev_extent_length(l, dev_extent)); 281 btrfs_dev_extent_length(l, dev_extent));
297 break; 282 break;
298 case BTRFS_PERSISTENT_ITEM_KEY: 283 case BTRFS_PERSISTENT_ITEM_KEY:
299 printk(KERN_INFO "\t\tpersistent item objectid %llu offset %llu\n", 284 pr_info("\t\tpersistent item objectid %llu offset %llu\n",
300 key.objectid, key.offset); 285 key.objectid, key.offset);
301 switch (key.objectid) { 286 switch (key.objectid) {
302 case BTRFS_DEV_STATS_OBJECTID: 287 case BTRFS_DEV_STATS_OBJECTID:
303 printk(KERN_INFO "\t\tdevice stats\n"); 288 pr_info("\t\tdevice stats\n");
304 break; 289 break;
305 default: 290 default:
306 printk(KERN_INFO "\t\tunknown persistent item\n"); 291 pr_info("\t\tunknown persistent item\n");
307 } 292 }
308 break; 293 break;
309 case BTRFS_TEMPORARY_ITEM_KEY: 294 case BTRFS_TEMPORARY_ITEM_KEY:
310 printk(KERN_INFO "\t\ttemporary item objectid %llu offset %llu\n", 295 pr_info("\t\ttemporary item objectid %llu offset %llu\n",
311 key.objectid, key.offset); 296 key.objectid, key.offset);
312 switch (key.objectid) { 297 switch (key.objectid) {
313 case BTRFS_BALANCE_OBJECTID: 298 case BTRFS_BALANCE_OBJECTID:
314 printk(KERN_INFO "\t\tbalance status\n"); 299 pr_info("\t\tbalance status\n");
315 break; 300 break;
316 default: 301 default:
317 printk(KERN_INFO "\t\tunknown temporary item\n"); 302 pr_info("\t\tunknown temporary item\n");
318 } 303 }
319 break; 304 break;
320 case BTRFS_DEV_REPLACE_KEY: 305 case BTRFS_DEV_REPLACE_KEY:
321 printk(KERN_INFO "\t\tdev replace\n"); 306 pr_info("\t\tdev replace\n");
322 break; 307 break;
323 case BTRFS_UUID_KEY_SUBVOL: 308 case BTRFS_UUID_KEY_SUBVOL:
324 case BTRFS_UUID_KEY_RECEIVED_SUBVOL: 309 case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
@@ -343,12 +328,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
343 btrfs_print_leaf(root, c); 328 btrfs_print_leaf(root, c);
344 return; 329 return;
345 } 330 }
346 btrfs_info(root->fs_info, "node %llu level %d total ptrs %d free spc %u", 331 btrfs_info(root->fs_info,
347 btrfs_header_bytenr(c), level, nr, 332 "node %llu level %d total ptrs %d free spc %u",
348 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); 333 btrfs_header_bytenr(c), level, nr,
334 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
349 for (i = 0; i < nr; i++) { 335 for (i = 0; i < nr; i++) {
350 btrfs_node_key_to_cpu(c, &key, i); 336 btrfs_node_key_to_cpu(c, &key, i);
351 printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n", 337 pr_info("\tkey %d (%llu %u %llu) block %llu\n",
352 i, key.objectid, key.type, key.offset, 338 i, key.objectid, key.type, key.offset,
353 btrfs_node_blockptr(c, i)); 339 btrfs_node_blockptr(c, i));
354 } 340 }
@@ -356,6 +342,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
356 struct extent_buffer *next = read_tree_block(root, 342 struct extent_buffer *next = read_tree_block(root,
357 btrfs_node_blockptr(c, i), 343 btrfs_node_blockptr(c, i),
358 btrfs_node_ptr_generation(c, i)); 344 btrfs_node_ptr_generation(c, i));
345 if (IS_ERR(next)) {
346 continue;
347 } else if (!extent_buffer_uptodate(next)) {
348 free_extent_buffer(next);
349 continue;
350 }
351
359 if (btrfs_is_leaf(next) && 352 if (btrfs_is_leaf(next) &&
360 level != 1) 353 level != 1)
361 BUG(); 354 BUG();
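
The btrfs_print_tree() hunk above stops assuming read_tree_block() always hands back a usable buffer: an ERR_PTR() return is skipped outright, and a buffer that is not up to date is released with free_extent_buffer() before moving on. For reference, such errors travel inside the pointer itself via the <linux/err.h> helpers; the sketch below uses an invented struct and lookup function, not btrfs code.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/printk.h>

struct demo_item { int id; };
static struct demo_item demo_one = { .id = 1 };

/* invented helper: return the item, or an errno encoded with ERR_PTR() */
static struct demo_item *demo_lookup(int id)
{
        if (id != demo_one.id)
                return ERR_PTR(-ENOENT);
        return &demo_one;
}

static void demo_use(int id)
{
        struct demo_item *item = demo_lookup(id);

        if (IS_ERR(item)) {
                pr_warn("lookup failed: %ld\n", PTR_ERR(item));
                return;
        }
        pr_info("found item %d\n", item->id);   /* item is a real pointer here */
}

The separate extent_buffer_uptodate() test covers the case where a buffer is returned but its read did not complete cleanly; as the new code shows, that reference still has to be dropped.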
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8db2e29fdcf4..11f4fffe503e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -309,7 +309,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
309 u64 flags = 0; 309 u64 flags = 0;
310 u64 rescan_progress = 0; 310 u64 rescan_progress = 0;
311 311
312 if (!fs_info->quota_enabled) 312 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
313 return 0; 313 return 0;
314 314
315 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS); 315 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
@@ -360,8 +360,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
360 fs_info->generation) { 360 fs_info->generation) {
361 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 361 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
362 btrfs_err(fs_info, 362 btrfs_err(fs_info,
363 "qgroup generation mismatch, " 363 "qgroup generation mismatch, marked as inconsistent");
364 "marked as inconsistent");
365 } 364 }
366 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, 365 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
367 ptr); 366 ptr);
@@ -463,13 +462,11 @@ next2:
463 } 462 }
464out: 463out:
465 fs_info->qgroup_flags |= flags; 464 fs_info->qgroup_flags |= flags;
466 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) { 465 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
467 fs_info->quota_enabled = 0; 466 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
468 fs_info->pending_quota_state = 0; 467 else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
469 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN && 468 ret >= 0)
470 ret >= 0) {
471 ret = qgroup_rescan_init(fs_info, rescan_progress, 0); 469 ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
472 }
473 btrfs_free_path(path); 470 btrfs_free_path(path);
474 471
475 if (ret < 0) { 472 if (ret < 0) {
@@ -847,7 +844,7 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
847 } 844 }
848 ret = 0; 845 ret = 0;
849out: 846out:
850 root->fs_info->pending_quota_state = 0; 847 set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
851 btrfs_free_path(path); 848 btrfs_free_path(path);
852 return ret; 849 return ret;
853} 850}
@@ -868,7 +865,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
868 865
869 mutex_lock(&fs_info->qgroup_ioctl_lock); 866 mutex_lock(&fs_info->qgroup_ioctl_lock);
870 if (fs_info->quota_root) { 867 if (fs_info->quota_root) {
871 fs_info->pending_quota_state = 1; 868 set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
872 goto out; 869 goto out;
873 } 870 }
874 871
@@ -964,7 +961,7 @@ out_add_root:
964 } 961 }
965 spin_lock(&fs_info->qgroup_lock); 962 spin_lock(&fs_info->qgroup_lock);
966 fs_info->quota_root = quota_root; 963 fs_info->quota_root = quota_root;
967 fs_info->pending_quota_state = 1; 964 set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
968 spin_unlock(&fs_info->qgroup_lock); 965 spin_unlock(&fs_info->qgroup_lock);
969out_free_path: 966out_free_path:
970 btrfs_free_path(path); 967 btrfs_free_path(path);
@@ -993,8 +990,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
993 mutex_lock(&fs_info->qgroup_ioctl_lock); 990 mutex_lock(&fs_info->qgroup_ioctl_lock);
994 if (!fs_info->quota_root) 991 if (!fs_info->quota_root)
995 goto out; 992 goto out;
996 fs_info->quota_enabled = 0; 993 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
997 fs_info->pending_quota_state = 0; 994 set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
998 btrfs_qgroup_wait_for_completion(fs_info, false); 995 btrfs_qgroup_wait_for_completion(fs_info, false);
999 spin_lock(&fs_info->qgroup_lock); 996 spin_lock(&fs_info->qgroup_lock);
1000 quota_root = fs_info->quota_root; 997 quota_root = fs_info->quota_root;
@@ -1490,7 +1487,8 @@ int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
1490 struct btrfs_delayed_ref_root *delayed_refs; 1487 struct btrfs_delayed_ref_root *delayed_refs;
1491 int ret; 1488 int ret;
1492 1489
1493 if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0) 1490 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1491 || bytenr == 0 || num_bytes == 0)
1494 return 0; 1492 return 0;
1495 if (WARN_ON(trans == NULL)) 1493 if (WARN_ON(trans == NULL))
1496 return -EINVAL; 1494 return -EINVAL;
@@ -1713,7 +1711,7 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1713 if (old_roots) 1711 if (old_roots)
1714 nr_old_roots = old_roots->nnodes; 1712 nr_old_roots = old_roots->nnodes;
1715 1713
1716 if (!fs_info->quota_enabled) 1714 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1717 goto out_free; 1715 goto out_free;
1718 BUG_ON(!fs_info->quota_root); 1716 BUG_ON(!fs_info->quota_root);
1719 1717
@@ -1833,10 +1831,14 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
1833 if (!quota_root) 1831 if (!quota_root)
1834 goto out; 1832 goto out;
1835 1833
1836 if (!fs_info->quota_enabled && fs_info->pending_quota_state) 1834 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
1835 test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
1837 start_rescan_worker = 1; 1836 start_rescan_worker = 1;
1838 1837
1839 fs_info->quota_enabled = fs_info->pending_quota_state; 1838 if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
1839 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1840 if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
1841 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1840 1842
1841 spin_lock(&fs_info->qgroup_lock); 1843 spin_lock(&fs_info->qgroup_lock);
1842 while (!list_empty(&fs_info->dirty_qgroups)) { 1844 while (!list_empty(&fs_info->dirty_qgroups)) {
@@ -1855,7 +1857,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
1855 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1857 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1856 spin_lock(&fs_info->qgroup_lock); 1858 spin_lock(&fs_info->qgroup_lock);
1857 } 1859 }
1858 if (fs_info->quota_enabled) 1860 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1859 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 1861 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
1860 else 1862 else
1861 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 1863 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -1900,7 +1902,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1900 u64 nums; 1902 u64 nums;
1901 1903
1902 mutex_lock(&fs_info->qgroup_ioctl_lock); 1904 mutex_lock(&fs_info->qgroup_ioctl_lock);
1903 if (!fs_info->quota_enabled) 1905 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1904 goto out; 1906 goto out;
1905 1907
1906 if (!quota_root) { 1908 if (!quota_root) {
@@ -1991,8 +1993,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1991 ret = update_qgroup_limit_item(trans, quota_root, dstgroup); 1993 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
1992 if (ret) { 1994 if (ret) {
1993 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1995 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1994 btrfs_info(fs_info, "unable to update quota limit for %llu", 1996 btrfs_info(fs_info,
1995 dstgroup->qgroupid); 1997 "unable to update quota limit for %llu",
1998 dstgroup->qgroupid);
1996 goto unlock; 1999 goto unlock;
1997 } 2000 }
1998 } 2001 }
@@ -2226,8 +2229,7 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2226 if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq) 2229 if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2227 return; 2230 return;
2228 btrfs_err(trans->fs_info, 2231 btrfs_err(trans->fs_info,
2229 "qgroups not uptodate in trans handle %p: list is%s empty, " 2232 "qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x",
2230 "seq is %#x.%x",
2231 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not", 2233 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2232 (u32)(trans->delayed_ref_elem.seq >> 32), 2234 (u32)(trans->delayed_ref_elem.seq >> 32),
2233 (u32)trans->delayed_ref_elem.seq); 2235 (u32)trans->delayed_ref_elem.seq);
@@ -2255,10 +2257,11 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2255 &fs_info->qgroup_rescan_progress, 2257 &fs_info->qgroup_rescan_progress,
2256 path, 1, 0); 2258 path, 1, 0);
2257 2259
2258 pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n", 2260 btrfs_debug(fs_info,
2259 fs_info->qgroup_rescan_progress.objectid, 2261 "current progress key (%llu %u %llu), search_slot ret %d",
2260 fs_info->qgroup_rescan_progress.type, 2262 fs_info->qgroup_rescan_progress.objectid,
2261 fs_info->qgroup_rescan_progress.offset, ret); 2263 fs_info->qgroup_rescan_progress.type,
2264 fs_info->qgroup_rescan_progress.offset, ret);
2262 2265
2263 if (ret) { 2266 if (ret) {
2264 /* 2267 /*
@@ -2347,7 +2350,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2347 err = PTR_ERR(trans); 2350 err = PTR_ERR(trans);
2348 break; 2351 break;
2349 } 2352 }
2350 if (!fs_info->quota_enabled) { 2353 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
2351 err = -EINTR; 2354 err = -EINTR;
2352 } else { 2355 } else {
2353 err = qgroup_rescan_leaf(fs_info, path, trans); 2356 err = qgroup_rescan_leaf(fs_info, path, trans);
@@ -2388,7 +2391,7 @@ out:
2388 ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root); 2391 ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
2389 if (ret < 0) { 2392 if (ret < 0) {
2390 err = ret; 2393 err = ret;
2391 btrfs_err(fs_info, "fail to update qgroup status: %d\n", err); 2394 btrfs_err(fs_info, "fail to update qgroup status: %d", err);
2392 } 2395 }
2393 btrfs_end_transaction(trans, fs_info->quota_root); 2396 btrfs_end_transaction(trans, fs_info->quota_root);
2394 2397
@@ -2578,8 +2581,8 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
2578 struct ulist_iterator uiter; 2581 struct ulist_iterator uiter;
2579 int ret; 2582 int ret;
2580 2583
2581 if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) || 2584 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2582 len == 0) 2585 !is_fstree(root->objectid) || len == 0)
2583 return 0; 2586 return 0;
2584 2587
2585 changeset.bytes_changed = 0; 2588 changeset.bytes_changed = 0;
@@ -2676,8 +2679,8 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
2676{ 2679{
2677 int ret; 2680 int ret;
2678 2681
2679 if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) || 2682 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2680 num_bytes == 0) 2683 !is_fstree(root->objectid) || num_bytes == 0)
2681 return 0; 2684 return 0;
2682 2685
2683 BUG_ON(num_bytes != round_down(num_bytes, root->nodesize)); 2686 BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
@@ -2692,7 +2695,8 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2692{ 2695{
2693 int reserved; 2696 int reserved;
2694 2697
2695 if (!root->fs_info->quota_enabled || !is_fstree(root->objectid)) 2698 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2699 !is_fstree(root->objectid))
2696 return; 2700 return;
2697 2701
2698 reserved = atomic_xchg(&root->qgroup_meta_rsv, 0); 2702 reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
@@ -2703,7 +2707,8 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2703 2707
2704void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) 2708void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2705{ 2709{
2706 if (!root->fs_info->quota_enabled || !is_fstree(root->objectid)) 2710 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2711 !is_fstree(root->objectid))
2707 return; 2712 return;
2708 2713
2709 BUG_ON(num_bytes != round_down(num_bytes, root->nodesize)); 2714 BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
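
Throughout qgroup.c the old fs_info->quota_enabled / pending_quota_state fields become bits in fs_info->flags (BTRFS_FS_QUOTA_ENABLED, BTRFS_FS_QUOTA_ENABLING, BTRFS_FS_QUOTA_DISABLING), manipulated with the generic atomic bitops. A self-contained sketch of that pattern follows; the struct, flag names, and functions are stand-ins for illustration, with the real definitions living in the btrfs headers.

#include <linux/bitops.h>
#include <linux/types.h>

enum {
        DEMO_QUOTA_ENABLED,     /* stand-ins for the BTRFS_FS_QUOTA_* bits */
        DEMO_QUOTA_ENABLING,
        DEMO_QUOTA_DISABLING,
};

struct demo_fs_info {
        unsigned long flags;    /* one word of atomically updated flag bits */
};

/* request a state change; readers see nothing until it is committed */
static void demo_request_quota(struct demo_fs_info *info, bool enable)
{
        set_bit(enable ? DEMO_QUOTA_ENABLING : DEMO_QUOTA_DISABLING,
                &info->flags);
}

/* commit the pending state exactly once, as btrfs_run_qgroups() now does */
static void demo_commit_quota(struct demo_fs_info *info)
{
        if (test_and_clear_bit(DEMO_QUOTA_ENABLING, &info->flags))
                set_bit(DEMO_QUOTA_ENABLED, &info->flags);
        if (test_and_clear_bit(DEMO_QUOTA_DISABLING, &info->flags))
                clear_bit(DEMO_QUOTA_ENABLED, &info->flags);
}

static bool demo_quota_enabled(struct demo_fs_info *info)
{
        return test_bit(DEMO_QUOTA_ENABLED, &info->flags);
}

Each of these calls is a single atomic read-modify-write (or read) of one word, which is what makes packing several related booleans into one flags field attractive.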
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index cd8d302a1f61..d016d4a79864 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2143,7 +2143,10 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2143 2143
2144 rbio->faila = find_logical_bio_stripe(rbio, bio); 2144 rbio->faila = find_logical_bio_stripe(rbio, bio);
2145 if (rbio->faila == -1) { 2145 if (rbio->faila == -1) {
2146 BUG(); 2146 btrfs_warn(root->fs_info,
2147 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2148 __func__, (u64)bio->bi_iter.bi_sector << 9,
2149 (u64)bio->bi_iter.bi_size, bbio->map_type);
2147 if (generic_io) 2150 if (generic_io)
2148 btrfs_put_bbio(bbio); 2151 btrfs_put_bbio(bbio);
2149 kfree(rbio); 2152 kfree(rbio);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 8428db7cd88f..75bab76739be 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -820,7 +820,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
820 820
821 spin_lock(&fs_info->reada_lock); 821 spin_lock(&fs_info->reada_lock);
822 list_for_each_entry(device, &fs_devices->devices, dev_list) { 822 list_for_each_entry(device, &fs_devices->devices, dev_list) {
823 printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid, 823 btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
824 atomic_read(&device->reada_in_flight)); 824 atomic_read(&device->reada_in_flight));
825 index = 0; 825 index = 0;
826 while (1) { 826 while (1) {
@@ -829,17 +829,17 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
829 (void **)&zone, index, 1); 829 (void **)&zone, index, 1);
830 if (ret == 0) 830 if (ret == 0)
831 break; 831 break;
832 printk(KERN_DEBUG " zone %llu-%llu elems %llu locked " 832 pr_debug(" zone %llu-%llu elems %llu locked %d devs",
833 "%d devs", zone->start, zone->end, zone->elems, 833 zone->start, zone->end, zone->elems,
834 zone->locked); 834 zone->locked);
835 for (j = 0; j < zone->ndevs; ++j) { 835 for (j = 0; j < zone->ndevs; ++j) {
836 printk(KERN_CONT " %lld", 836 pr_cont(" %lld",
837 zone->devs[j]->devid); 837 zone->devs[j]->devid);
838 } 838 }
839 if (device->reada_curr_zone == zone) 839 if (device->reada_curr_zone == zone)
840 printk(KERN_CONT " curr off %llu", 840 pr_cont(" curr off %llu",
841 device->reada_next - zone->start); 841 device->reada_next - zone->start);
842 printk(KERN_CONT "\n"); 842 pr_cont("\n");
843 index = (zone->end >> PAGE_SHIFT) + 1; 843 index = (zone->end >> PAGE_SHIFT) + 1;
844 } 844 }
845 cnt = 0; 845 cnt = 0;
@@ -851,21 +851,20 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
851 (void **)&re, index, 1); 851 (void **)&re, index, 1);
852 if (ret == 0) 852 if (ret == 0)
853 break; 853 break;
854 printk(KERN_DEBUG 854 pr_debug(" re: logical %llu size %u empty %d scheduled %d",
855 " re: logical %llu size %u empty %d scheduled %d",
856 re->logical, fs_info->tree_root->nodesize, 855 re->logical, fs_info->tree_root->nodesize,
857 list_empty(&re->extctl), re->scheduled); 856 list_empty(&re->extctl), re->scheduled);
858 857
859 for (i = 0; i < re->nzones; ++i) { 858 for (i = 0; i < re->nzones; ++i) {
860 printk(KERN_CONT " zone %llu-%llu devs", 859 pr_cont(" zone %llu-%llu devs",
861 re->zones[i]->start, 860 re->zones[i]->start,
862 re->zones[i]->end); 861 re->zones[i]->end);
863 for (j = 0; j < re->zones[i]->ndevs; ++j) { 862 for (j = 0; j < re->zones[i]->ndevs; ++j) {
864 printk(KERN_CONT " %lld", 863 pr_cont(" %lld",
865 re->zones[i]->devs[j]->devid); 864 re->zones[i]->devs[j]->devid);
866 } 865 }
867 } 866 }
868 printk(KERN_CONT "\n"); 867 pr_cont("\n");
869 index = (re->logical >> PAGE_SHIFT) + 1; 868 index = (re->logical >> PAGE_SHIFT) + 1;
870 if (++cnt > 15) 869 if (++cnt > 15)
871 break; 870 break;
@@ -885,20 +884,19 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
885 index = (re->logical >> PAGE_SHIFT) + 1; 884 index = (re->logical >> PAGE_SHIFT) + 1;
886 continue; 885 continue;
887 } 886 }
888 printk(KERN_DEBUG 887 pr_debug("re: logical %llu size %u list empty %d scheduled %d",
889 "re: logical %llu size %u list empty %d scheduled %d",
890 re->logical, fs_info->tree_root->nodesize, 888 re->logical, fs_info->tree_root->nodesize,
891 list_empty(&re->extctl), re->scheduled); 889 list_empty(&re->extctl), re->scheduled);
892 for (i = 0; i < re->nzones; ++i) { 890 for (i = 0; i < re->nzones; ++i) {
893 printk(KERN_CONT " zone %llu-%llu devs", 891 pr_cont(" zone %llu-%llu devs",
894 re->zones[i]->start, 892 re->zones[i]->start,
895 re->zones[i]->end); 893 re->zones[i]->end);
896 for (j = 0; j < re->zones[i]->ndevs; ++j) { 894 for (j = 0; j < re->zones[i]->ndevs; ++j) {
897 printk(KERN_CONT " %lld", 895 pr_cont(" %lld",
898 re->zones[i]->devs[j]->devid); 896 re->zones[i]->devs[j]->devid);
899 } 897 }
900 } 898 }
901 printk(KERN_CONT "\n"); 899 pr_cont("\n");
902 index = (re->logical >> PAGE_SHIFT) + 1; 900 index = (re->logical >> PAGE_SHIFT) + 1;
903 } 901 }
904 spin_unlock(&fs_info->reada_lock); 902 spin_unlock(&fs_info->reada_lock);
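
The dump_devs() output above is built from a pr_debug()/pr_cont() pair: the pr_debug() call opens the log line and each pr_cont() (formerly printk(KERN_CONT ...)) appends to it instead of starting a new record, which is how the device ids for a zone end up on a single line. A tiny illustrative helper, not taken from the patch:

#include <linux/printk.h>

static void demo_print_ids(const int *ids, int n)
{
        int i;

        pr_debug("ids:");               /* opens the line */
        for (i = 0; i < n; i++)
                pr_cont(" %d", ids[i]); /* appends to the same line */
        pr_cont("\n");                  /* terminates it */
}

One caveat: pr_debug() is a no-op unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled, while pr_cont() always prints, so output built this way is only coherent in a debug configuration.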
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c0c13dc6fe12..0ec8ffa37ab0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -337,8 +337,9 @@ static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
337 rb_node); 337 rb_node);
338 if (bnode->root) 338 if (bnode->root)
339 fs_info = bnode->root->fs_info; 339 fs_info = bnode->root->fs_info;
340 btrfs_panic(fs_info, errno, "Inconsistency in backref cache " 340 btrfs_panic(fs_info, errno,
341 "found at offset %llu", bytenr); 341 "Inconsistency in backref cache found at offset %llu",
342 bytenr);
342} 343}
343 344
344/* 345/*
@@ -923,9 +924,16 @@ again:
923 path2->slots[level]--; 924 path2->slots[level]--;
924 925
925 eb = path2->nodes[level]; 926 eb = path2->nodes[level];
926 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != 927 if (btrfs_node_blockptr(eb, path2->slots[level]) !=
927 cur->bytenr); 928 cur->bytenr) {
928 929 btrfs_err(root->fs_info,
930 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
931 cur->bytenr, level - 1, root->objectid,
932 node_key->objectid, node_key->type,
933 node_key->offset);
934 err = -ENOENT;
935 goto out;
936 }
929 lower = cur; 937 lower = cur;
930 need_check = true; 938 need_check = true;
931 for (; level < BTRFS_MAX_LEVEL; level++) { 939 for (; level < BTRFS_MAX_LEVEL; level++) {
@@ -1296,9 +1304,9 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
1296 node->bytenr, &node->rb_node); 1304 node->bytenr, &node->rb_node);
1297 spin_unlock(&rc->reloc_root_tree.lock); 1305 spin_unlock(&rc->reloc_root_tree.lock);
1298 if (rb_node) { 1306 if (rb_node) {
1299 btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found " 1307 btrfs_panic(root->fs_info, -EEXIST,
1300 "for start=%llu while inserting into relocation " 1308 "Duplicate root found for start=%llu while inserting into relocation tree",
1301 "tree", node->bytenr); 1309 node->bytenr);
1302 kfree(node); 1310 kfree(node);
1303 return -EEXIST; 1311 return -EEXIST;
1304 } 1312 }
@@ -2350,6 +2358,10 @@ void free_reloc_roots(struct list_head *list)
2350 while (!list_empty(list)) { 2358 while (!list_empty(list)) {
2351 reloc_root = list_entry(list->next, struct btrfs_root, 2359 reloc_root = list_entry(list->next, struct btrfs_root,
2352 root_list); 2360 root_list);
2361 free_extent_buffer(reloc_root->node);
2362 free_extent_buffer(reloc_root->commit_root);
2363 reloc_root->node = NULL;
2364 reloc_root->commit_root = NULL;
2353 __del_reloc_root(reloc_root); 2365 __del_reloc_root(reloc_root);
2354 } 2366 }
2355} 2367}
@@ -2686,11 +2698,15 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2686 2698
2687 if (!upper->eb) { 2699 if (!upper->eb) {
2688 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2700 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2689 if (ret < 0) { 2701 if (ret) {
2690 err = ret; 2702 if (ret < 0)
2703 err = ret;
2704 else
2705 err = -ENOENT;
2706
2707 btrfs_release_path(path);
2691 break; 2708 break;
2692 } 2709 }
2693 BUG_ON(ret > 0);
2694 2710
2695 if (!upper->eb) { 2711 if (!upper->eb) {
2696 upper->eb = path->nodes[upper->level]; 2712 upper->eb = path->nodes[upper->level];
@@ -3203,7 +3219,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3203 nr++; 3219 nr++;
3204 } 3220 }
3205 3221
3206 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); 3222 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
3207 set_page_dirty(page); 3223 set_page_dirty(page);
3208 3224
3209 unlock_extent(&BTRFS_I(inode)->io_tree, 3225 unlock_extent(&BTRFS_I(inode)->io_tree,
@@ -3952,7 +3968,7 @@ static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
3952 struct btrfs_key key; 3968 struct btrfs_key key;
3953 int ret = 0; 3969 int ret = 0;
3954 3970
3955 if (!fs_info->quota_enabled) 3971 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
3956 return 0; 3972 return 0;
3957 3973
3958 /* 3974 /*
@@ -4365,8 +4381,9 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
4365 goto out; 4381 goto out;
4366 } 4382 }
4367 4383
4368 btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu", 4384 btrfs_info(extent_root->fs_info,
4369 rc->block_group->key.objectid, rc->block_group->flags); 4385 "relocating block group %llu flags %llu",
4386 rc->block_group->key.objectid, rc->block_group->flags);
4370 4387
4371 btrfs_wait_block_group_reservations(rc->block_group); 4388 btrfs_wait_block_group_reservations(rc->block_group);
4372 btrfs_wait_nocow_writers(rc->block_group); 4389 btrfs_wait_nocow_writers(rc->block_group);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 091296062456..edae751e870c 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -46,12 +46,7 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
46 != btrfs_root_generation_v2(item)) { 46 != btrfs_root_generation_v2(item)) {
47 if (btrfs_root_generation_v2(item) != 0) { 47 if (btrfs_root_generation_v2(item) != 0) {
48 btrfs_warn(eb->fs_info, 48 btrfs_warn(eb->fs_info,
49 "mismatching " 49 "mismatching generation and generation_v2 found in root item. This root was probably mounted with an older kernel. Resetting all new fields.");
50 "generation and generation_v2 "
51 "found in root item. This root "
52 "was probably mounted with an "
53 "older kernel. Resetting all "
54 "new fields.");
55 } 50 }
56 need_reset = 1; 51 need_reset = 1;
57 } 52 }
@@ -156,8 +151,9 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
156 151
157 if (ret != 0) { 152 if (ret != 0) {
158 btrfs_print_leaf(root, path->nodes[0]); 153 btrfs_print_leaf(root, path->nodes[0]);
159 btrfs_crit(root->fs_info, "unable to update root key %llu %u %llu", 154 btrfs_crit(root->fs_info,
160 key->objectid, key->type, key->offset); 155 "unable to update root key %llu %u %llu",
156 key->objectid, key->type, key->offset);
161 BUG_ON(1); 157 BUG_ON(1);
162 } 158 }
163 159
@@ -302,8 +298,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
302 if (IS_ERR(trans)) { 298 if (IS_ERR(trans)) {
303 err = PTR_ERR(trans); 299 err = PTR_ERR(trans);
304 btrfs_handle_fs_error(tree_root->fs_info, err, 300 btrfs_handle_fs_error(tree_root->fs_info, err,
305 "Failed to start trans to delete " 301 "Failed to start trans to delete orphan item");
306 "orphan item");
307 break; 302 break;
308 } 303 }
309 err = btrfs_del_orphan_item(trans, tree_root, 304 err = btrfs_del_orphan_item(trans, tree_root,
@@ -311,8 +306,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
311 btrfs_end_transaction(trans, tree_root); 306 btrfs_end_transaction(trans, tree_root);
312 if (err) { 307 if (err) {
313 btrfs_handle_fs_error(tree_root->fs_info, err, 308 btrfs_handle_fs_error(tree_root->fs_info, err,
314 "Failed to delete root orphan " 309 "Failed to delete root orphan item");
315 "item");
316 break; 310 break;
317 } 311 }
318 continue; 312 continue;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1d195d2b32c6..fffb9ab8526e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -575,23 +575,25 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
575 * hold all of the paths here 575 * hold all of the paths here
576 */ 576 */
577 for (i = 0; i < ipath->fspath->elem_cnt; ++i) 577 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
578 btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev " 578 btrfs_warn_in_rcu(fs_info,
579 "%s, sector %llu, root %llu, inode %llu, offset %llu, " 579 "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
580 "length %llu, links %u (path: %s)", swarn->errstr, 580 swarn->errstr, swarn->logical,
581 swarn->logical, rcu_str_deref(swarn->dev->name), 581 rcu_str_deref(swarn->dev->name),
582 (unsigned long long)swarn->sector, root, inum, offset, 582 (unsigned long long)swarn->sector,
583 min(isize - offset, (u64)PAGE_SIZE), nlink, 583 root, inum, offset,
584 (char *)(unsigned long)ipath->fspath->val[i]); 584 min(isize - offset, (u64)PAGE_SIZE), nlink,
585 (char *)(unsigned long)ipath->fspath->val[i]);
585 586
586 free_ipath(ipath); 587 free_ipath(ipath);
587 return 0; 588 return 0;
588 589
589err: 590err:
590 btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev " 591 btrfs_warn_in_rcu(fs_info,
591 "%s, sector %llu, root %llu, inode %llu, offset %llu: path " 592 "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
592 "resolving failed with ret=%d", swarn->errstr, 593 swarn->errstr, swarn->logical,
593 swarn->logical, rcu_str_deref(swarn->dev->name), 594 rcu_str_deref(swarn->dev->name),
594 (unsigned long long)swarn->sector, root, inum, offset, ret); 595 (unsigned long long)swarn->sector,
596 root, inum, offset, ret);
595 597
596 free_ipath(ipath); 598 free_ipath(ipath);
597 return 0; 599 return 0;
@@ -645,9 +647,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
645 item_size, &ref_root, 647 item_size, &ref_root,
646 &ref_level); 648 &ref_level);
647 btrfs_warn_in_rcu(fs_info, 649 btrfs_warn_in_rcu(fs_info,
648 "%s at logical %llu on dev %s, " 650 "%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
649 "sector %llu: metadata %s (level %d) in tree " 651 errstr, swarn.logical,
650 "%llu", errstr, swarn.logical,
651 rcu_str_deref(dev->name), 652 rcu_str_deref(dev->name),
652 (unsigned long long)swarn.sector, 653 (unsigned long long)swarn.sector,
653 ref_level ? "node" : "leaf", 654 ref_level ? "node" : "leaf",
@@ -1574,8 +1575,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1574 1575
1575 if (!page_bad->dev->bdev) { 1576 if (!page_bad->dev->bdev) {
1576 btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info, 1577 btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
1577 "scrub_repair_page_from_good_copy(bdev == NULL) " 1578 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1578 "is unexpected");
1579 return -EIO; 1579 return -EIO;
1580 } 1580 }
1581 1581
@@ -2961,7 +2961,8 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2961 (key.objectid < logic_start || 2961 (key.objectid < logic_start ||
2962 key.objectid + bytes > 2962 key.objectid + bytes >
2963 logic_start + map->stripe_len)) { 2963 logic_start + map->stripe_len)) {
2964 btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu", 2964 btrfs_err(fs_info,
2965 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2965 key.objectid, logic_start); 2966 key.objectid, logic_start);
2966 spin_lock(&sctx->stat_lock); 2967 spin_lock(&sctx->stat_lock);
2967 sctx->stat.uncorrectable_errors++; 2968 sctx->stat.uncorrectable_errors++;
@@ -3312,8 +3313,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3312 key.objectid + bytes > 3313 key.objectid + bytes >
3313 logical + map->stripe_len)) { 3314 logical + map->stripe_len)) {
3314 btrfs_err(fs_info, 3315 btrfs_err(fs_info,
3315 "scrub: tree block %llu spanning " 3316 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3316 "stripes, ignored. logical=%llu",
3317 key.objectid, logical); 3317 key.objectid, logical);
3318 spin_lock(&sctx->stat_lock); 3318 spin_lock(&sctx->stat_lock);
3319 sctx->stat.uncorrectable_errors++; 3319 sctx->stat.uncorrectable_errors++;
@@ -3640,7 +3640,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3640 */ 3640 */
3641 ro_set = 0; 3641 ro_set = 0;
3642 } else { 3642 } else {
3643 btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n", 3643 btrfs_warn(fs_info,
3644 "failed setting block group ro, ret=%d\n",
3644 ret); 3645 ret);
3645 btrfs_put_block_group(cache); 3646 btrfs_put_block_group(cache);
3646 break; 3647 break;
@@ -3861,8 +3862,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3861 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) { 3862 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3862 /* not supported for data w/o checksums */ 3863 /* not supported for data w/o checksums */
3863 btrfs_err_rl(fs_info, 3864 btrfs_err_rl(fs_info,
3864 "scrub: size assumption sectorsize != PAGE_SIZE " 3865 "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
3865 "(%d != %lu) fails",
3866 fs_info->chunk_root->sectorsize, PAGE_SIZE); 3866 fs_info->chunk_root->sectorsize, PAGE_SIZE);
3867 return -EINVAL; 3867 return -EINVAL;
3868 } 3868 }
@@ -3875,8 +3875,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3875 * would exhaust the array bounds of pagev member in 3875 * would exhaust the array bounds of pagev member in
3876 * struct scrub_block 3876 * struct scrub_block
3877 */ 3877 */
3878 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize " 3878 btrfs_err(fs_info,
3879 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", 3879 "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3880 fs_info->chunk_root->nodesize, 3880 fs_info->chunk_root->nodesize,
3881 SCRUB_MAX_PAGES_PER_BLOCK, 3881 SCRUB_MAX_PAGES_PER_BLOCK,
3882 fs_info->chunk_root->sectorsize, 3882 fs_info->chunk_root->sectorsize,
@@ -4202,10 +4202,10 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
4202 ret = iterate_inodes_from_logical(logical, fs_info, path, 4202 ret = iterate_inodes_from_logical(logical, fs_info, path,
4203 record_inode_for_nocow, nocow_ctx); 4203 record_inode_for_nocow, nocow_ctx);
4204 if (ret != 0 && ret != -ENOENT) { 4204 if (ret != 0 && ret != -ENOENT) {
4205 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, " 4205 btrfs_warn(fs_info,
4206 "phys %llu, len %llu, mir %u, ret %d", 4206 "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4207 logical, physical_for_dev_replace, len, mirror_num, 4207 logical, physical_for_dev_replace, len, mirror_num,
4208 ret); 4208 ret);
4209 not_written = 1; 4209 not_written = 1;
4210 goto out; 4210 goto out;
4211 } 4211 }
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 1379e59277e2..01bc36cec26e 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -36,10 +36,6 @@
36#include "transaction.h" 36#include "transaction.h"
37#include "compression.h" 37#include "compression.h"
38 38
39static int g_verbose = 0;
40
41#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
42
43/* 39/*
44 * A fs_path is a helper to dynamically build path names with unknown size. 40 * A fs_path is a helper to dynamically build path names with unknown size.
45 * It reallocates the internal buffer on demand. 41 * It reallocates the internal buffer on demand.
@@ -727,9 +723,10 @@ static int send_cmd(struct send_ctx *sctx)
727static int send_rename(struct send_ctx *sctx, 723static int send_rename(struct send_ctx *sctx,
728 struct fs_path *from, struct fs_path *to) 724 struct fs_path *from, struct fs_path *to)
729{ 725{
726 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
730 int ret; 727 int ret;
731 728
732verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start); 729 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
733 730
734 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); 731 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
735 if (ret < 0) 732 if (ret < 0)
@@ -751,9 +748,10 @@ out:
751static int send_link(struct send_ctx *sctx, 748static int send_link(struct send_ctx *sctx,
752 struct fs_path *path, struct fs_path *lnk) 749 struct fs_path *path, struct fs_path *lnk)
753{ 750{
751 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
754 int ret; 752 int ret;
755 753
756verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start); 754 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
757 755
758 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); 756 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
759 if (ret < 0) 757 if (ret < 0)
@@ -774,9 +772,10 @@ out:
774 */ 772 */
775static int send_unlink(struct send_ctx *sctx, struct fs_path *path) 773static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
776{ 774{
775 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
777 int ret; 776 int ret;
778 777
779verbose_printk("btrfs: send_unlink %s\n", path->start); 778 btrfs_debug(fs_info, "send_unlink %s", path->start);
780 779
781 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); 780 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
782 if (ret < 0) 781 if (ret < 0)
@@ -796,9 +795,10 @@ out:
796 */ 795 */
797static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) 796static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
798{ 797{
798 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
799 int ret; 799 int ret;
800 800
801verbose_printk("btrfs: send_rmdir %s\n", path->start); 801 btrfs_debug(fs_info, "send_rmdir %s", path->start);
802 802
803 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); 803 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
804 if (ret < 0) 804 if (ret < 0)
@@ -1313,6 +1313,7 @@ static int find_extent_clone(struct send_ctx *sctx,
1313 u64 ino_size, 1313 u64 ino_size,
1314 struct clone_root **found) 1314 struct clone_root **found)
1315{ 1315{
1316 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1316 int ret; 1317 int ret;
1317 int extent_type; 1318 int extent_type;
1318 u64 logical; 1319 u64 logical;
@@ -1371,10 +1372,10 @@ static int find_extent_clone(struct send_ctx *sctx,
1371 } 1372 }
1372 logical = disk_byte + btrfs_file_extent_offset(eb, fi); 1373 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1373 1374
1374 down_read(&sctx->send_root->fs_info->commit_root_sem); 1375 down_read(&fs_info->commit_root_sem);
1375 ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path, 1376 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1376 &found_key, &flags); 1377 &found_key, &flags);
1377 up_read(&sctx->send_root->fs_info->commit_root_sem); 1378 up_read(&fs_info->commit_root_sem);
1378 btrfs_release_path(tmp_path); 1379 btrfs_release_path(tmp_path);
1379 1380
1380 if (ret < 0) 1381 if (ret < 0)
@@ -1429,7 +1430,7 @@ static int find_extent_clone(struct send_ctx *sctx,
1429 extent_item_pos = logical - found_key.objectid; 1430 extent_item_pos = logical - found_key.objectid;
1430 else 1431 else
1431 extent_item_pos = 0; 1432 extent_item_pos = 0;
1432 ret = iterate_extent_inodes(sctx->send_root->fs_info, 1433 ret = iterate_extent_inodes(fs_info,
1433 found_key.objectid, extent_item_pos, 1, 1434 found_key.objectid, extent_item_pos, 1,
1434 __iterate_backrefs, backref_ctx); 1435 __iterate_backrefs, backref_ctx);
1435 1436
@@ -1439,20 +1440,18 @@ static int find_extent_clone(struct send_ctx *sctx,
1439 if (!backref_ctx->found_itself) { 1440 if (!backref_ctx->found_itself) {
1440 /* found a bug in backref code? */ 1441 /* found a bug in backref code? */
1441 ret = -EIO; 1442 ret = -EIO;
1442 btrfs_err(sctx->send_root->fs_info, "did not find backref in " 1443 btrfs_err(fs_info,
1443 "send_root. inode=%llu, offset=%llu, " 1444 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1444 "disk_byte=%llu found extent=%llu", 1445 ino, data_offset, disk_byte, found_key.objectid);
1445 ino, data_offset, disk_byte, found_key.objectid);
1446 goto out; 1446 goto out;
1447 } 1447 }
1448 1448
1449verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " 1449 btrfs_debug(fs_info,
1450 "ino=%llu, " 1450 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1451 "num_bytes=%llu, logical=%llu\n", 1451 data_offset, ino, num_bytes, logical);
1452 data_offset, ino, num_bytes, logical);
1453 1452
1454 if (!backref_ctx->found) 1453 if (!backref_ctx->found)
1455 verbose_printk("btrfs: no clones found\n"); 1454 btrfs_debug(fs_info, "no clones found");
1456 1455
1457 cur_clone_root = NULL; 1456 cur_clone_root = NULL;
1458 for (i = 0; i < sctx->clone_roots_cnt; i++) { 1457 for (i = 0; i < sctx->clone_roots_cnt; i++) {
@@ -2423,10 +2422,11 @@ out:
2423 2422
2424static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) 2423static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2425{ 2424{
2425 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2426 int ret = 0; 2426 int ret = 0;
2427 struct fs_path *p; 2427 struct fs_path *p;
2428 2428
2429verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size); 2429 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2430 2430
2431 p = fs_path_alloc(); 2431 p = fs_path_alloc();
2432 if (!p) 2432 if (!p)
@@ -2452,10 +2452,11 @@ out:
2452 2452
2453static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) 2453static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2454{ 2454{
2455 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2455 int ret = 0; 2456 int ret = 0;
2456 struct fs_path *p; 2457 struct fs_path *p;
2457 2458
2458verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode); 2459 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2459 2460
2460 p = fs_path_alloc(); 2461 p = fs_path_alloc();
2461 if (!p) 2462 if (!p)
@@ -2481,10 +2482,12 @@ out:
2481 2482
2482static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) 2483static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2483{ 2484{
2485 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2484 int ret = 0; 2486 int ret = 0;
2485 struct fs_path *p; 2487 struct fs_path *p;
2486 2488
2487verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid); 2489 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2490 ino, uid, gid);
2488 2491
2489 p = fs_path_alloc(); 2492 p = fs_path_alloc();
2490 if (!p) 2493 if (!p)
@@ -2511,6 +2514,7 @@ out:
2511 2514
2512static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) 2515static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2513{ 2516{
2517 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2514 int ret = 0; 2518 int ret = 0;
2515 struct fs_path *p = NULL; 2519 struct fs_path *p = NULL;
2516 struct btrfs_inode_item *ii; 2520 struct btrfs_inode_item *ii;
@@ -2519,7 +2523,7 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2519 struct btrfs_key key; 2523 struct btrfs_key key;
2520 int slot; 2524 int slot;
2521 2525
2522verbose_printk("btrfs: send_utimes %llu\n", ino); 2526 btrfs_debug(fs_info, "send_utimes %llu", ino);
2523 2527
2524 p = fs_path_alloc(); 2528 p = fs_path_alloc();
2525 if (!p) 2529 if (!p)
@@ -2573,6 +2577,7 @@ out:
2573 */ 2577 */
2574static int send_create_inode(struct send_ctx *sctx, u64 ino) 2578static int send_create_inode(struct send_ctx *sctx, u64 ino)
2575{ 2579{
2580 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2576 int ret = 0; 2581 int ret = 0;
2577 struct fs_path *p; 2582 struct fs_path *p;
2578 int cmd; 2583 int cmd;
@@ -2580,7 +2585,7 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino)
2580 u64 mode; 2585 u64 mode;
2581 u64 rdev; 2586 u64 rdev;
2582 2587
2583verbose_printk("btrfs: send_create_inode %llu\n", ino); 2588 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2584 2589
2585 p = fs_path_alloc(); 2590 p = fs_path_alloc();
2586 if (!p) 2591 if (!p)
@@ -3638,6 +3643,7 @@ out:
3638 */ 3643 */
3639static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) 3644static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3640{ 3645{
3646 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3641 int ret = 0; 3647 int ret = 0;
3642 struct recorded_ref *cur; 3648 struct recorded_ref *cur;
3643 struct recorded_ref *cur2; 3649 struct recorded_ref *cur2;
@@ -3650,7 +3656,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3650 u64 last_dir_ino_rm = 0; 3656 u64 last_dir_ino_rm = 0;
3651 bool can_rename = true; 3657 bool can_rename = true;
3652 3658
3653verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); 3659 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3654 3660
3655 /* 3661 /*
3656 * This should never happen as the root dir always has the same ref 3662 * This should never happen as the root dir always has the same ref
@@ -4398,12 +4404,8 @@ static int process_new_xattr(struct send_ctx *sctx)
4398 4404
4399static int process_deleted_xattr(struct send_ctx *sctx) 4405static int process_deleted_xattr(struct send_ctx *sctx)
4400{ 4406{
4401 int ret; 4407 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4402 4408 sctx->cmp_key, __process_deleted_xattr, sctx);
4403 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4404 sctx->cmp_key, __process_deleted_xattr, sctx);
4405
4406 return ret;
4407} 4409}
4408 4410
4409struct find_xattr_ctx { 4411struct find_xattr_ctx {
@@ -4664,6 +4666,7 @@ out:
4664 */ 4666 */
4665static int send_write(struct send_ctx *sctx, u64 offset, u32 len) 4667static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4666{ 4668{
4669 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4667 int ret = 0; 4670 int ret = 0;
4668 struct fs_path *p; 4671 struct fs_path *p;
4669 ssize_t num_read = 0; 4672 ssize_t num_read = 0;
@@ -4672,7 +4675,7 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4672 if (!p) 4675 if (!p)
4673 return -ENOMEM; 4676 return -ENOMEM;
4674 4677
4675verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len); 4678 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
4676 4679
4677 num_read = fill_read_buf(sctx, offset, len); 4680 num_read = fill_read_buf(sctx, offset, len);
4678 if (num_read <= 0) { 4681 if (num_read <= 0) {
@@ -4714,10 +4717,10 @@ static int send_clone(struct send_ctx *sctx,
4714 struct fs_path *p; 4717 struct fs_path *p;
4715 u64 gen; 4718 u64 gen;
4716 4719
4717verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, " 4720 btrfs_debug(sctx->send_root->fs_info,
4718 "clone_inode=%llu, clone_offset=%llu\n", offset, len, 4721 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
4719 clone_root->root->objectid, clone_root->ino, 4722 offset, len, clone_root->root->objectid, clone_root->ino,
4720 clone_root->offset); 4723 clone_root->offset);
4721 4724
4722 p = fs_path_alloc(); 4725 p = fs_path_alloc();
4723 if (!p) 4726 if (!p)
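
The send.c hunks above all follow the same shape: each helper gains a local fs_info taken from sctx->send_root->fs_info, and the old free-form verbose_printk() calls become btrfs_debug(fs_info, ...) so debug output is tied to a specific filesystem. Below is a minimal userspace sketch of that shape, not kernel code; the demo_* names are invented for the illustration and only the control flow mirrors the patch.

/*
 * Userspace sketch of the send.c conversion: derive a per-fs context first,
 * then route debug output through a helper that tags the filesystem.
 * demo_fs_info, demo_root, demo_send_ctx and demo_debug are made-up names.
 */
#include <stdarg.h>
#include <stdio.h>

struct demo_fs_info { const char *fsid; };
struct demo_root { struct demo_fs_info *fs_info; };
struct demo_send_ctx { struct demo_root *send_root; };

static void demo_debug(const struct demo_fs_info *fs_info, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printf("BTRFS debug (device %s): ", fs_info->fsid);	/* per-fs prefix */
	vprintf(fmt, args);
	putchar('\n');
	va_end(args);
}

static void demo_send_truncate(struct demo_send_ctx *sctx,
			       unsigned long long ino, unsigned long long size)
{
	struct demo_fs_info *fs_info = sctx->send_root->fs_info;

	demo_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
}

int main(void)
{
	struct demo_fs_info fi = { .fsid = "sda1" };
	struct demo_root root = { .fs_info = &fi };
	struct demo_send_ctx sctx = { .send_root = &root };

	demo_send_truncate(&sctx, 257, 4096);
	return 0;
}
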
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4071fe2bd098..74ed5aae6cea 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -151,12 +151,11 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
151 vaf.fmt = fmt; 151 vaf.fmt = fmt;
152 vaf.va = &args; 152 vaf.va = &args;
153 153
154 printk(KERN_CRIT 154 pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
155 "BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
156 sb->s_id, function, line, errno, errstr, &vaf); 155 sb->s_id, function, line, errno, errstr, &vaf);
157 va_end(args); 156 va_end(args);
158 } else { 157 } else {
159 printk(KERN_CRIT "BTRFS: error (device %s) in %s:%d: errno=%d %s\n", 158 pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
160 sb->s_id, function, line, errno, errstr); 159 sb->s_id, function, line, errno, errstr);
161 } 160 }
162#endif 161#endif
@@ -462,9 +461,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
462 case Opt_datasum: 461 case Opt_datasum:
463 if (btrfs_test_opt(info, NODATASUM)) { 462 if (btrfs_test_opt(info, NODATASUM)) {
464 if (btrfs_test_opt(info, NODATACOW)) 463 if (btrfs_test_opt(info, NODATACOW))
465 btrfs_info(root->fs_info, "setting datasum, datacow enabled"); 464 btrfs_info(root->fs_info,
465 "setting datasum, datacow enabled");
466 else 466 else
467 btrfs_info(root->fs_info, "setting datasum"); 467 btrfs_info(root->fs_info,
468 "setting datasum");
468 } 469 }
469 btrfs_clear_opt(info->mount_opt, NODATACOW); 470 btrfs_clear_opt(info->mount_opt, NODATACOW);
470 btrfs_clear_opt(info->mount_opt, NODATASUM); 471 btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -476,7 +477,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
476 btrfs_info(root->fs_info, 477 btrfs_info(root->fs_info,
477 "setting nodatacow, compression disabled"); 478 "setting nodatacow, compression disabled");
478 } else { 479 } else {
479 btrfs_info(root->fs_info, "setting nodatacow"); 480 btrfs_info(root->fs_info,
481 "setting nodatacow");
480 } 482 }
481 } 483 }
482 btrfs_clear_opt(info->mount_opt, COMPRESS); 484 btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -608,8 +610,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
608 info->alloc_start = memparse(num, NULL); 610 info->alloc_start = memparse(num, NULL);
609 mutex_unlock(&info->chunk_mutex); 611 mutex_unlock(&info->chunk_mutex);
610 kfree(num); 612 kfree(num);
611 btrfs_info(root->fs_info, "allocations start at %llu", 613 btrfs_info(root->fs_info,
612 info->alloc_start); 614 "allocations start at %llu",
615 info->alloc_start);
613 } else { 616 } else {
614 ret = -ENOMEM; 617 ret = -ENOMEM;
615 goto out; 618 goto out;
@@ -762,8 +765,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
762 goto out; 765 goto out;
763 } else if (intarg >= 0) { 766 } else if (intarg >= 0) {
764 info->check_integrity_print_mask = intarg; 767 info->check_integrity_print_mask = intarg;
765 btrfs_info(root->fs_info, "check_integrity_print_mask 0x%x", 768 btrfs_info(root->fs_info,
766 info->check_integrity_print_mask); 769 "check_integrity_print_mask 0x%x",
770 info->check_integrity_print_mask);
767 } else { 771 } else {
768 ret = -EINVAL; 772 ret = -EINVAL;
769 goto out; 773 goto out;
@@ -794,19 +798,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
794 intarg = 0; 798 intarg = 0;
795 ret = match_int(&args[0], &intarg); 799 ret = match_int(&args[0], &intarg);
796 if (ret < 0) { 800 if (ret < 0) {
797 btrfs_err(root->fs_info, "invalid commit interval"); 801 btrfs_err(root->fs_info,
802 "invalid commit interval");
798 ret = -EINVAL; 803 ret = -EINVAL;
799 goto out; 804 goto out;
800 } 805 }
801 if (intarg > 0) { 806 if (intarg > 0) {
802 if (intarg > 300) { 807 if (intarg > 300) {
803 btrfs_warn(root->fs_info, "excessive commit interval %d", 808 btrfs_warn(root->fs_info,
804 intarg); 809 "excessive commit interval %d",
810 intarg);
805 } 811 }
806 info->commit_interval = intarg; 812 info->commit_interval = intarg;
807 } else { 813 } else {
808 btrfs_info(root->fs_info, "using default commit interval %ds", 814 btrfs_info(root->fs_info,
809 BTRFS_DEFAULT_COMMIT_INTERVAL); 815 "using default commit interval %ds",
816 BTRFS_DEFAULT_COMMIT_INTERVAL);
810 info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 817 info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
811 } 818 }
812 break; 819 break;
@@ -827,7 +834,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
827 break; 834 break;
828#endif 835#endif
829 case Opt_err: 836 case Opt_err:
830 btrfs_info(root->fs_info, "unrecognized mount option '%s'", p); 837 btrfs_info(root->fs_info,
838 "unrecognized mount option '%s'", p);
831 ret = -EINVAL; 839 ret = -EINVAL;
832 goto out; 840 goto out;
833 default: 841 default:
@@ -916,9 +924,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
916 } 924 }
917 break; 925 break;
918 case Opt_subvolrootid: 926 case Opt_subvolrootid:
919 printk(KERN_WARNING 927 pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
920 "BTRFS: 'subvolrootid' mount option is deprecated and has "
921 "no effect\n");
922 break; 928 break;
923 case Opt_device: 929 case Opt_device:
924 device_name = match_strdup(&args[0]); 930 device_name = match_strdup(&args[0]);
@@ -1142,7 +1148,7 @@ static int btrfs_fill_super(struct super_block *sb,
1142 sb->s_iflags |= SB_I_CGROUPWB; 1148 sb->s_iflags |= SB_I_CGROUPWB;
1143 err = open_ctree(sb, fs_devices, (char *)data); 1149 err = open_ctree(sb, fs_devices, (char *)data);
1144 if (err) { 1150 if (err) {
1145 printk(KERN_ERR "BTRFS: open_ctree failed\n"); 1151 btrfs_err(fs_info, "open_ctree failed");
1146 return err; 1152 return err;
1147 } 1153 }
1148 1154
@@ -1440,12 +1446,13 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1440 1446
1441 if (!IS_ERR(root)) { 1447 if (!IS_ERR(root)) {
1442 struct super_block *s = root->d_sb; 1448 struct super_block *s = root->d_sb;
1449 struct btrfs_fs_info *fs_info = btrfs_sb(s);
1443 struct inode *root_inode = d_inode(root); 1450 struct inode *root_inode = d_inode(root);
1444 u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid; 1451 u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1445 1452
1446 ret = 0; 1453 ret = 0;
1447 if (!is_subvolume_inode(root_inode)) { 1454 if (!is_subvolume_inode(root_inode)) {
1448 pr_err("BTRFS: '%s' is not a valid subvolume\n", 1455 btrfs_err(fs_info, "'%s' is not a valid subvolume",
1449 subvol_name); 1456 subvol_name);
1450 ret = -EINVAL; 1457 ret = -EINVAL;
1451 } 1458 }
@@ -1455,8 +1462,9 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1455 * subvolume which was passed by ID is renamed and 1462 * subvolume which was passed by ID is renamed and
1456 * another subvolume is renamed over the old location. 1463 * another subvolume is renamed over the old location.
1457 */ 1464 */
1458 pr_err("BTRFS: subvol '%s' does not match subvolid %llu\n", 1465 btrfs_err(fs_info,
1459 subvol_name, subvol_objectid); 1466 "subvol '%s' does not match subvolid %llu",
1467 subvol_name, subvol_objectid);
1460 ret = -EINVAL; 1468 ret = -EINVAL;
1461 } 1469 }
1462 if (ret) { 1470 if (ret) {
@@ -1830,13 +1838,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1830 btrfs_info(fs_info, "creating UUID tree"); 1838 btrfs_info(fs_info, "creating UUID tree");
1831 ret = btrfs_create_uuid_tree(fs_info); 1839 ret = btrfs_create_uuid_tree(fs_info);
1832 if (ret) { 1840 if (ret) {
1833 btrfs_warn(fs_info, "failed to create the UUID tree %d", ret); 1841 btrfs_warn(fs_info,
1842 "failed to create the UUID tree %d",
1843 ret);
1834 goto restore; 1844 goto restore;
1835 } 1845 }
1836 } 1846 }
1837 sb->s_flags &= ~MS_RDONLY; 1847 sb->s_flags &= ~MS_RDONLY;
1838 1848
1839 fs_info->open = 1; 1849 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1840 } 1850 }
1841out: 1851out:
1842 wake_up_process(fs_info->transaction_kthread); 1852 wake_up_process(fs_info->transaction_kthread);
@@ -2346,7 +2356,7 @@ static void btrfs_interface_exit(void)
2346 2356
2347static void btrfs_print_mod_info(void) 2357static void btrfs_print_mod_info(void)
2348{ 2358{
2349 printk(KERN_INFO "Btrfs loaded, crc32c=%s" 2359 pr_info("Btrfs loaded, crc32c=%s"
2350#ifdef CONFIG_BTRFS_DEBUG 2360#ifdef CONFIG_BTRFS_DEBUG
2351 ", debug=on" 2361 ", debug=on"
2352#endif 2362#endif
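
The super.c hunks swap printk(KERN_CRIT/KERN_ERR/KERN_WARNING ...) for the shorter pr_crit()/pr_err()/pr_warn()/pr_info() forms (or btrfs_* helpers where an fs_info is at hand) and unsplit the format strings so each message can be grepped as one literal. As a rough userspace analogue of what such level-prefix macros do, here is a sketch; the demo_pr_* macros are invented stand-ins, not the kernel definitions, and they rely on GNU-style variadic macros.

/* Each macro folds a syslog-style level prefix into the format string,
 * keeping the whole message on one greppable line at the call site. */
#include <stdio.h>

#define demo_pr_crit(fmt, ...) fprintf(stderr, "<2>" fmt, ##__VA_ARGS__)
#define demo_pr_warn(fmt, ...) fprintf(stderr, "<4>" fmt, ##__VA_ARGS__)
#define demo_pr_info(fmt, ...) fprintf(stdout, "<6>" fmt, ##__VA_ARGS__)

int main(void)
{
	const char *dev = "sda1";

	demo_pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d\n",
		     dev, __func__, __LINE__, -5);
	demo_pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
	demo_pr_info("Btrfs loaded, crc32c=%s\n", "crc32c-generic");
	return 0;
}
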
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c6569905d3d1..1f157fba8940 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -77,7 +77,7 @@ static int can_modify_feature(struct btrfs_feature_attr *fa)
77 clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR; 77 clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
78 break; 78 break;
79 default: 79 default:
80 printk(KERN_WARNING "btrfs: sysfs: unknown feature set %d\n", 80 pr_warn("btrfs: sysfs: unknown feature set %d\n",
81 fa->feature_set); 81 fa->feature_set);
82 return 0; 82 return 0;
83 } 83 }
@@ -430,7 +430,8 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
430{ 430{
431 struct btrfs_fs_info *fs_info = to_fs_info(kobj); 431 struct btrfs_fs_info *fs_info = to_fs_info(kobj);
432 432
433 return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize); 433 return snprintf(buf, PAGE_SIZE, "%u\n",
434 fs_info->super_copy->sectorsize);
434} 435}
435 436
436BTRFS_ATTR(sectorsize, btrfs_sectorsize_show); 437BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
@@ -440,7 +441,8 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
440{ 441{
441 struct btrfs_fs_info *fs_info = to_fs_info(kobj); 442 struct btrfs_fs_info *fs_info = to_fs_info(kobj);
442 443
443 return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize); 444 return snprintf(buf, PAGE_SIZE, "%u\n",
445 fs_info->super_copy->sectorsize);
444} 446}
445 447
446BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show); 448BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
@@ -836,9 +838,18 @@ static int btrfs_init_debugfs(void)
836 if (!btrfs_debugfs_root_dentry) 838 if (!btrfs_debugfs_root_dentry)
837 return -ENOMEM; 839 return -ENOMEM;
838 840
839 debugfs_create_u64("test", S_IRUGO | S_IWUGO, btrfs_debugfs_root_dentry, 841 /*
842 * Example code, how to export data through debugfs.
843 *
844 * file: /sys/kernel/debug/btrfs/test
845 * contents of: btrfs_debugfs_test
846 */
847#ifdef CONFIG_BTRFS_DEBUG
848 debugfs_create_u64("test", S_IRUGO | S_IWUSR, btrfs_debugfs_root_dentry,
840 &btrfs_debugfs_test); 849 &btrfs_debugfs_test);
841#endif 850#endif
851
852#endif
842 return 0; 853 return 0;
843} 854}
844 855
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 9f72aeda9220..0bf46808ce8f 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -968,7 +968,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
968 /* [BTRFS_MAX_EXTENT_SIZE] */ 968 /* [BTRFS_MAX_EXTENT_SIZE] */
969 BTRFS_I(inode)->outstanding_extents++; 969 BTRFS_I(inode)->outstanding_extents++;
970 ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 970 ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
971 NULL); 971 NULL, 0);
972 if (ret) { 972 if (ret) {
973 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 973 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
974 goto out; 974 goto out;
@@ -984,7 +984,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
984 BTRFS_I(inode)->outstanding_extents++; 984 BTRFS_I(inode)->outstanding_extents++;
985 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, 985 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
986 BTRFS_MAX_EXTENT_SIZE + sectorsize - 1, 986 BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
987 NULL); 987 NULL, 0);
988 if (ret) { 988 if (ret) {
989 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 989 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
990 goto out; 990 goto out;
@@ -1019,7 +1019,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
1019 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, 1019 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
1020 (BTRFS_MAX_EXTENT_SIZE >> 1) 1020 (BTRFS_MAX_EXTENT_SIZE >> 1)
1021 + sectorsize - 1, 1021 + sectorsize - 1,
1022 NULL); 1022 NULL, 0);
1023 if (ret) { 1023 if (ret) {
1024 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1024 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1025 goto out; 1025 goto out;
@@ -1042,7 +1042,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
1042 ret = btrfs_set_extent_delalloc(inode, 1042 ret = btrfs_set_extent_delalloc(inode,
1043 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize, 1043 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
1044 (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1, 1044 (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
1045 NULL); 1045 NULL, 0);
1046 if (ret) { 1046 if (ret) {
1047 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1047 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1048 goto out; 1048 goto out;
@@ -1060,7 +1060,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
1060 BTRFS_I(inode)->outstanding_extents++; 1060 BTRFS_I(inode)->outstanding_extents++;
1061 ret = btrfs_set_extent_delalloc(inode, 1061 ret = btrfs_set_extent_delalloc(inode,
1062 BTRFS_MAX_EXTENT_SIZE + sectorsize, 1062 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1063 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL); 1063 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
1064 if (ret) { 1064 if (ret) {
1065 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1065 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1066 goto out; 1066 goto out;
@@ -1097,7 +1097,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
1097 BTRFS_I(inode)->outstanding_extents++; 1097 BTRFS_I(inode)->outstanding_extents++;
1098 ret = btrfs_set_extent_delalloc(inode, 1098 ret = btrfs_set_extent_delalloc(inode,
1099 BTRFS_MAX_EXTENT_SIZE + sectorsize, 1099 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1100 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL); 1100 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
1101 if (ret) { 1101 if (ret) {
1102 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1102 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1103 goto out; 1103 goto out;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 4407fef7c16c..ca7cb5e6d385 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -480,7 +480,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
480 */ 480 */
481 root->fs_info->tree_root = root; 481 root->fs_info->tree_root = root;
482 root->fs_info->quota_root = root; 482 root->fs_info->quota_root = root;
483 root->fs_info->quota_enabled = 1; 483 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
484 484
485 /* 485 /*
486 * Can't use bytenr 0, some things freak out 486 * Can't use bytenr 0, some things freak out
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c294313ea2c8..9517de0e668c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -65,8 +65,9 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
65 BUG_ON(!list_empty(&transaction->list)); 65 BUG_ON(!list_empty(&transaction->list));
66 WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root)); 66 WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
67 if (transaction->delayed_refs.pending_csums) 67 if (transaction->delayed_refs.pending_csums)
68 printk(KERN_ERR "pending csums is %llu\n", 68 btrfs_err(transaction->fs_info,
69 transaction->delayed_refs.pending_csums); 69 "pending csums is %llu",
70 transaction->delayed_refs.pending_csums);
70 while (!list_empty(&transaction->pending_chunks)) { 71 while (!list_empty(&transaction->pending_chunks)) {
71 struct extent_map *em; 72 struct extent_map *em;
72 73
@@ -245,6 +246,7 @@ loop:
245 return -EROFS; 246 return -EROFS;
246 } 247 }
247 248
249 cur_trans->fs_info = fs_info;
248 atomic_set(&cur_trans->num_writers, 1); 250 atomic_set(&cur_trans->num_writers, 1);
249 extwriter_counter_init(cur_trans, type); 251 extwriter_counter_init(cur_trans, type);
250 init_waitqueue_head(&cur_trans->writer_wait); 252 init_waitqueue_head(&cur_trans->writer_wait);
@@ -272,11 +274,9 @@ loop:
272 */ 274 */
273 smp_mb(); 275 smp_mb();
274 if (!list_empty(&fs_info->tree_mod_seq_list)) 276 if (!list_empty(&fs_info->tree_mod_seq_list))
275 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when " 277 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
276 "creating a fresh transaction\n");
277 if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) 278 if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
278 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when " 279 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
279 "creating a fresh transaction\n");
280 atomic64_set(&fs_info->tree_mod_seq, 0); 280 atomic64_set(&fs_info->tree_mod_seq, 0);
281 281
282 spin_lock_init(&cur_trans->delayed_refs.lock); 282 spin_lock_init(&cur_trans->delayed_refs.lock);
@@ -441,7 +441,7 @@ static void wait_current_trans(struct btrfs_root *root)
441 441
442static int may_wait_transaction(struct btrfs_root *root, int type) 442static int may_wait_transaction(struct btrfs_root *root, int type)
443{ 443{
444 if (root->fs_info->log_root_recovering) 444 if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
445 return 0; 445 return 0;
446 446
447 if (type == TRANS_USERSPACE) 447 if (type == TRANS_USERSPACE)
@@ -549,11 +549,8 @@ again:
549 } 549 }
550 } while (ret == -EBUSY); 550 } while (ret == -EBUSY);
551 551
552 if (ret < 0) { 552 if (ret < 0)
553 /* We must get the transaction if we are JOIN_NOLOCK. */
554 BUG_ON(type == TRANS_JOIN_NOLOCK);
555 goto join_fail; 553 goto join_fail;
556 }
557 554
558 cur_trans = root->fs_info->running_transaction; 555 cur_trans = root->fs_info->running_transaction;
559 556
@@ -993,7 +990,6 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
993 struct extent_state *cached_state = NULL; 990 struct extent_state *cached_state = NULL;
994 u64 start = 0; 991 u64 start = 0;
995 u64 end; 992 u64 end;
996 struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
997 bool errors = false; 993 bool errors = false;
998 994
999 while (!find_first_extent_bit(dirty_pages, start, &start, &end, 995 while (!find_first_extent_bit(dirty_pages, start, &start, &end,
@@ -1025,17 +1021,17 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
1025 1021
1026 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 1022 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
1027 if ((mark & EXTENT_DIRTY) && 1023 if ((mark & EXTENT_DIRTY) &&
1028 test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, 1024 test_and_clear_bit(BTRFS_FS_LOG1_ERR,
1029 &btree_ino->runtime_flags)) 1025 &root->fs_info->flags))
1030 errors = true; 1026 errors = true;
1031 1027
1032 if ((mark & EXTENT_NEW) && 1028 if ((mark & EXTENT_NEW) &&
1033 test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, 1029 test_and_clear_bit(BTRFS_FS_LOG2_ERR,
1034 &btree_ino->runtime_flags)) 1030 &root->fs_info->flags))
1035 errors = true; 1031 errors = true;
1036 } else { 1032 } else {
1037 if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR, 1033 if (test_and_clear_bit(BTRFS_FS_BTREE_ERR,
1038 &btree_ino->runtime_flags)) 1034 &root->fs_info->flags))
1039 errors = true; 1035 errors = true;
1040 } 1036 }
1041 1037
@@ -1300,11 +1296,11 @@ int btrfs_defrag_root(struct btrfs_root *root)
1300 btrfs_btree_balance_dirty(info->tree_root); 1296 btrfs_btree_balance_dirty(info->tree_root);
1301 cond_resched(); 1297 cond_resched();
1302 1298
1303 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN) 1299 if (btrfs_fs_closing(info) || ret != -EAGAIN)
1304 break; 1300 break;
1305 1301
1306 if (btrfs_defrag_cancelled(root->fs_info)) { 1302 if (btrfs_defrag_cancelled(info)) {
1307 pr_debug("BTRFS: defrag_root cancelled\n"); 1303 btrfs_debug(info, "defrag_root cancelled");
1308 ret = -EAGAIN; 1304 ret = -EAGAIN;
1309 break; 1305 break;
1310 } 1306 }
@@ -1335,7 +1331,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1335 * kick in anyway. 1331 * kick in anyway.
1336 */ 1332 */
1337 mutex_lock(&fs_info->qgroup_ioctl_lock); 1333 mutex_lock(&fs_info->qgroup_ioctl_lock);
1338 if (!fs_info->quota_enabled) { 1334 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
1339 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1335 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1340 return 0; 1336 return 0;
1341 } 1337 }
@@ -1712,7 +1708,7 @@ static void update_super_roots(struct btrfs_root *root)
1712 super->root_level = root_item->level; 1708 super->root_level = root_item->level;
1713 if (btrfs_test_opt(root->fs_info, SPACE_CACHE)) 1709 if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
1714 super->cache_generation = root_item->generation; 1710 super->cache_generation = root_item->generation;
1715 if (root->fs_info->update_uuid_tree_gen) 1711 if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &root->fs_info->flags))
1716 super->uuid_tree_generation = root_item->generation; 1712 super->uuid_tree_generation = root_item->generation;
1717} 1713}
1718 1714
@@ -1919,7 +1915,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1919{ 1915{
1920 struct btrfs_transaction *cur_trans = trans->transaction; 1916 struct btrfs_transaction *cur_trans = trans->transaction;
1921 struct btrfs_transaction *prev_trans = NULL; 1917 struct btrfs_transaction *prev_trans = NULL;
1922 struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
1923 int ret; 1918 int ret;
1924 1919
1925 /* Stop the commit early if ->aborted is set */ 1920 /* Stop the commit early if ->aborted is set */
@@ -2213,8 +2208,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2213 btrfs_update_commit_device_size(root->fs_info); 2208 btrfs_update_commit_device_size(root->fs_info);
2214 btrfs_update_commit_device_bytes_used(root, cur_trans); 2209 btrfs_update_commit_device_bytes_used(root, cur_trans);
2215 2210
2216 clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags); 2211 clear_bit(BTRFS_FS_LOG1_ERR, &root->fs_info->flags);
2217 clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags); 2212 clear_bit(BTRFS_FS_LOG2_ERR, &root->fs_info->flags);
2218 2213
2219 btrfs_trans_release_chunk_metadata(trans); 2214 btrfs_trans_release_chunk_metadata(trans);
2220 2215
@@ -2328,7 +2323,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2328 list_del_init(&root->root_list); 2323 list_del_init(&root->root_list);
2329 spin_unlock(&fs_info->trans_lock); 2324 spin_unlock(&fs_info->trans_lock);
2330 2325
2331 pr_debug("BTRFS: cleaner removing %llu\n", root->objectid); 2326 btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);
2332 2327
2333 btrfs_kill_all_delayed_nodes(root); 2328 btrfs_kill_all_delayed_nodes(root);
2334 2329
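
Several hunks in transaction.c (and in tree-log.c, qgroup-tests.c and volumes.c below) replace individual fs_info booleans such as quota_enabled, log_root_recovering and update_uuid_tree_gen with named bits in a single fs_info->flags word, driven through the kernel's atomic set_bit()/clear_bit()/test_bit() helpers. The following self-contained C sketch models that pattern in userspace with C11 atomics; the demo_* names and bit numbers are invented for the example and do not match the real BTRFS_FS_* values.

/* One atomic flags word instead of several independent int fields. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum {
	DEMO_FS_QUOTA_ENABLED,
	DEMO_FS_LOG_RECOVERING,
	DEMO_FS_UPDATE_UUID_TREE_GEN,
};

struct demo_fs_info { _Atomic unsigned long flags; };

static void demo_set_bit(int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

static void demo_clear_bit(int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_and(addr, ~(1UL << nr));
}

static bool demo_test_bit(int nr, _Atomic unsigned long *addr)
{
	return atomic_load(addr) & (1UL << nr);
}

int main(void)
{
	struct demo_fs_info fs_info = { .flags = 0 };

	demo_set_bit(DEMO_FS_LOG_RECOVERING, &fs_info.flags);
	if (demo_test_bit(DEMO_FS_LOG_RECOVERING, &fs_info.flags))
		puts("log recovery in progress");
	demo_clear_bit(DEMO_FS_LOG_RECOVERING, &fs_info.flags);
	return 0;
}
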
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index efb122643380..6cf0d37d4f76 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -82,6 +82,7 @@ struct btrfs_transaction {
82 spinlock_t dropped_roots_lock; 82 spinlock_t dropped_roots_lock;
83 struct btrfs_delayed_ref_root delayed_refs; 83 struct btrfs_delayed_ref_root delayed_refs;
84 int aborted; 84 int aborted;
85 struct btrfs_fs_info *fs_info;
85}; 86};
86 87
87#define __TRANS_FREEZABLE (1U << 0) 88#define __TRANS_FREEZABLE (1U << 0)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8a84ebd8e7cc..528cae123dc9 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5579,7 +5579,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5579 if (!path) 5579 if (!path)
5580 return -ENOMEM; 5580 return -ENOMEM;
5581 5581
5582 fs_info->log_root_recovering = 1; 5582 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5583 5583
5584 trans = btrfs_start_transaction(fs_info->tree_root, 0); 5584 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5585 if (IS_ERR(trans)) { 5585 if (IS_ERR(trans)) {
@@ -5592,8 +5592,8 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5592 5592
5593 ret = walk_log_tree(trans, log_root_tree, &wc); 5593 ret = walk_log_tree(trans, log_root_tree, &wc);
5594 if (ret) { 5594 if (ret) {
5595 btrfs_handle_fs_error(fs_info, ret, "Failed to pin buffers while " 5595 btrfs_handle_fs_error(fs_info, ret,
5596 "recovering log root tree."); 5596 "Failed to pin buffers while recovering log root tree.");
5597 goto error; 5597 goto error;
5598 } 5598 }
5599 5599
@@ -5639,8 +5639,8 @@ again:
5639 free_extent_buffer(log->node); 5639 free_extent_buffer(log->node);
5640 free_extent_buffer(log->commit_root); 5640 free_extent_buffer(log->commit_root);
5641 kfree(log); 5641 kfree(log);
5642 btrfs_handle_fs_error(fs_info, ret, "Couldn't read target root " 5642 btrfs_handle_fs_error(fs_info, ret,
5643 "for tree log recovery."); 5643 "Couldn't read target root for tree log recovery.");
5644 goto error; 5644 goto error;
5645 } 5645 }
5646 5646
@@ -5689,7 +5689,7 @@ again:
5689 5689
5690 free_extent_buffer(log_root_tree->node); 5690 free_extent_buffer(log_root_tree->node);
5691 log_root_tree->log_root = NULL; 5691 log_root_tree->log_root = NULL;
5692 fs_info->log_root_recovering = 0; 5692 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5693 kfree(log_root_tree); 5693 kfree(log_root_tree);
5694 5694
5695 return 0; 5695 return 0;
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 778282944530..7fc89e4adb41 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -69,8 +69,9 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
69 ret = -ENOENT; 69 ret = -ENOENT;
70 70
71 if (!IS_ALIGNED(item_size, sizeof(u64))) { 71 if (!IS_ALIGNED(item_size, sizeof(u64))) {
72 btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!", 72 btrfs_warn(uuid_root->fs_info,
73 (unsigned long)item_size); 73 "uuid item with illegal size %lu!",
74 (unsigned long)item_size);
74 goto out; 75 goto out;
75 } 76 }
76 while (item_size) { 77 while (item_size) {
@@ -137,10 +138,10 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
137 offset = btrfs_item_ptr_offset(eb, slot); 138 offset = btrfs_item_ptr_offset(eb, slot);
138 offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le); 139 offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
139 } else if (ret < 0) { 140 } else if (ret < 0) {
140 btrfs_warn(uuid_root->fs_info, "insert uuid item failed %d " 141 btrfs_warn(uuid_root->fs_info,
141 "(0x%016llx, 0x%016llx) type %u!", 142 "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
142 ret, (unsigned long long)key.objectid, 143 ret, (unsigned long long)key.objectid,
143 (unsigned long long)key.offset, type); 144 (unsigned long long)key.offset, type);
144 goto out; 145 goto out;
145 } 146 }
146 147
@@ -184,8 +185,8 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
184 185
185 ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1); 186 ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
186 if (ret < 0) { 187 if (ret < 0) {
187 btrfs_warn(uuid_root->fs_info, "error %d while searching for uuid item!", 188 btrfs_warn(uuid_root->fs_info,
188 ret); 189 "error %d while searching for uuid item!", ret);
189 goto out; 190 goto out;
190 } 191 }
191 if (ret > 0) { 192 if (ret > 0) {
@@ -198,8 +199,9 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
198 offset = btrfs_item_ptr_offset(eb, slot); 199 offset = btrfs_item_ptr_offset(eb, slot);
199 item_size = btrfs_item_size_nr(eb, slot); 200 item_size = btrfs_item_size_nr(eb, slot);
200 if (!IS_ALIGNED(item_size, sizeof(u64))) { 201 if (!IS_ALIGNED(item_size, sizeof(u64))) {
201 btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!", 202 btrfs_warn(uuid_root->fs_info,
202 (unsigned long)item_size); 203 "uuid item with illegal size %lu!",
204 (unsigned long)item_size);
203 ret = -ENOENT; 205 ret = -ENOENT;
204 goto out; 206 goto out;
205 } 207 }
@@ -299,8 +301,9 @@ again_search_slot:
299 offset = btrfs_item_ptr_offset(leaf, slot); 301 offset = btrfs_item_ptr_offset(leaf, slot);
300 item_size = btrfs_item_size_nr(leaf, slot); 302 item_size = btrfs_item_size_nr(leaf, slot);
301 if (!IS_ALIGNED(item_size, sizeof(u64))) { 303 if (!IS_ALIGNED(item_size, sizeof(u64))) {
302 btrfs_warn(fs_info, "uuid item with illegal size %lu!", 304 btrfs_warn(fs_info,
303 (unsigned long)item_size); 305 "uuid item with illegal size %lu!",
306 (unsigned long)item_size);
304 goto skip; 307 goto skip;
305 } 308 }
306 while (item_size) { 309 while (item_size) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 035efce603a9..71a60cc01451 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -859,7 +859,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
859 blkdev_put(device->bdev, device->mode); 859 blkdev_put(device->bdev, device->mode);
860} 860}
861 861
862static void btrfs_close_one_device(struct btrfs_device *device) 862static void btrfs_prepare_close_one_device(struct btrfs_device *device)
863{ 863{
864 struct btrfs_fs_devices *fs_devices = device->fs_devices; 864 struct btrfs_fs_devices *fs_devices = device->fs_devices;
865 struct btrfs_device *new_device; 865 struct btrfs_device *new_device;
@@ -877,8 +877,6 @@ static void btrfs_close_one_device(struct btrfs_device *device)
877 if (device->missing) 877 if (device->missing)
878 fs_devices->missing_devices--; 878 fs_devices->missing_devices--;
879 879
880 btrfs_close_bdev(device);
881
882 new_device = btrfs_alloc_device(NULL, &device->devid, 880 new_device = btrfs_alloc_device(NULL, &device->devid,
883 device->uuid); 881 device->uuid);
884 BUG_ON(IS_ERR(new_device)); /* -ENOMEM */ 882 BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
@@ -892,23 +890,39 @@ static void btrfs_close_one_device(struct btrfs_device *device)
892 890
893 list_replace_rcu(&device->dev_list, &new_device->dev_list); 891 list_replace_rcu(&device->dev_list, &new_device->dev_list);
894 new_device->fs_devices = device->fs_devices; 892 new_device->fs_devices = device->fs_devices;
895
896 call_rcu(&device->rcu, free_device);
897} 893}
898 894
899static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 895static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
900{ 896{
901 struct btrfs_device *device, *tmp; 897 struct btrfs_device *device, *tmp;
898 struct list_head pending_put;
899
900 INIT_LIST_HEAD(&pending_put);
902 901
903 if (--fs_devices->opened > 0) 902 if (--fs_devices->opened > 0)
904 return 0; 903 return 0;
905 904
906 mutex_lock(&fs_devices->device_list_mutex); 905 mutex_lock(&fs_devices->device_list_mutex);
907 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) { 906 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
908 btrfs_close_one_device(device); 907 btrfs_prepare_close_one_device(device);
908 list_add(&device->dev_list, &pending_put);
909 } 909 }
910 mutex_unlock(&fs_devices->device_list_mutex); 910 mutex_unlock(&fs_devices->device_list_mutex);
911 911
912 /*
913 * btrfs_show_devname() is using the device_list_mutex,
914 * sometimes call to blkdev_put() leads vfs calling
915 * into this func. So do put outside of device_list_mutex,
916 * as of now.
917 */
918 while (!list_empty(&pending_put)) {
919 device = list_first_entry(&pending_put,
920 struct btrfs_device, dev_list);
921 list_del(&device->dev_list);
922 btrfs_close_bdev(device);
923 call_rcu(&device->rcu, free_device);
924 }
925
912 WARN_ON(fs_devices->open_devices); 926 WARN_ON(fs_devices->open_devices);
913 WARN_ON(fs_devices->rw_devices); 927 WARN_ON(fs_devices->rw_devices);
914 fs_devices->opened = 0; 928 fs_devices->opened = 0;
@@ -1140,12 +1154,12 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1140 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 1154 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1141 if (ret > 0) { 1155 if (ret > 0) {
1142 if (disk_super->label[0]) { 1156 if (disk_super->label[0]) {
1143 printk(KERN_INFO "BTRFS: device label %s ", disk_super->label); 1157 pr_info("BTRFS: device label %s ", disk_super->label);
1144 } else { 1158 } else {
1145 printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid); 1159 pr_info("BTRFS: device fsid %pU ", disk_super->fsid);
1146 } 1160 }
1147 1161
1148 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path); 1162 pr_cont("devid %llu transid %llu %s\n", devid, transid, path);
1149 ret = 0; 1163 ret = 0;
1150 } 1164 }
1151 if (!ret && fs_devices_ret) 1165 if (!ret && fs_devices_ret)
@@ -1846,7 +1860,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
1846 u64 num_devices; 1860 u64 num_devices;
1847 int ret = 0; 1861 int ret = 0;
1848 bool clear_super = false; 1862 bool clear_super = false;
1849 char *dev_name = NULL;
1850 1863
1851 mutex_lock(&uuid_mutex); 1864 mutex_lock(&uuid_mutex);
1852 1865
@@ -1882,11 +1895,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
1882 list_del_init(&device->dev_alloc_list); 1895 list_del_init(&device->dev_alloc_list);
1883 device->fs_devices->rw_devices--; 1896 device->fs_devices->rw_devices--;
1884 unlock_chunks(root); 1897 unlock_chunks(root);
1885 dev_name = kstrdup(device->name->str, GFP_KERNEL);
1886 if (!dev_name) {
1887 ret = -ENOMEM;
1888 goto error_undo;
1889 }
1890 clear_super = true; 1898 clear_super = true;
1891 } 1899 }
1892 1900
@@ -1936,14 +1944,21 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
1936 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device); 1944 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1937 } 1945 }
1938 1946
1939 btrfs_close_bdev(device);
1940
1941 call_rcu(&device->rcu, free_device);
1942
1943 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1; 1947 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1944 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices); 1948 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1945 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 1949 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1946 1950
1951 /*
1952 * at this point, the device is zero sized and detached from
1953 * the devices list. All that's left is to zero out the old
1954 * supers and free the device.
1955 */
1956 if (device->writeable)
1957 btrfs_scratch_superblocks(device->bdev, device->name->str);
1958
1959 btrfs_close_bdev(device);
1960 call_rcu(&device->rcu, free_device);
1961
1947 if (cur_devices->open_devices == 0) { 1962 if (cur_devices->open_devices == 0) {
1948 struct btrfs_fs_devices *fs_devices; 1963 struct btrfs_fs_devices *fs_devices;
1949 fs_devices = root->fs_info->fs_devices; 1964 fs_devices = root->fs_info->fs_devices;
@@ -1962,24 +1977,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
1962 root->fs_info->num_tolerated_disk_barrier_failures = 1977 root->fs_info->num_tolerated_disk_barrier_failures =
1963 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info); 1978 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1964 1979
1965 /*
1966 * at this point, the device is zero sized. We want to
1967 * remove it from the devices list and zero out the old super
1968 */
1969 if (clear_super) {
1970 struct block_device *bdev;
1971
1972 bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
1973 root->fs_info->bdev_holder);
1974 if (!IS_ERR(bdev)) {
1975 btrfs_scratch_superblocks(bdev, dev_name);
1976 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1977 }
1978 }
1979
1980out: 1980out:
1981 kfree(dev_name);
1982
1983 mutex_unlock(&uuid_mutex); 1981 mutex_unlock(&uuid_mutex);
1984 return ret; 1982 return ret;
1985 1983
@@ -2494,9 +2492,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2494 ret = btrfs_relocate_sys_chunks(root); 2492 ret = btrfs_relocate_sys_chunks(root);
2495 if (ret < 0) 2493 if (ret < 0)
2496 btrfs_handle_fs_error(root->fs_info, ret, 2494 btrfs_handle_fs_error(root->fs_info, ret,
2497 "Failed to relocate sys chunks after " 2495 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2498 "device initialization. This can be fixed "
2499 "using the \"btrfs balance\" command.");
2500 trans = btrfs_attach_transaction(root); 2496 trans = btrfs_attach_transaction(root);
2501 if (IS_ERR(trans)) { 2497 if (IS_ERR(trans)) {
2502 if (PTR_ERR(trans) == -ENOENT) 2498 if (PTR_ERR(trans) == -ENOENT)
@@ -2555,7 +2551,8 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2555 devices = &fs_info->fs_devices->devices; 2551 devices = &fs_info->fs_devices->devices;
2556 list_for_each_entry(device, devices, dev_list) { 2552 list_for_each_entry(device, devices, dev_list) {
2557 if (device->bdev == bdev) { 2553 if (device->bdev == bdev) {
2558 btrfs_err(fs_info, "target device is in the filesystem!"); 2554 btrfs_err(fs_info,
2555 "target device is in the filesystem!");
2559 ret = -EEXIST; 2556 ret = -EEXIST;
2560 goto error; 2557 goto error;
2561 } 2558 }
@@ -2564,7 +2561,8 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2564 2561
2565 if (i_size_read(bdev->bd_inode) < 2562 if (i_size_read(bdev->bd_inode) <
2566 btrfs_device_get_total_bytes(srcdev)) { 2563 btrfs_device_get_total_bytes(srcdev)) {
2567 btrfs_err(fs_info, "target device is smaller than source device!"); 2564 btrfs_err(fs_info,
2565 "target device is smaller than source device!");
2568 ret = -EINVAL; 2566 ret = -EINVAL;
2569 goto error; 2567 goto error;
2570 } 2568 }
@@ -3698,7 +3696,7 @@ error:
3698 btrfs_free_path(path); 3696 btrfs_free_path(path);
3699 if (enospc_errors) { 3697 if (enospc_errors) {
3700 btrfs_info(fs_info, "%d enospc errors during balance", 3698 btrfs_info(fs_info, "%d enospc errors during balance",
3701 enospc_errors); 3699 enospc_errors);
3702 if (!ret) 3700 if (!ret)
3703 ret = -ENOSPC; 3701 ret = -ENOSPC;
3704 } 3702 }
@@ -3792,8 +3790,8 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
3792 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 3790 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3793 !(bctl->flags & BTRFS_BALANCE_METADATA) || 3791 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3794 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 3792 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3795 btrfs_err(fs_info, "with mixed groups data and " 3793 btrfs_err(fs_info,
3796 "metadata balance options must be the same"); 3794 "with mixed groups data and metadata balance options must be the same");
3797 ret = -EINVAL; 3795 ret = -EINVAL;
3798 goto out; 3796 goto out;
3799 } 3797 }
@@ -3815,23 +3813,23 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
3815 allowed |= (BTRFS_BLOCK_GROUP_RAID10 | 3813 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3816 BTRFS_BLOCK_GROUP_RAID6); 3814 BTRFS_BLOCK_GROUP_RAID6);
3817 if (validate_convert_profile(&bctl->data, allowed)) { 3815 if (validate_convert_profile(&bctl->data, allowed)) {
3818 btrfs_err(fs_info, "unable to start balance with target " 3816 btrfs_err(fs_info,
3819 "data profile %llu", 3817 "unable to start balance with target data profile %llu",
3820 bctl->data.target); 3818 bctl->data.target);
3821 ret = -EINVAL; 3819 ret = -EINVAL;
3822 goto out; 3820 goto out;
3823 } 3821 }
3824 if (validate_convert_profile(&bctl->meta, allowed)) { 3822 if (validate_convert_profile(&bctl->meta, allowed)) {
3825 btrfs_err(fs_info, 3823 btrfs_err(fs_info,
3826 "unable to start balance with target metadata profile %llu", 3824 "unable to start balance with target metadata profile %llu",
3827 bctl->meta.target); 3825 bctl->meta.target);
3828 ret = -EINVAL; 3826 ret = -EINVAL;
3829 goto out; 3827 goto out;
3830 } 3828 }
3831 if (validate_convert_profile(&bctl->sys, allowed)) { 3829 if (validate_convert_profile(&bctl->sys, allowed)) {
3832 btrfs_err(fs_info, 3830 btrfs_err(fs_info,
3833 "unable to start balance with target system profile %llu", 3831 "unable to start balance with target system profile %llu",
3834 bctl->sys.target); 3832 bctl->sys.target);
3835 ret = -EINVAL; 3833 ret = -EINVAL;
3836 goto out; 3834 goto out;
3837 } 3835 }
@@ -3851,10 +3849,11 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
3851 (fs_info->avail_metadata_alloc_bits & allowed) && 3849 (fs_info->avail_metadata_alloc_bits & allowed) &&
3852 !(bctl->meta.target & allowed))) { 3850 !(bctl->meta.target & allowed))) {
3853 if (bctl->flags & BTRFS_BALANCE_FORCE) { 3851 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3854 btrfs_info(fs_info, "force reducing metadata integrity"); 3852 btrfs_info(fs_info,
3853 "force reducing metadata integrity");
3855 } else { 3854 } else {
3856 btrfs_err(fs_info, "balance will reduce metadata " 3855 btrfs_err(fs_info,
3857 "integrity, use force if you want this"); 3856 "balance will reduce metadata integrity, use force if you want this");
3858 ret = -EINVAL; 3857 ret = -EINVAL;
3859 goto out; 3858 goto out;
3860 } 3859 }
@@ -3864,8 +3863,8 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
3864 if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) < 3863 if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
3865 btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) { 3864 btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
3866 btrfs_warn(fs_info, 3865 btrfs_warn(fs_info,
3867 "metadata profile 0x%llx has lower redundancy than data profile 0x%llx", 3866 "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3868 bctl->meta.target, bctl->data.target); 3867 bctl->meta.target, bctl->data.target);
3869 } 3868 }
3870 3869
3871 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3870 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
@@ -4221,7 +4220,7 @@ out:
4221 if (ret) 4220 if (ret)
4222 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); 4221 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4223 else 4222 else
4224 fs_info->update_uuid_tree_gen = 1; 4223 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4225 up(&fs_info->uuid_tree_rescan_sem); 4224 up(&fs_info->uuid_tree_rescan_sem);
4226 return 0; 4225 return 0;
4227} 4226}
@@ -4913,15 +4912,16 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4913 read_unlock(&em_tree->lock); 4912 read_unlock(&em_tree->lock);
4914 4913
4915 if (!em) { 4914 if (!em) {
4916 btrfs_crit(extent_root->fs_info, "unable to find logical " 4915 btrfs_crit(extent_root->fs_info,
4917 "%Lu len %Lu", chunk_offset, chunk_size); 4916 "unable to find logical %Lu len %Lu",
4917 chunk_offset, chunk_size);
4918 return -EINVAL; 4918 return -EINVAL;
4919 } 4919 }
4920 4920
4921 if (em->start != chunk_offset || em->len != chunk_size) { 4921 if (em->start != chunk_offset || em->len != chunk_size) {
4922 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted" 4922 btrfs_crit(extent_root->fs_info,
4923 " %Lu-%Lu, found %Lu-%Lu", chunk_offset, 4923 "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
4924 chunk_size, em->start, em->len); 4924 chunk_offset, chunk_size, em->start, em->len);
4925 free_extent_map(em); 4925 free_extent_map(em);
4926 return -EINVAL; 4926 return -EINVAL;
4927 } 4927 }
@@ -5154,9 +5154,9 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5154 } 5154 }
5155 5155
5156 if (em->start > logical || em->start + em->len < logical) { 5156 if (em->start > logical || em->start + em->len < logical) {
5157 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got " 5157 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
5158 "%Lu-%Lu", logical, logical+len, em->start, 5158 logical, logical+len, em->start,
5159 em->start + em->len); 5159 em->start + em->len);
5160 free_extent_map(em); 5160 free_extent_map(em);
5161 return 1; 5161 return 1;
5162 } 5162 }
@@ -5370,9 +5370,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
5370 } 5370 }
5371 5371
5372 if (em->start > logical || em->start + em->len < logical) { 5372 if (em->start > logical || em->start + em->len < logical) {
5373 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, " 5373 btrfs_crit(fs_info,
5374 "found %Lu-%Lu", logical, em->start, 5374 "found a bad mapping, wanted %Lu, found %Lu-%Lu",
5375 em->start + em->len); 5375 logical, em->start, em->start + em->len);
5376 free_extent_map(em); 5376 free_extent_map(em);
5377 return -EINVAL; 5377 return -EINVAL;
5378 } 5378 }
@@ -5390,9 +5390,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
5390 5390
5391 stripe_offset = stripe_nr * stripe_len; 5391 stripe_offset = stripe_nr * stripe_len;
5392 if (offset < stripe_offset) { 5392 if (offset < stripe_offset) {
5393 btrfs_crit(fs_info, "stripe math has gone wrong, " 5393 btrfs_crit(fs_info,
5394 "stripe_offset=%llu, offset=%llu, start=%llu, " 5394 "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
5395 "logical=%llu, stripe_len=%llu",
5396 stripe_offset, offset, em->start, logical, 5395 stripe_offset, offset, em->start, logical,
5397 stripe_len); 5396 stripe_len);
5398 free_extent_map(em); 5397 free_extent_map(em);
@@ -5642,8 +5641,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
5642 mirror_num = stripe_index + 1; 5641 mirror_num = stripe_index + 1;
5643 } 5642 }
5644 if (stripe_index >= map->num_stripes) { 5643 if (stripe_index >= map->num_stripes) {
5645 btrfs_crit(fs_info, "stripe index math went horribly wrong, " 5644 btrfs_crit(fs_info,
5646 "got stripe_index=%u, num_stripes=%u", 5645 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
5647 stripe_index, map->num_stripes); 5646 stripe_index, map->num_stripes);
5648 ret = -EINVAL; 5647 ret = -EINVAL;
5649 goto out; 5648 goto out;
@@ -5907,10 +5906,11 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
5907 mirror_num, need_raid_map); 5906 mirror_num, need_raid_map);
5908} 5907}
5909 5908
5910int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 5909int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
5911 u64 chunk_start, u64 physical, u64 devid, 5910 u64 chunk_start, u64 physical, u64 devid,
5912 u64 **logical, int *naddrs, int *stripe_len) 5911 u64 **logical, int *naddrs, int *stripe_len)
5913{ 5912{
5913 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5914 struct extent_map_tree *em_tree = &map_tree->map_tree; 5914 struct extent_map_tree *em_tree = &map_tree->map_tree;
5915 struct extent_map *em; 5915 struct extent_map *em;
5916 struct map_lookup *map; 5916 struct map_lookup *map;
@@ -5926,13 +5926,13 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5926 read_unlock(&em_tree->lock); 5926 read_unlock(&em_tree->lock);
5927 5927
5928 if (!em) { 5928 if (!em) {
5929 printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n", 5929 btrfs_err(fs_info, "couldn't find em for chunk %Lu",
5930 chunk_start); 5930 chunk_start);
5931 return -EIO; 5931 return -EIO;
5932 } 5932 }
5933 5933
5934 if (em->start != chunk_start) { 5934 if (em->start != chunk_start) {
5935 printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n", 5935 btrfs_err(fs_info, "bad chunk start, em=%Lu, wanted=%Lu",
5936 em->start, chunk_start); 5936 em->start, chunk_start);
5937 free_extent_map(em); 5937 free_extent_map(em);
5938 return -EIO; 5938 return -EIO;
@@ -6137,10 +6137,12 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
6137 6137
6138 rcu_read_lock(); 6138 rcu_read_lock();
6139 name = rcu_dereference(dev->name); 6139 name = rcu_dereference(dev->name);
6140 pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu " 6140 btrfs_debug(fs_info,
6141 "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf, 6141 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6142 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, 6142 bio_op(bio), bio->bi_opf,
6143 name->str, dev->devid, bio->bi_iter.bi_size); 6143 (u64)bio->bi_iter.bi_sector,
6144 (u_long)dev->bdev->bd_dev, name->str, dev->devid,
6145 bio->bi_iter.bi_size);
6144 rcu_read_unlock(); 6146 rcu_read_unlock();
6145 } 6147 }
6146#endif 6148#endif
@@ -6215,8 +6217,9 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
6215 } 6217 }
6216 6218
6217 if (map_length < length) { 6219 if (map_length < length) {
6218 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu", 6220 btrfs_crit(root->fs_info,
6219 logical, length, map_length); 6221 "mapping failed logical %llu bio len %llu len %llu",
6222 logical, length, map_length);
6220 BUG(); 6223 BUG();
6221 } 6224 }
6222 6225
@@ -6483,8 +6486,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6483 free_extent_map(em); 6486 free_extent_map(em);
6484 return -EIO; 6487 return -EIO;
6485 } 6488 }
6486 btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing", 6489 btrfs_warn(root->fs_info,
6487 devid, uuid); 6490 "devid %llu uuid %pU is missing",
6491 devid, uuid);
6488 } 6492 }
6489 map->stripes[i].dev->in_fs_metadata = 1; 6493 map->stripes[i].dev->in_fs_metadata = 1;
6490 } 6494 }
@@ -6661,7 +6665,8 @@ static int read_one_dev(struct btrfs_root *root,
6661 6665
6662int btrfs_read_sys_array(struct btrfs_root *root) 6666int btrfs_read_sys_array(struct btrfs_root *root)
6663{ 6667{
6664 struct btrfs_super_block *super_copy = root->fs_info->super_copy; 6668 struct btrfs_fs_info *fs_info = root->fs_info;
6669 struct btrfs_super_block *super_copy = fs_info->super_copy;
6665 struct extent_buffer *sb; 6670 struct extent_buffer *sb;
6666 struct btrfs_disk_key *disk_key; 6671 struct btrfs_disk_key *disk_key;
6667 struct btrfs_chunk *chunk; 6672 struct btrfs_chunk *chunk;
@@ -6732,8 +6737,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6732 6737
6733 num_stripes = btrfs_chunk_num_stripes(sb, chunk); 6738 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6734 if (!num_stripes) { 6739 if (!num_stripes) {
6735 printk(KERN_ERR 6740 btrfs_err(fs_info,
6736 "BTRFS: invalid number of stripes %u in sys_array at offset %u\n", 6741 "invalid number of stripes %u in sys_array at offset %u",
6737 num_stripes, cur_offset); 6742 num_stripes, cur_offset);
6738 ret = -EIO; 6743 ret = -EIO;
6739 break; 6744 break;
@@ -6741,7 +6746,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6741 6746
6742 type = btrfs_chunk_type(sb, chunk); 6747 type = btrfs_chunk_type(sb, chunk);
6743 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { 6748 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6744 btrfs_err(root->fs_info, 6749 btrfs_err(fs_info,
6745 "invalid chunk type %llu in sys_array at offset %u", 6750 "invalid chunk type %llu in sys_array at offset %u",
6746 type, cur_offset); 6751 type, cur_offset);
6747 ret = -EIO; 6752 ret = -EIO;
@@ -6756,9 +6761,9 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6756 if (ret) 6761 if (ret)
6757 break; 6762 break;
6758 } else { 6763 } else {
6759 printk(KERN_ERR 6764 btrfs_err(fs_info,
6760 "BTRFS: unexpected item type %u in sys_array at offset %u\n", 6765 "unexpected item type %u in sys_array at offset %u",
6761 (u32)key.type, cur_offset); 6766 (u32)key.type, cur_offset);
6762 ret = -EIO; 6767 ret = -EIO;
6763 break; 6768 break;
6764 } 6769 }
@@ -6771,7 +6776,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6771 return ret; 6776 return ret;
6772 6777
6773out_short_read: 6778out_short_read:
6774 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", 6779 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6775 len, cur_offset); 6780 len, cur_offset);
6776 clear_extent_buffer_uptodate(sb); 6781 clear_extent_buffer_uptodate(sb);
6777 free_extent_buffer_stale(sb); 6782 free_extent_buffer_stale(sb);
@@ -7095,10 +7100,12 @@ int btrfs_get_dev_stats(struct btrfs_root *root,
7095 mutex_unlock(&fs_devices->device_list_mutex); 7100 mutex_unlock(&fs_devices->device_list_mutex);
7096 7101
7097 if (!dev) { 7102 if (!dev) {
7098 btrfs_warn(root->fs_info, "get dev_stats failed, device not found"); 7103 btrfs_warn(root->fs_info,
7104 "get dev_stats failed, device not found");
7099 return -ENODEV; 7105 return -ENODEV;
7100 } else if (!dev->dev_stats_valid) { 7106 } else if (!dev->dev_stats_valid) {
7101 btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid"); 7107 btrfs_warn(root->fs_info,
7108 "get dev_stats failed, not yet valid");
7102 return -ENODEV; 7109 return -ENODEV;
7103 } else if (stats->flags & BTRFS_DEV_STATS_RESET) { 7110 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7104 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { 7111 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
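
The __btrfs_close_devices() hunk above splits device teardown in two: devices are only unhooked and queued on a local pending_put list while device_list_mutex is held, and btrfs_close_bdev()/call_rcu() run after the mutex is dropped, because blkdev_put() can re-enter code (btrfs_show_devname()) that takes the same mutex. The sketch below is a plain pthreads illustration of that detach-under-lock, put-outside-lock ordering, not kernel code; all demo_* names are invented.

#include <pthread.h>
#include <stdio.h>

struct demo_device {
	int id;
	struct demo_device *next;
};

static pthread_mutex_t device_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct demo_device *device_list;

/* Stand-in for blkdev_put(): may re-enter code that wants the list mutex,
 * so it must only ever be called with that mutex dropped. */
static void demo_put_device(struct demo_device *dev)
{
	printf("closing device %d\n", dev->id);
}

static void demo_close_devices(void)
{
	struct demo_device *pending = NULL;

	pthread_mutex_lock(&device_list_mutex);
	while (device_list) {			/* detach under the mutex */
		struct demo_device *dev = device_list;

		device_list = dev->next;
		dev->next = pending;
		pending = dev;
	}
	pthread_mutex_unlock(&device_list_mutex);

	while (pending) {			/* heavy "put" work, unlocked */
		struct demo_device *dev = pending;

		pending = dev->next;
		demo_put_device(dev);
	}
}

int main(void)
{
	struct demo_device devs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	for (int i = 0; i < 3; i++) {
		devs[i].next = device_list;
		device_list = &devs[i];
	}
	demo_close_devices();
	return 0;
}
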
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6613e6335ca2..09ed29c67848 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -382,7 +382,7 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
382 u64 logical, u64 *length, 382 u64 logical, u64 *length,
383 struct btrfs_bio **bbio_ret, int mirror_num, 383 struct btrfs_bio **bbio_ret, int mirror_num,
384 int need_raid_map); 384 int need_raid_map);
385int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 385int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
386 u64 chunk_start, u64 physical, u64 devid, 386 u64 chunk_start, u64 physical, u64 devid,
387 u64 **logical, int *naddrs, int *stripe_len); 387 u64 **logical, int *naddrs, int *stripe_len);
388int btrfs_read_sys_array(struct btrfs_root *root); 388int btrfs_read_sys_array(struct btrfs_root *root);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 88d274e8ecf2..441b81a3e545 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -95,7 +95,7 @@ static int zlib_compress_pages(struct list_head *ws,
95 *total_in = 0; 95 *total_in = 0;
96 96
97 if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) { 97 if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
98 printk(KERN_WARNING "BTRFS: deflateInit failed\n"); 98 pr_warn("BTRFS: deflateInit failed\n");
99 ret = -EIO; 99 ret = -EIO;
100 goto out; 100 goto out;
101 } 101 }
@@ -123,7 +123,7 @@ static int zlib_compress_pages(struct list_head *ws,
123 while (workspace->strm.total_in < len) { 123 while (workspace->strm.total_in < len) {
124 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH); 124 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
125 if (ret != Z_OK) { 125 if (ret != Z_OK) {
126 printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n", 126 pr_debug("BTRFS: deflate in loop returned %d\n",
127 ret); 127 ret);
128 zlib_deflateEnd(&workspace->strm); 128 zlib_deflateEnd(&workspace->strm);
129 ret = -EIO; 129 ret = -EIO;
@@ -249,7 +249,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
249 } 249 }
250 250
251 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) { 251 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
252 printk(KERN_WARNING "BTRFS: inflateInit failed\n"); 252 pr_warn("BTRFS: inflateInit failed\n");
253 return -EIO; 253 return -EIO;
254 } 254 }
255 while (workspace->strm.total_in < srclen) { 255 while (workspace->strm.total_in < srclen) {
@@ -339,7 +339,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
339 } 339 }
340 340
341 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) { 341 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
342 printk(KERN_WARNING "BTRFS: inflateInit failed\n"); 342 pr_warn("BTRFS: inflateInit failed\n");
343 return -EIO; 343 return -EIO;
344 } 344 }
345 345