author     Chris Mason <chris.mason@oracle.com>    2009-01-05 21:25:51 -0500
committer  Chris Mason <chris.mason@oracle.com>    2009-01-05 21:25:51 -0500
commit     d397712bcc6a759a560fd247e6053ecae091f958 (patch)
tree       9da8daebb870d8b8b1843507c4621715e23dd31a
parent     1f3c79a28c8837e8572b98f6d14142d9a6133c56 (diff)
Btrfs: Fix checkpatch.pl warnings
There were many, most are fixed now. struct-funcs.c generates some warnings
but these are bogus.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
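The warnings fixed below all come from the kernel's scripts/checkpatch.pl style
checker. As an illustrative sketch only (this function is made up and is not a
hunk from this patch), the recurring patterns addressed throughout the diff
look like this:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/list.h>

	/* 'noinline' goes before the return type, not after it */
	static noinline void style_example(struct list_head *head, void *value)
	{
		/* space required after keywords: while(...) becomes while (...) */
		while (!list_empty(head))
			list_del(head->next);

		/*
		 * kfree(NULL) is a no-op, so the "if (value)" guard is dropped,
		 * and braces around single-statement branches go away too
		 */
		kfree(value);

		/* printk() gets an explicit KERN_* log level */
		printk(KERN_INFO "btrfs: style example\n");
	}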
-rw-r--r--  fs/btrfs/acl.c              |   5
-rw-r--r--  fs/btrfs/async-thread.c     |   6
-rw-r--r--  fs/btrfs/compat.h           |   4
-rw-r--r--  fs/btrfs/compression.c      |  12
-rw-r--r--  fs/btrfs/ctree.c            | 217
-rw-r--r--  fs/btrfs/ctree.h            |  30
-rw-r--r--  fs/btrfs/dir-item.c         |   2
-rw-r--r--  fs/btrfs/disk-io.c          | 187
-rw-r--r--  fs/btrfs/export.c           |   8
-rw-r--r--  fs/btrfs/extent-tree.c      | 223
-rw-r--r--  fs/btrfs/extent_io.c        | 213
-rw-r--r--  fs/btrfs/extent_map.c       |  14
-rw-r--r--  fs/btrfs/file-item.c        |  18
-rw-r--r--  fs/btrfs/file.c             |  49
-rw-r--r--  fs/btrfs/free-space-cache.c |  37
-rw-r--r--  fs/btrfs/inode-map.c        |   1
-rw-r--r--  fs/btrfs/inode.c            | 173
-rw-r--r--  fs/btrfs/ioctl.c            |  37
-rw-r--r--  fs/btrfs/locking.c          |   5
-rw-r--r--  fs/btrfs/ordered-data.c     |  34
-rw-r--r--  fs/btrfs/print-tree.c       |  73
-rw-r--r--  fs/btrfs/ref-cache.c        |  12
-rw-r--r--  fs/btrfs/root-tree.c        |  17
-rw-r--r--  fs/btrfs/struct-funcs.c     |   4
-rw-r--r--  fs/btrfs/super.c            |  25
-rw-r--r--  fs/btrfs/sysfs.c            |   6
-rw-r--r--  fs/btrfs/transaction.c      |  45
-rw-r--r--  fs/btrfs/transaction.h      |   6
-rw-r--r--  fs/btrfs/tree-defrag.c      |   9
-rw-r--r--  fs/btrfs/tree-log.c         |  70
-rw-r--r--  fs/btrfs/volumes.c          |  78
-rw-r--r--  fs/btrfs/xattr.c            |   3
-rw-r--r--  fs/btrfs/zlib.c             |  45
33 files changed, 770 insertions, 898 deletions
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 867eaf1f8efb..1d53b62dbba5 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -161,8 +161,7 @@ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	ret = __btrfs_setxattr(inode, name, value, size, 0);
 
 out:
-	if (value)
-		kfree(value);
+	kfree(value);
 
 	if (!ret)
 		btrfs_update_cached_acl(inode, p_acl, acl);
@@ -213,7 +212,7 @@ static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name,
 }
 
 static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name,
-                const void *value, size_t size, int flags)
+		const void *value, size_t size, int flags)
 {
 	return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
 }
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 4229450b7596..8e2fec05dbe0 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -104,7 +104,7 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 
 	spin_lock_irqsave(&workers->lock, flags);
 
-	while(!list_empty(&workers->order_list)) {
+	while (!list_empty(&workers->order_list)) {
 		work = list_entry(workers->order_list.next,
 				  struct btrfs_work, order_list);
 
@@ -143,7 +143,7 @@ static int worker_loop(void *arg)
 	struct btrfs_work *work;
 	do {
 		spin_lock_irq(&worker->lock);
-		while(!list_empty(&worker->pending)) {
+		while (!list_empty(&worker->pending)) {
 			cur = worker->pending.next;
 			work = list_entry(cur, struct btrfs_work, list);
 			list_del(&work->list);
@@ -188,7 +188,7 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 	struct btrfs_worker_thread *worker;
 
 	list_splice_init(&workers->idle_list, &workers->worker_list);
-	while(!list_empty(&workers->worker_list)) {
+	while (!list_empty(&workers->worker_list)) {
 		cur = workers->worker_list.next;
 		worker = list_entry(cur, struct btrfs_worker_thread,
 				    worker_list);
diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h
index 75e4426d6fbb..594d60bdd3c4 100644
--- a/fs/btrfs/compat.h
+++ b/fs/btrfs/compat.h
@@ -4,7 +4,7 @@
 #define btrfs_drop_nlink(inode) drop_nlink(inode)
 #define btrfs_inc_nlink(inode) inc_nlink(inode)
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27)
 static inline struct dentry *d_obtain_alias(struct inode *inode)
 {
 	struct dentry *d;
@@ -21,7 +21,7 @@ static inline struct dentry *d_obtain_alias(struct inode *inode)
 }
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
 # define __pagevec_lru_add_file __pagevec_lru_add
 # define open_bdev_exclusive open_bdev_excl
 # define close_bdev_exclusive(bdev, mode) close_bdev_excl(bdev)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 2436163d5436..ee848d8585d9 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -137,7 +137,8 @@ static int check_compressed_csum(struct inode *inode,
 		kunmap_atomic(kaddr, KM_USER0);
 
 		if (csum != *cb_sum) {
-			printk("btrfs csum failed ino %lu extent %llu csum %u "
+			printk(KERN_INFO "btrfs csum failed ino %lu "
+			       "extent %llu csum %u "
 			       "wanted %u mirror %d\n", inode->i_ino,
 			       (unsigned long long)disk_start,
 			       csum, *cb_sum, cb->mirror_num);
@@ -217,7 +218,7 @@ csum_failed:
 	 * we have verified the checksum already, set page
 	 * checked so the end_io handlers know about it
 	 */
-	while(bio_index < cb->orig_bio->bi_vcnt) {
+	while (bio_index < cb->orig_bio->bi_vcnt) {
 		SetPageChecked(bvec->bv_page);
 		bvec++;
 		bio_index++;
@@ -246,7 +247,7 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start,
 	int i;
 	int ret;
 
-	while(nr_pages > 0) {
+	while (nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
 				     min_t(unsigned long,
 				     nr_pages, ARRAY_SIZE(pages)), pages);
@@ -463,7 +464,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 
 	pagevec_init(&pvec, 0);
-	while(last_offset < compressed_end) {
+	while (last_offset < compressed_end) {
 		page_index = last_offset >> PAGE_CACHE_SHIFT;
 
 		if (page_index > end_index)
@@ -697,9 +698,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
 	BUG_ON(ret);
 
-	if (!btrfs_test_flag(inode, NODATASUM)) {
+	if (!btrfs_test_flag(inode, NODATASUM))
 		btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
-	}
 
 	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
 	BUG_ON(ret);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 7fad2e3ad6ff..9e46c0776816 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -67,7 +67,7 @@ void btrfs_free_path(struct btrfs_path *p)
  *
  * It is safe to call this on paths that no locks or extent buffers held.
  */
-void noinline btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
+noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
 {
 	int i;
 
@@ -112,7 +112,7 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
 {
 	struct extent_buffer *eb;
 
-	while(1) {
+	while (1) {
 		eb = btrfs_root_node(root);
 		btrfs_tree_lock(eb);
 
@@ -202,22 +202,22 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 }
 
 /*
- * does the dirty work in cow of a single block. The parent block
- * (if supplied) is updated to point to the new cow copy. The new
- * buffer is marked dirty and returned locked. If you modify the block
- * it needs to be marked dirty again.
+ * does the dirty work in cow of a single block. The parent block (if
+ * supplied) is updated to point to the new cow copy. The new buffer is marked
+ * dirty and returned locked. If you modify the block it needs to be marked
+ * dirty again.
  *
  * search_start -- an allocation hint for the new block
  *
- * empty_size -- a hint that you plan on doing more cow. This is the size in bytes
- * the allocator should try to find free next to the block it returns. This is
- * just a hint and may be ignored by the allocator.
+ * empty_size -- a hint that you plan on doing more cow. This is the size in
+ * bytes the allocator should try to find free next to the block it returns.
+ * This is just a hint and may be ignored by the allocator.
  *
  * prealloc_dest -- if you have already reserved a destination for the cow,
- * this uses that block instead of allocating a new one. btrfs_alloc_reserved_extent
- * is used to finish the allocation.
+ * this uses that block instead of allocating a new one.
+ * btrfs_alloc_reserved_extent is used to finish the allocation.
  */
-static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans,
+static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     struct extent_buffer *buf,
 			     struct extent_buffer *parent, int parent_slot,
@@ -366,7 +366,7 @@ static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans,
  * This version of it has extra checks so that a block isn't cow'd more than
  * once per transaction, as long as it hasn't been written yet
  */
-int noinline btrfs_cow_block(struct btrfs_trans_handle *trans,
+noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		    struct btrfs_root *root, struct extent_buffer *buf,
 		    struct extent_buffer *parent, int parent_slot,
 		    struct extent_buffer **cow_ret, u64 prealloc_dest)
@@ -375,13 +375,16 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans,
 	int ret;
 
 	if (trans->transaction != root->fs_info->running_transaction) {
-		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
+		printk(KERN_CRIT "trans %llu running %llu\n",
+		       (unsigned long long)trans->transid,
+		       (unsigned long long)
 		       root->fs_info->running_transaction->transid);
 		WARN_ON(1);
 	}
 	if (trans->transid != root->fs_info->generation) {
-		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
-		       root->fs_info->generation);
+		printk(KERN_CRIT "trans %llu running %llu\n",
+		       (unsigned long long)trans->transid,
+		       (unsigned long long)root->fs_info->generation);
 		WARN_ON(1);
 	}
 
@@ -489,16 +492,10 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	if (cache_only && parent_level != 1)
 		return 0;
 
-	if (trans->transaction != root->fs_info->running_transaction) {
-		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
-		       root->fs_info->running_transaction->transid);
+	if (trans->transaction != root->fs_info->running_transaction)
 		WARN_ON(1);
-	}
-	if (trans->transid != root->fs_info->generation) {
-		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
-		       root->fs_info->generation);
+	if (trans->transid != root->fs_info->generation)
 		WARN_ON(1);
-	}
 
 	parent_nritems = btrfs_header_nritems(parent);
 	blocksize = btrfs_level_size(root, parent_level - 1);
@@ -681,51 +678,18 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
 		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
 		       btrfs_header_bytenr(leaf));
 	}
-#if 0
-	for (i = 0; nritems > 1 && i < nritems - 2; i++) {
-		btrfs_item_key_to_cpu(leaf, &cpukey, i + 1);
-		btrfs_item_key(leaf, &leaf_key, i);
-		if (comp_keys(&leaf_key, &cpukey) >= 0) {
-			btrfs_print_leaf(root, leaf);
-			printk("slot %d offset bad key\n", i);
-			BUG_ON(1);
-		}
-		if (btrfs_item_offset_nr(leaf, i) !=
-			btrfs_item_end_nr(leaf, i + 1)) {
-			btrfs_print_leaf(root, leaf);
-			printk("slot %d offset bad\n", i);
-			BUG_ON(1);
-		}
-		if (i == 0) {
-			if (btrfs_item_offset_nr(leaf, i) +
-			       btrfs_item_size_nr(leaf, i) !=
-			       BTRFS_LEAF_DATA_SIZE(root)) {
-				btrfs_print_leaf(root, leaf);
-				printk("slot %d first offset bad\n", i);
-				BUG_ON(1);
-			}
-		}
-	}
-	if (nritems > 0) {
-		if (btrfs_item_size_nr(leaf, nritems - 1) > 4096) {
-			btrfs_print_leaf(root, leaf);
-			printk("slot %d bad size \n", nritems - 1);
-			BUG_ON(1);
-		}
-	}
-#endif
 	if (slot != 0 && slot < nritems - 1) {
 		btrfs_item_key(leaf, &leaf_key, slot);
 		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
 		if (comp_keys(&leaf_key, &cpukey) <= 0) {
 			btrfs_print_leaf(root, leaf);
-			printk("slot %d offset bad key\n", slot);
+			printk(KERN_CRIT "slot %d offset bad key\n", slot);
 			BUG_ON(1);
 		}
 		if (btrfs_item_offset_nr(leaf, slot - 1) !=
 		       btrfs_item_end_nr(leaf, slot)) {
 			btrfs_print_leaf(root, leaf);
-			printk("slot %d offset bad\n", slot);
+			printk(KERN_CRIT "slot %d offset bad\n", slot);
 			BUG_ON(1);
 		}
 	}
@@ -736,7 +700,7 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
 		if (btrfs_item_offset_nr(leaf, slot) !=
 			btrfs_item_end_nr(leaf, slot + 1)) {
 			btrfs_print_leaf(root, leaf);
-			printk("slot %d offset bad\n", slot);
+			printk(KERN_CRIT "slot %d offset bad\n", slot);
 			BUG_ON(1);
 		}
 	}
@@ -745,30 +709,10 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
 	return 0;
 }
 
-static int noinline check_block(struct btrfs_root *root,
+static noinline int check_block(struct btrfs_root *root,
 				struct btrfs_path *path, int level)
 {
-	u64 found_start;
 	return 0;
-	if (btrfs_header_level(path->nodes[level]) != level)
-		printk("warning: bad level %Lu wanted %d found %d\n",
-		       path->nodes[level]->start, level,
-		       btrfs_header_level(path->nodes[level]));
-	found_start = btrfs_header_bytenr(path->nodes[level]);
-	if (found_start != path->nodes[level]->start) {
-		printk("warning: bad bytentr %Lu found %Lu\n",
-		       path->nodes[level]->start, found_start);
-	}
-#if 0
-	struct extent_buffer *buf = path->nodes[level];
-
-	if (memcmp_extent_buffer(buf, root->fs_info->fsid,
-				 (unsigned long)btrfs_header_fsid(buf),
-				 BTRFS_FSID_SIZE)) {
-		printk("warning bad block %Lu\n", buf->start);
-		return 1;
-	}
-#endif
 	if (level == 0)
 		return check_leaf(root, path, level);
 	return check_node(root, path, level);
@@ -802,7 +746,7 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
 	unsigned long map_len = 0;
 	int err;
 
-	while(low < high) {
+	while (low < high) {
 		mid = (low + high) / 2;
 		offset = p + mid * item_size;
 
@@ -1130,7 +1074,7 @@ enospc:
  * when they are completely full. This is also done top down, so we
  * have to be pessimistic.
  */
-static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans,
+static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 					  struct btrfs_root *root,
 					  struct btrfs_path *path, int level)
 {
@@ -1296,7 +1240,7 @@ static noinline void reada_for_search(struct btrfs_root *root,
 
 	nritems = btrfs_header_nritems(node);
 	nr = slot;
-	while(1) {
+	while (1) {
 		if (direction < 0) {
 			if (nr == 0)
 				break;
@@ -1322,7 +1266,8 @@ static noinline void reada_for_search(struct btrfs_root *root,
 		nscan++;
 		if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32))
 			break;
-		if(nread > (256 * 1024) || nscan > 128)
+
+		if (nread > (256 * 1024) || nscan > 128)
 			break;
 
 		if (search < lowest_read)
@@ -1333,17 +1278,17 @@ static noinline void reada_for_search(struct btrfs_root *root,
 }
 
 /*
- * when we walk down the tree, it is usually safe to unlock the higher layers in
- * the tree. The exceptions are when our path goes through slot 0, because operations
- * on the tree might require changing key pointers higher up in the tree.
+ * when we walk down the tree, it is usually safe to unlock the higher layers
+ * in the tree. The exceptions are when our path goes through slot 0, because
+ * operations on the tree might require changing key pointers higher up in the
+ * tree.
  *
- * callers might also have set path->keep_locks, which tells this code to
- * keep the lock if the path points to the last slot in the block. This is
- * part of walking through the tree, and selecting the next slot in the higher
- * block.
+ * callers might also have set path->keep_locks, which tells this code to keep
+ * the lock if the path points to the last slot in the block. This is part of
+ * walking through the tree, and selecting the next slot in the higher block.
  *
- * lowest_unlock sets the lowest level in the tree we're allowed to unlock.
- * so if lowest_unlock is 1, level 0 won't be unlocked
+ * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
+ * if lowest_unlock is 1, level 0 won't be unlocked
  */
 static noinline void unlock_up(struct btrfs_path *path, int level,
 			       int lowest_unlock)
@@ -1832,9 +1777,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 	if (!empty && src_nritems <= 8)
 		return 1;
 
-	if (push_items <= 0) {
+	if (push_items <= 0)
 		return 1;
-	}
 
 	if (empty) {
 		push_items = min(src_nritems, push_items);
@@ -1854,7 +1798,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(dst_nritems),
 			   btrfs_node_key_ptr_offset(0),
-		           push_items * sizeof(struct btrfs_key_ptr));
+			   push_items * sizeof(struct btrfs_key_ptr));
 
 	if (push_items < src_nritems) {
 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
@@ -1899,19 +1843,16 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 	src_nritems = btrfs_header_nritems(src);
 	dst_nritems = btrfs_header_nritems(dst);
 	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
-	if (push_items <= 0) {
+	if (push_items <= 0)
 		return 1;
-	}
 
-	if (src_nritems < 4) {
+	if (src_nritems < 4)
 		return 1;
-	}
 
 	max_push = src_nritems / 2 + 1;
 	/* don't try to empty the node */
-	if (max_push >= src_nritems) {
+	if (max_push >= src_nritems)
 		return 1;
-	}
 
 	if (max_push < push_items)
 		push_items = max_push;
@@ -1924,7 +1865,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
-		           push_items * sizeof(struct btrfs_key_ptr));
+			   push_items * sizeof(struct btrfs_key_ptr));
 
 	btrfs_set_header_nritems(src, src_nritems - push_items);
 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
@@ -1945,7 +1886,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
  *
  * returns zero on success or < 0 on failure.
  */
-static int noinline insert_new_root(struct btrfs_trans_handle *trans,
+static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_path *path, int level)
 {
@@ -2176,14 +2117,15 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
  * the start of the leaf data. IOW, how much room
  * the leaf has left for both items and data
  */
-int noinline btrfs_leaf_free_space(struct btrfs_root *root,
+noinline int btrfs_leaf_free_space(struct btrfs_root *root,
 				   struct extent_buffer *leaf)
 {
 	int nritems = btrfs_header_nritems(leaf);
 	int ret;
 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
 	if (ret < 0) {
-		printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
+		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
+		       "used %d nritems %d\n",
 		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
 		       leaf_space_used(leaf, 0, nritems), nritems);
 	}
@@ -2219,9 +2161,9 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 	int ret;
 
 	slot = path->slots[1];
-	if (!path->nodes[1]) {
+	if (!path->nodes[1])
 		return 1;
-	}
+
 	upper = path->nodes[1];
 	if (slot >= btrfs_header_nritems(upper) - 1)
 		return 1;
@@ -2418,9 +2360,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 		return 1;
 
 	right_nritems = btrfs_header_nritems(right);
-	if (right_nritems == 0) {
+	if (right_nritems == 0)
 		return 1;
-	}
 
 	WARN_ON(!btrfs_tree_locked(path->nodes[1]));
 
@@ -2502,7 +2443,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 			   push_items * sizeof(struct btrfs_item));
 
 	push_space = BTRFS_LEAF_DATA_SIZE(root) -
-		     btrfs_item_offset_nr(right, push_items -1);
+		     btrfs_item_offset_nr(right, push_items - 1);
 
 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
 		     leaf_data_end(root, left) - push_space,
@@ -2537,7 +2478,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 
 	/* fixup right node */
 	if (push_items > right_nritems) {
-		printk("push items %d nr %u\n", push_items, right_nritems);
+		printk(KERN_CRIT "push items %d nr %u\n", push_items,
+		       right_nritems);
 		WARN_ON(1);
 	}
 
@@ -2640,9 +2582,8 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	/* first try to make some room by pushing left and right */
 	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
 		wret = push_leaf_right(trans, root, path, data_size, 0);
-		if (wret < 0) {
+		if (wret < 0)
 			return wret;
-		}
 		if (wret) {
 			wret = push_leaf_left(trans, root, path, data_size, 0);
 			if (wret < 0)
@@ -2665,7 +2606,7 @@ again:
 	l = path->nodes[0];
 	slot = path->slots[0];
 	nritems = btrfs_header_nritems(l);
-	mid = (nritems + 1)/ 2;
+	mid = (nritems + 1) / 2;
 
 	right = btrfs_alloc_free_block(trans, root, root->leafsize,
 					path->nodes[1]->start,
@@ -2734,7 +2675,7 @@ again:
 			path->slots[0] = 0;
 			if (path->slots[1] == 0) {
 				wret = fixup_low_keys(trans, root,
-				           path, &disk_key, 1);
+					   path, &disk_key, 1);
 				if (wret)
 					ret = wret;
 			}
@@ -3033,8 +2974,8 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
 		    BTRFS_FILE_EXTENT_INLINE) {
 			ptr = btrfs_item_ptr_offset(leaf, slot);
 			memmove_extent_buffer(leaf, ptr,
-			      (unsigned long)fi,
-			      offsetof(struct btrfs_file_extent_item,
+				(unsigned long)fi,
+				offsetof(struct btrfs_file_extent_item,
 						 disk_bytenr));
 		}
 	}
@@ -3096,7 +3037,8 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
 	BUG_ON(slot < 0);
 	if (slot >= nritems) {
 		btrfs_print_leaf(root, leaf);
-		printk("slot %d too large, nritems %d\n", slot, nritems);
+		printk(KERN_CRIT "slot %d too large, nritems %d\n",
+		       slot, nritems);
 		BUG_ON(1);
 	}
 
@@ -3218,7 +3160,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
 
 		if (old_data < data_end) {
 			btrfs_print_leaf(root, leaf);
-			printk("slot %d old_data %d data_end %d\n",
+			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
 			       slot, old_data, data_end);
 			BUG_ON(1);
 		}
@@ -3317,9 +3259,8 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 	unsigned int data_end;
 	struct btrfs_disk_key disk_key;
 
-	for (i = 0; i < nr; i++) {
+	for (i = 0; i < nr; i++)
 		total_data += data_size[i];
-	}
 
 	total_size = total_data + (nr * sizeof(struct btrfs_item));
 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
@@ -3336,7 +3277,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 
 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
 		btrfs_print_leaf(root, leaf);
-		printk("not enough freespace need %u have %d\n",
+		printk(KERN_CRIT "not enough freespace need %u have %d\n",
 		       total_size, btrfs_leaf_free_space(root, leaf));
 		BUG();
 	}
@@ -3349,7 +3290,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 
 		if (old_data < data_end) {
 			btrfs_print_leaf(root, leaf);
-			printk("slot %d old_data %d data_end %d\n",
+			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
 			       slot, old_data, data_end);
 			BUG_ON(1);
 		}
@@ -3457,7 +3398,7 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	int wret;
 
 	nritems = btrfs_header_nritems(parent);
-	if (slot != nritems -1) {
+	if (slot != nritems - 1) {
 		memmove_extent_buffer(parent,
 			      btrfs_node_key_ptr_offset(slot),
 			      btrfs_node_key_ptr_offset(slot + 1),
@@ -3614,7 +3555,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 
 		if (btrfs_header_nritems(leaf) == 0) {
 			path->slots[1] = slot;
-			ret = btrfs_del_leaf(trans, root, path, leaf->start);
+			ret = btrfs_del_leaf(trans, root, path,
+					     leaf->start);
 			BUG_ON(ret);
 			free_extent_buffer(leaf);
 		} else {
@@ -3717,7 +3659,7 @@ again:
 		ret = 1;
 		goto out;
 	}
-	while(1) {
+	while (1) {
 		nritems = btrfs_header_nritems(cur);
 		level = btrfs_header_level(cur);
 		sret = bin_search(cur, min_key, level, &slot);
@@ -3738,7 +3680,7 @@ again:
 		 * min_trans parameters. If it isn't in cache or is too
 		 * old, skip to the next one.
 		 */
-		while(slot < nritems) {
+		while (slot < nritems) {
 			u64 blockptr;
 			u64 gen;
 			struct extent_buffer *tmp;
@@ -3830,7 +3772,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
 	struct extent_buffer *c;
 
 	WARN_ON(!path->keep_locks);
-	while(level < BTRFS_MAX_LEVEL) {
+	while (level < BTRFS_MAX_LEVEL) {
 		if (!path->nodes[level])
 			return 1;
 
@@ -3839,9 +3781,8 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
 next:
 		if (slot >= btrfs_header_nritems(c)) {
 			level++;
-			if (level == BTRFS_MAX_LEVEL) {
+			if (level == BTRFS_MAX_LEVEL)
 				return 1;
-			}
 			continue;
 		}
 		if (level == 0)
@@ -3889,9 +3830,8 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 	int ret;
 
 	nritems = btrfs_header_nritems(path->nodes[0]);
-	if (nritems == 0) {
+	if (nritems == 0)
 		return 1;
-	}
 
 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
 
@@ -3915,7 +3855,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 		goto done;
 	}
 
-	while(level < BTRFS_MAX_LEVEL) {
+	while (level < BTRFS_MAX_LEVEL) {
 		if (!path->nodes[level])
 			return 1;
 
@@ -3923,9 +3863,8 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 		c = path->nodes[level];
 		if (slot >= btrfs_header_nritems(c)) {
 			level++;
-			if (level == BTRFS_MAX_LEVEL) {
+			if (level == BTRFS_MAX_LEVEL)
 				return 1;
-			}
 			continue;
 		}
 
@@ -3946,7 +3885,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 			break;
 		}
 		path->slots[level] = slot;
-		while(1) {
+		while (1) {
 			level--;
 			c = path->nodes[level];
 			if (path->locks[level])
@@ -3986,7 +3925,7 @@ int btrfs_previous_item(struct btrfs_root *root,
 	u32 nritems;
 	int ret;
 
-	while(1) {
+	while (1) {
 		if (path->slots[0] == 0) {
 			ret = btrfs_prev_leaf(root, path);
 			if (ret != 0)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index ccea0648e106..eee060f88113 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -126,7 +126,6 @@ struct btrfs_ordered_sum;
 static int btrfs_csum_sizes[] = { 4, 0 };
 
 /* four bytes for CRC32 */
-//#define BTRFS_CRC32_SIZE 4
 #define BTRFS_EMPTY_DIR_SIZE 0
 
 #define BTRFS_FT_UNKNOWN 0
@@ -283,8 +282,8 @@ struct btrfs_header {
 } __attribute__ ((__packed__));
 
 #define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \
-			        sizeof(struct btrfs_header)) / \
-			        sizeof(struct btrfs_key_ptr))
+				      sizeof(struct btrfs_header)) / \
+				      sizeof(struct btrfs_key_ptr))
 #define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header))
 #define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->leafsize))
 #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
@@ -1512,7 +1511,7 @@ static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
 
 static inline int btrfs_is_leaf(struct extent_buffer *eb)
 {
-	return (btrfs_header_level(eb) == 0);
+	return btrfs_header_level(eb) == 0;
 }
 
 /* struct btrfs_root_item */
@@ -1597,8 +1596,8 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
 /* struct btrfs_file_extent_item */
 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
 
-static inline unsigned long btrfs_file_extent_inline_start(struct
-						   btrfs_file_extent_item *e)
+static inline unsigned long
+btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
 {
 	unsigned long offset = (unsigned long)e;
 	offset += offsetof(struct btrfs_file_extent_item, disk_bytenr);
@@ -1660,20 +1659,20 @@ static inline int btrfs_set_root_name(struct btrfs_root *root,
 					  const char *name, int len)
 {
 	/* if we already have a name just free it */
-	if (root->name)
-		kfree(root->name);
+	kfree(root->name);
 
 	root->name = kmalloc(len+1, GFP_KERNEL);
 	if (!root->name)
 		return -ENOMEM;
 
 	memcpy(root->name, name, len);
-	root->name[len] ='\0';
+	root->name[len] = '\0';
 
 	return 0;
 }
 
-static inline u32 btrfs_level_size(struct btrfs_root *root, int level) {
+static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
+{
 	if (level == 0)
 		return root->leafsize;
 	return root->nodesize;
@@ -1707,9 +1706,9 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root);
 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
-							  btrfs_fs_info *info,
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+						 struct btrfs_fs_info *info,
 							  u64 bytenr);
 u64 btrfs_find_block_group(struct btrfs_root *root,
 			   u64 search_start, u64 search_hint, int owner);
 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
@@ -1908,8 +1907,9 @@ int btrfs_search_root(struct btrfs_root *root, u64 search_start,
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
 			  struct btrfs_root *latest_root);
 /* dir-item.c */
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
-			  *root, const char *name, int name_len, u64 dir,
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, const char *name,
+			  int name_len, u64 dir,
 			  struct btrfs_key *location, u8 type, u64 index);
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
 					     struct btrfs_root *root,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 5040b71f1900..926a0b287a7d 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -333,7 +333,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
 	leaf = path->nodes[0];
 	dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
 	total_len = btrfs_item_size_nr(leaf, path->slots[0]);
-	while(cur < total_len) {
+	while (cur < total_len) {
 		this_len = sizeof(*dir_item) +
 			btrfs_dir_name_len(leaf, dir_item) +
 			btrfs_dir_data_len(leaf, dir_item);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index dae25e78a6b7..81a313874ae5 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -23,7 +23,7 @@
 #include <linux/swap.h>
 #include <linux/radix-tree.h>
 #include <linux/writeback.h>
-#include <linux/buffer_head.h> // for block_sync_page
+#include <linux/buffer_head.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -40,19 +40,6 @@
 #include "ref-cache.h"
 #include "tree-log.h"
 
-#if 0
-static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
-{
-	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
-		printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
-		       (unsigned long long)extent_buffer_blocknr(buf),
-		       (unsigned long long)btrfs_header_blocknr(buf));
-		return 1;
-	}
-	return 0;
-}
-#endif
-
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 
@@ -128,23 +115,13 @@ static struct extent_map *btree_get_extent(struct inode *inode,
 		u64 failed_start = em->start;
 		u64 failed_len = em->len;
 
-		printk("failed to insert %Lu %Lu -> %Lu into tree\n",
-		       em->start, em->len, em->block_start);
 		free_extent_map(em);
 		em = lookup_extent_mapping(em_tree, start, len);
 		if (em) {
-			printk("after failing, found %Lu %Lu %Lu\n",
-			       em->start, em->len, em->block_start);
 			ret = 0;
 		} else {
 			em = lookup_extent_mapping(em_tree, failed_start,
 						   failed_len);
-			if (em) {
-				printk("double failure lookup gives us "
-				       "%Lu %Lu -> %Lu\n", em->start,
-				       em->len, em->block_start);
-				free_extent_map(em);
-			}
 			ret = -EIO;
 		}
 	} else if (ret) {
@@ -191,15 +168,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 	unsigned long inline_result;
 
 	len = buf->len - offset;
-	while(len > 0) {
+	while (len > 0) {
 		err = map_private_extent_buffer(buf, offset, 32,
 					&map_token, &kaddr,
 					&map_start, &map_len, KM_USER0);
-		if (err) {
-			printk("failed to map extent buffer! %lu\n",
-			       offset);
+		if (err)
 			return 1;
-		}
 		cur_len = min(len, map_len - (offset - map_start));
 		crc = btrfs_csum_data(root, kaddr + offset - map_start,
 				      crc, cur_len);
@@ -218,15 +192,14 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 	btrfs_csum_final(crc, result);
 
 	if (verify) {
-		/* FIXME, this is not good */
 		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
 			u32 val;
 			u32 found = 0;
 			memcpy(&found, result, csum_size);
 
 			read_extent_buffer(buf, &val, 0, csum_size);
-			printk("btrfs: %s checksum verify failed on %llu "
-			       "wanted %X found %X level %d\n",
+			printk(KERN_INFO "btrfs: %s checksum verify failed "
+			       "on %llu wanted %X found %X level %d\n",
 			       root->fs_info->sb->s_id,
 			       buf->start, val, found, btrfs_header_level(buf));
 			if (result != (char *)&inline_result)
@@ -293,7 +266,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
 			return ret;
-printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num);
+
 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 					      eb->start, eb->len);
 		if (num_copies == 1)
@@ -307,9 +280,10 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror
 }
 
 /*
- * checksum a dirty tree block before IO. This has extra checks to make
- * sure we only fill in the checksum field in the first page of a multi-page block
+ * checksum a dirty tree block before IO. This has extra checks to make sure
+ * we only fill in the checksum field in the first page of a multi-page block
  */
+
 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 {
 	struct extent_io_tree *tree;
@@ -327,28 +301,22 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 	if (!page->private)
 		goto out;
 	len = page->private >> 2;
-	if (len == 0) {
-		WARN_ON(1);
-	}
+	WARN_ON(len == 0);
+
 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
 	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
 					     btrfs_header_generation(eb));
 	BUG_ON(ret);
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
-		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
-		       start, found_start, len);
 		WARN_ON(1);
 		goto err;
 	}
 	if (eb->first_page != page) {
-		printk("bad first page %lu %lu\n", eb->first_page->index,
-		       page->index);
 		WARN_ON(1);
 		goto err;
 	}
 	if (!PageUptodate(page)) {
-		printk("csum not up to date page %lu\n", page->index);
 		WARN_ON(1);
 		goto err;
 	}
@@ -396,29 +364,30 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 		goto out;
 	if (!page->private)
 		goto out;
+
 	len = page->private >> 2;
-	if (len == 0) {
-		WARN_ON(1);
-	}
+	WARN_ON(len == 0);
+
 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
-		printk("bad tree block start %llu %llu\n",
+		printk(KERN_INFO "btrfs bad tree block start %llu %llu\n",
 		       (unsigned long long)found_start,
 		       (unsigned long long)eb->start);
 		ret = -EIO;
 		goto err;
 	}
 	if (eb->first_page != page) {
-		printk("bad first page %lu %lu\n", eb->first_page->index,
-		       page->index);
+		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
+		       eb->first_page->index, page->index);
 		WARN_ON(1);
 		ret = -EIO;
 		goto err;
 	}
 	if (check_tree_block_fsid(root, eb)) {
-		printk("bad fsid on block %Lu\n", eb->start);
+		printk(KERN_INFO "btrfs bad fsid on block %llu\n",
+		       (unsigned long long)eb->start);
 		ret = -EIO;
 		goto err;
 	}
@@ -578,7 +547,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 					 HZ/10);
 	}
 #endif
-	while(atomic_read(&fs_info->async_submit_draining) &&
+	while (atomic_read(&fs_info->async_submit_draining) &&
 	      atomic_read(&fs_info->nr_async_submits)) {
 		wait_event(fs_info->async_submit_wait,
 			   (atomic_read(&fs_info->nr_async_submits) == 0));
@@ -594,7 +563,7 @@ static int btree_csum_one_bio(struct bio *bio)
 	struct btrfs_root *root;
 
 	WARN_ON(bio->bi_vcnt <= 0);
-	while(bio_index < bio->bi_vcnt) {
+	while (bio_index < bio->bi_vcnt) {
 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
 		csum_dirty_buffer(root, bvec->bv_page);
 		bio_index++;
@@ -680,9 +649,8 @@ static int btree_writepages(struct address_space *mapping,
 
 		num_dirty = count_range_bits(tree, &start, (u64)-1,
 					     thresh, EXTENT_DIRTY);
-		if (num_dirty < thresh) {
+		if (num_dirty < thresh)
 			return 0;
-		}
 	}
 	return extent_writepages(tree, mapping, btree_get_extent, wbc);
 }
@@ -701,15 +669,14 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 	int ret;
 
 	if (PageWriteback(page) || PageDirty(page))
 		return 0;
 
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	map = &BTRFS_I(page->mapping->host)->extent_tree;
 
 	ret = try_release_extent_state(map, tree, page, gfp_flags);
-	if (!ret) {
+	if (!ret)
 		return 0;
-	}
 
 	ret = try_release_extent_buffer(tree, page);
 	if (ret == 1) {
@@ -728,8 +695,8 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
 	extent_invalidatepage(tree, page, offset);
 	btree_releasepage(page, GFP_NOFS);
 	if (PagePrivate(page)) {
-		printk("warning page private not zero on page %Lu\n",
-		       page_offset(page));
+		printk(KERN_WARNING "btrfs warning page private not zero "
+		       "on page %llu\n", (unsigned long long)page_offset(page));
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
 		page_cache_release(page);
@@ -813,7 +780,7 @@ int btrfs_write_tree_block(struct extent_buffer *buf)
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
 	return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
-					buf->start, buf->start + buf->len -1);
+					buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
@@ -832,11 +799,10 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 
 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
 
-	if (ret == 0) {
+	if (ret == 0)
 		buf->flags |= EXTENT_UPTODATE;
-	} else {
+	else
 		WARN_ON(1);
-	}
 	return buf;
 
 }
@@ -944,7 +910,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 	if (!log_root_tree)
 		return 0;
 
-	while(1) {
+	while (1) {
 		ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
 				    0, &start, &end, EXTENT_DIRTY);
 		if (ret)
@@ -1165,24 +1131,6 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
1165 root->in_sysfs = 1; 1131 root->in_sysfs = 1;
1166 return root; 1132 return root;
1167} 1133}
1168#if 0
1169static int add_hasher(struct btrfs_fs_info *info, char *type) {
1170 struct btrfs_hasher *hasher;
1171
1172 hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
1173 if (!hasher)
1174 return -ENOMEM;
1175 hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
1176 if (!hasher->hash_tfm) {
1177 kfree(hasher);
1178 return -EINVAL;
1179 }
1180 spin_lock(&info->hash_lock);
1181 list_add(&hasher->list, &info->hashers);
1182 spin_unlock(&info->hash_lock);
1183 return 0;
1184}
1185#endif
1186 1134
1187static int btrfs_congested_fn(void *congested_data, int bdi_bits) 1135static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1188{ 1136{
@@ -1226,9 +1174,8 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1226 continue; 1174 continue;
1227 1175
1228 bdi = blk_get_backing_dev_info(device->bdev); 1176 bdi = blk_get_backing_dev_info(device->bdev);
1229 if (bdi->unplug_io_fn) { 1177 if (bdi->unplug_io_fn)
1230 bdi->unplug_io_fn(bdi, page); 1178 bdi->unplug_io_fn(bdi, page);
1231 }
1232 } 1179 }
1233} 1180}
1234 1181
@@ -1420,8 +1367,9 @@ static int transaction_kthread(void *arg)
1420 mutex_lock(&root->fs_info->transaction_kthread_mutex); 1367 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1421 1368
1422 if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) { 1369 if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
1423 printk("btrfs: total reference cache size %Lu\n", 1370 printk(KERN_INFO "btrfs: total reference cache "
1424 root->fs_info->total_ref_cache_size); 1371 "size %llu\n",
1372 root->fs_info->total_ref_cache_size);
1425 } 1373 }
1426 1374
1427 mutex_lock(&root->fs_info->trans_mutex); 1375 mutex_lock(&root->fs_info->trans_mutex);
@@ -1592,14 +1540,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1592 atomic_set(&fs_info->tree_log_writers, 0); 1540 atomic_set(&fs_info->tree_log_writers, 0);
1593 fs_info->tree_log_transid = 0; 1541 fs_info->tree_log_transid = 0;
1594 1542
1595#if 0
1596 ret = add_hasher(fs_info, "crc32c");
1597 if (ret) {
1598 printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
1599 err = -ENOMEM;
1600 goto fail_iput;
1601 }
1602#endif
1603 __setup_root(4096, 4096, 4096, 4096, tree_root, 1543 __setup_root(4096, 4096, 4096, 4096, tree_root,
1604 fs_info, BTRFS_ROOT_TREE_OBJECTID); 1544 fs_info, BTRFS_ROOT_TREE_OBJECTID);
1605 1545
@@ -1720,7 +1660,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1720 1660
1721 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC, 1661 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1722 sizeof(disk_super->magic))) { 1662 sizeof(disk_super->magic))) {
1723 printk("btrfs: valid FS not found on %s\n", sb->s_id); 1663 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1724 goto fail_sb_buffer; 1664 goto fail_sb_buffer;
1725 } 1665 }
1726 1666
@@ -1728,8 +1668,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1728 ret = btrfs_read_sys_array(tree_root); 1668 ret = btrfs_read_sys_array(tree_root);
1729 mutex_unlock(&fs_info->chunk_mutex); 1669 mutex_unlock(&fs_info->chunk_mutex);
1730 if (ret) { 1670 if (ret) {
1731 printk("btrfs: failed to read the system array on %s\n", 1671 printk(KERN_WARNING "btrfs: failed to read the system "
1732 sb->s_id); 1672 "array on %s\n", sb->s_id);
1733 goto fail_sys_array; 1673 goto fail_sys_array;
1734 } 1674 }
1735 1675
@@ -1746,14 +1686,15 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1746 BUG_ON(!chunk_root->node); 1686 BUG_ON(!chunk_root->node);
1747 1687
1748 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, 1688 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1749 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), 1689 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1750 BTRFS_UUID_SIZE); 1690 BTRFS_UUID_SIZE);
1751 1691
1752 mutex_lock(&fs_info->chunk_mutex); 1692 mutex_lock(&fs_info->chunk_mutex);
1753 ret = btrfs_read_chunk_tree(chunk_root); 1693 ret = btrfs_read_chunk_tree(chunk_root);
1754 mutex_unlock(&fs_info->chunk_mutex); 1694 mutex_unlock(&fs_info->chunk_mutex);
1755 if (ret) { 1695 if (ret) {
1756 printk("btrfs: failed to read chunk tree on %s\n", sb->s_id); 1696 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1697 sb->s_id);
1757 goto fail_chunk_root; 1698 goto fail_chunk_root;
1758 } 1699 }
1759 1700
@@ -1812,7 +1753,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1812 u64 bytenr = btrfs_super_log_root(disk_super); 1753 u64 bytenr = btrfs_super_log_root(disk_super);
1813 1754
1814 if (fs_devices->rw_devices == 0) { 1755 if (fs_devices->rw_devices == 0) {
1815 printk("Btrfs log replay required on RO media\n"); 1756 printk(KERN_WARNING "Btrfs log replay required "
1757 "on RO media\n");
1816 err = -EIO; 1758 err = -EIO;
1817 goto fail_trans_kthread; 1759 goto fail_trans_kthread;
1818 } 1760 }
@@ -2097,7 +2039,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2097 total_errors++; 2039 total_errors++;
2098 } 2040 }
2099 if (total_errors > max_errors) { 2041 if (total_errors > max_errors) {
2100 printk("btrfs: %d errors while writing supers\n", total_errors); 2042 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2043 total_errors);
2101 BUG(); 2044 BUG();
2102 } 2045 }
2103 2046
@@ -2114,7 +2057,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2114 total_errors++; 2057 total_errors++;
2115 } 2058 }
2116 if (total_errors > max_errors) { 2059 if (total_errors > max_errors) {
2117 printk("btrfs: %d errors while writing supers\n", total_errors); 2060 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2061 total_errors);
2118 BUG(); 2062 BUG();
2119 } 2063 }
2120 return 0; 2064 return 0;
@@ -2137,16 +2081,11 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2137 down_write(&root->anon_super.s_umount); 2081 down_write(&root->anon_super.s_umount);
2138 kill_anon_super(&root->anon_super); 2082 kill_anon_super(&root->anon_super);
2139 } 2083 }
2140#if 0
2141 if (root->in_sysfs)
2142 btrfs_sysfs_del_root(root);
2143#endif
2144 if (root->node) 2084 if (root->node)
2145 free_extent_buffer(root->node); 2085 free_extent_buffer(root->node);
2146 if (root->commit_root) 2086 if (root->commit_root)
2147 free_extent_buffer(root->commit_root); 2087 free_extent_buffer(root->commit_root);
2148 if (root->name) 2088 kfree(root->name);
2149 kfree(root->name);
2150 kfree(root); 2089 kfree(root);
2151 return 0; 2090 return 0;
2152} 2091}
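
Note: the kfree() change in the hunk above relies on kfree(NULL) being a defined no-op, so the removed "if (root->name)" guard only avoided a call that would have done nothing anyway. Standard C's free() behaves the same way; the minimal sketch below uses it as a stand-in for the kernel API, with a hypothetical "name" pointer.

        #include <stdlib.h>
        #include <stdio.h>

        int main(void)
        {
                char *name = NULL;

                /* The style the patch removes: a guard around a no-op. */
                if (name)
                        free(name);

                /* Equivalent and shorter: free(NULL), like kfree(NULL),
                 * is defined to do nothing. */
                free(name);

                printf("done, no crash\n");
                return 0;
        }
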
@@ -2157,7 +2096,7 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)
2157 struct btrfs_root *gang[8]; 2096 struct btrfs_root *gang[8];
2158 int i; 2097 int i;
2159 2098
2160 while(1) { 2099 while (1) {
2161 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 2100 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2162 (void **)gang, 0, 2101 (void **)gang, 0,
2163 ARRAY_SIZE(gang)); 2102 ARRAY_SIZE(gang));
@@ -2228,18 +2167,17 @@ int close_ctree(struct btrfs_root *root)
2228 2167
2229 if (!(fs_info->sb->s_flags & MS_RDONLY)) { 2168 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2230 ret = btrfs_commit_super(root); 2169 ret = btrfs_commit_super(root);
2231 if (ret) { 2170 if (ret)
2232 printk("btrfs: commit super returns %d\n", ret); 2171 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2233 }
2234 } 2172 }
2235 2173
2236 if (fs_info->delalloc_bytes) { 2174 if (fs_info->delalloc_bytes) {
2237 printk("btrfs: at unmount delalloc count %Lu\n", 2175 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2238 fs_info->delalloc_bytes); 2176 fs_info->delalloc_bytes);
2239 } 2177 }
2240 if (fs_info->total_ref_cache_size) { 2178 if (fs_info->total_ref_cache_size) {
2241 printk("btrfs: at umount reference cache size %Lu\n", 2179 printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
2242 fs_info->total_ref_cache_size); 2180 (unsigned long long)fs_info->total_ref_cache_size);
2243 } 2181 }
2244 2182
2245 if (fs_info->extent_root->node) 2183 if (fs_info->extent_root->node)
@@ -2248,13 +2186,13 @@ int close_ctree(struct btrfs_root *root)
2248 if (fs_info->tree_root->node) 2186 if (fs_info->tree_root->node)
2249 free_extent_buffer(fs_info->tree_root->node); 2187 free_extent_buffer(fs_info->tree_root->node);
2250 2188
2251 if (root->fs_info->chunk_root->node); 2189 if (root->fs_info->chunk_root->node)
2252 free_extent_buffer(root->fs_info->chunk_root->node); 2190 free_extent_buffer(root->fs_info->chunk_root->node);
2253 2191
2254 if (root->fs_info->dev_root->node); 2192 if (root->fs_info->dev_root->node)
2255 free_extent_buffer(root->fs_info->dev_root->node); 2193 free_extent_buffer(root->fs_info->dev_root->node);
2256 2194
2257 if (root->fs_info->csum_root->node); 2195 if (root->fs_info->csum_root->node)
2258 free_extent_buffer(root->fs_info->csum_root->node); 2196 free_extent_buffer(root->fs_info->csum_root->node);
2259 2197
2260 btrfs_free_block_groups(root->fs_info); 2198 btrfs_free_block_groups(root->fs_info);
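
Note: unlike most hunks in this patch, the three "if (...);" lines fixed above change behavior, not just style. The trailing semicolon was the entire if-body, so each free_extent_buffer() call ran unconditionally instead of only when the node pointer was set. A minimal standalone sketch of that pattern, using a hypothetical free_node() helper rather than the btrfs code itself:

        #include <stdio.h>
        #include <stdlib.h>

        static void free_node(void *node)
        {
                printf("freeing %p\n", node);
                free(node);
        }

        int main(void)
        {
                void *node = NULL;

                /* Buggy form: the stray ';' is the whole if-body, so the
                 * call below runs unconditionally, NULL or not. */
                if (node);
                        free_node(node);

                /* Fixed form: the call only runs when node is non-NULL. */
                node = malloc(16);
                if (node)
                        free_node(node);

                return 0;
        }
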
@@ -2273,7 +2211,7 @@ int close_ctree(struct btrfs_root *root)
2273 btrfs_stop_workers(&fs_info->submit_workers); 2211 btrfs_stop_workers(&fs_info->submit_workers);
2274 2212
2275#if 0 2213#if 0
2276 while(!list_empty(&fs_info->hashers)) { 2214 while (!list_empty(&fs_info->hashers)) {
2277 struct btrfs_hasher *hasher; 2215 struct btrfs_hasher *hasher;
2278 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher, 2216 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
2279 hashers); 2217 hashers);
@@ -2324,9 +2262,11 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2324 2262
2325 WARN_ON(!btrfs_tree_locked(buf)); 2263 WARN_ON(!btrfs_tree_locked(buf));
2326 if (transid != root->fs_info->generation) { 2264 if (transid != root->fs_info->generation) {
2327 printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n", 2265 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2266 "found %llu running %llu\n",
2328 (unsigned long long)buf->start, 2267 (unsigned long long)buf->start,
2329 transid, root->fs_info->generation); 2268 (unsigned long long)transid,
2269 (unsigned long long)root->fs_info->generation);
2330 WARN_ON(1); 2270 WARN_ON(1);
2331 } 2271 }
2332 set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf); 2272 set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
@@ -2361,9 +2301,8 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2361 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; 2301 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2362 int ret; 2302 int ret;
2363 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 2303 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2364 if (ret == 0) { 2304 if (ret == 0)
2365 buf->flags |= EXTENT_UPTODATE; 2305 buf->flags |= EXTENT_UPTODATE;
2366 }
2367 return ret; 2306 return ret;
2368} 2307}
2369 2308
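
Note: most printk() changes in this file follow one recipe: add an explicit KERN_* level and print u64 values with %llu plus an (unsigned long long) cast, since %Lu is a non-standard spelling that checkpatch flags and the underlying type of u64 differs between architectures. A userspace sketch of the same recipe; KERN_INFO and u64 are defined locally here as stand-ins for the kernel definitions.

        #include <stdio.h>

        #define KERN_INFO "<6>"          /* userspace stand-in for the kernel macro */
        typedef unsigned long long u64;  /* stand-in; the real typedef varies per arch */

        int main(void)
        {
                u64 bytenr = 29360128ULL;

                /* Explicit level, %llu, and a cast so the format string
                 * matches the argument however u64 is defined. */
                printf(KERN_INFO "btrfs: found block %llu\n",
                       (unsigned long long)bytenr);
                return 0;
        }
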
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 48b82cd7583c..85315d2c90de 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -7,9 +7,11 @@
7#include "export.h" 7#include "export.h"
8#include "compat.h" 8#include "compat.h"
9 9
10#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, parent_objectid)/4) 10#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \
11#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, parent_root_objectid)/4) 11 parent_objectid) / 4)
12#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid)/4) 12#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, \
13 parent_root_objectid) / 4)
14#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
13 15
14static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, 16static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
15 int connectable) 17 int connectable)
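
Note: these macros size an exported file handle, and the division by 4 reflects that the handle length encode_fh() works with is counted in 32-bit words rather than bytes. A small sketch of the arithmetic, using a simplified stand-in layout instead of the real struct btrfs_fid:

        #include <stdio.h>
        #include <stddef.h>
        #include <stdint.h>

        /* Simplified stand-in for struct btrfs_fid; only the arithmetic
         * matters here, the real layout lives in fs/btrfs/export.h. */
        struct fid_like {
                uint64_t objectid;
                uint64_t root_objectid;
                uint32_t gen;
                uint64_t parent_objectid;
                uint32_t parent_gen;
                uint64_t parent_root_objectid;
        } __attribute__((packed));

        int main(void)
        {
                /* Handle sizes in 32-bit words, mirroring the three macros. */
                printf("non-connectable:  %zu\n",
                       offsetof(struct fid_like, parent_objectid) / 4);
                printf("connectable:      %zu\n",
                       offsetof(struct fid_like, parent_root_objectid) / 4);
                printf("connectable+root: %zu\n", sizeof(struct fid_like) / 4);
                return 0;
        }
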
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 780c1eeb8299..ec43fa526d77 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -49,10 +49,10 @@ struct pending_extent_op {
49 int del; 49 int del;
50}; 50};
51 51
52static int finish_current_insert(struct btrfs_trans_handle *trans, struct 52static int finish_current_insert(struct btrfs_trans_handle *trans,
53 btrfs_root *extent_root, int all); 53 struct btrfs_root *extent_root, int all);
54static int del_pending_extents(struct btrfs_trans_handle *trans, struct 54static int del_pending_extents(struct btrfs_trans_handle *trans,
55 btrfs_root *extent_root, int all); 55 struct btrfs_root *extent_root, int all);
56static int pin_down_bytes(struct btrfs_trans_handle *trans, 56static int pin_down_bytes(struct btrfs_trans_handle *trans,
57 struct btrfs_root *root, 57 struct btrfs_root *root,
58 u64 bytenr, u64 num_bytes, int is_data); 58 u64 bytenr, u64 num_bytes, int is_data);
@@ -247,7 +247,7 @@ static int cache_block_group(struct btrfs_root *root,
247 if (ret < 0) 247 if (ret < 0)
248 goto err; 248 goto err;
249 249
250 while(1) { 250 while (1) {
251 leaf = path->nodes[0]; 251 leaf = path->nodes[0];
252 slot = path->slots[0]; 252 slot = path->slots[0];
253 if (slot >= btrfs_header_nritems(leaf)) { 253 if (slot >= btrfs_header_nritems(leaf)) {
@@ -292,9 +292,8 @@ err:
292/* 292/*
293 * return the block group that starts at or after bytenr 293 * return the block group that starts at or after bytenr
294 */ 294 */
295static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct 295static struct btrfs_block_group_cache *
296 btrfs_fs_info *info, 296btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
297 u64 bytenr)
298{ 297{
299 struct btrfs_block_group_cache *cache; 298 struct btrfs_block_group_cache *cache;
300 299
@@ -306,9 +305,9 @@ static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
306/* 305/*
307 * return the block group that contains the given bytenr 306 * return the block group that contains the given bytenr
308 */ 307 */
309struct btrfs_block_group_cache *btrfs_lookup_block_group(struct 308struct btrfs_block_group_cache *btrfs_lookup_block_group(
310 btrfs_fs_info *info, 309 struct btrfs_fs_info *info,
311 u64 bytenr) 310 u64 bytenr)
312{ 311{
313 struct btrfs_block_group_cache *cache; 312 struct btrfs_block_group_cache *cache;
314 313
@@ -492,7 +491,7 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
492 * to the key objectid. 491 * to the key objectid.
493 */ 492 */
494 493
495static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans, 494static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
496 struct btrfs_root *root, 495 struct btrfs_root *root,
497 struct btrfs_path *path, 496 struct btrfs_path *path,
498 u64 bytenr, u64 parent, 497 u64 bytenr, u64 parent,
@@ -537,7 +536,7 @@ out:
537 * updates all the backrefs that are pending on update_list for the 536 * updates all the backrefs that are pending on update_list for the
538 * extent_root 537 * extent_root
539 */ 538 */
540static int noinline update_backrefs(struct btrfs_trans_handle *trans, 539static noinline int update_backrefs(struct btrfs_trans_handle *trans,
541 struct btrfs_root *extent_root, 540 struct btrfs_root *extent_root,
542 struct btrfs_path *path, 541 struct btrfs_path *path,
543 struct list_head *update_list) 542 struct list_head *update_list)
@@ -573,9 +572,11 @@ loop:
573 btrfs_ref_generation(leaf, ref) != op->orig_generation || 572 btrfs_ref_generation(leaf, ref) != op->orig_generation ||
574 (ref_objectid != op->level && 573 (ref_objectid != op->level &&
575 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) { 574 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
576 printk(KERN_ERR "couldn't find %Lu, parent %Lu, root %Lu, " 575 printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
577 "owner %u\n", op->bytenr, op->orig_parent, 576 "root %llu, owner %u\n",
578 ref_root, op->level); 577 (unsigned long long)op->bytenr,
578 (unsigned long long)op->orig_parent,
579 (unsigned long long)ref_root, op->level);
579 btrfs_print_leaf(extent_root, leaf); 580 btrfs_print_leaf(extent_root, leaf);
580 BUG(); 581 BUG();
581 } 582 }
@@ -620,7 +621,7 @@ out:
620 return 0; 621 return 0;
621} 622}
622 623
623static int noinline insert_extents(struct btrfs_trans_handle *trans, 624static noinline int insert_extents(struct btrfs_trans_handle *trans,
624 struct btrfs_root *extent_root, 625 struct btrfs_root *extent_root,
625 struct btrfs_path *path, 626 struct btrfs_path *path,
626 struct list_head *insert_list, int nr) 627 struct list_head *insert_list, int nr)
@@ -781,7 +782,7 @@ static int noinline insert_extents(struct btrfs_trans_handle *trans,
781 return ret; 782 return ret;
782} 783}
783 784
784static int noinline insert_extent_backref(struct btrfs_trans_handle *trans, 785static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
785 struct btrfs_root *root, 786 struct btrfs_root *root,
786 struct btrfs_path *path, 787 struct btrfs_path *path,
787 u64 bytenr, u64 parent, 788 u64 bytenr, u64 parent,
@@ -840,7 +841,7 @@ out:
840 return ret; 841 return ret;
841} 842}
842 843
843static int noinline remove_extent_backref(struct btrfs_trans_handle *trans, 844static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
844 struct btrfs_root *root, 845 struct btrfs_root *root,
845 struct btrfs_path *path) 846 struct btrfs_path *path)
846{ 847{
@@ -868,7 +869,7 @@ static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
868static void btrfs_issue_discard(struct block_device *bdev, 869static void btrfs_issue_discard(struct block_device *bdev,
869 u64 start, u64 len) 870 u64 start, u64 len)
870{ 871{
871#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) 872#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
872 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL); 873 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
873#else 874#else
874 blkdev_issue_discard(bdev, start >> 9, len >> 9); 875 blkdev_issue_discard(bdev, start >> 9, len >> 9);
@@ -908,7 +909,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
908#endif 909#endif
909} 910}
910 911
911static int noinline free_extents(struct btrfs_trans_handle *trans, 912static noinline int free_extents(struct btrfs_trans_handle *trans,
912 struct btrfs_root *extent_root, 913 struct btrfs_root *extent_root,
913 struct list_head *del_list) 914 struct list_head *del_list)
914{ 915{
@@ -937,10 +938,11 @@ search:
937 extent_root->root_key.objectid, 938 extent_root->root_key.objectid,
938 op->orig_generation, op->level, 1); 939 op->orig_generation, op->level, 1);
939 if (ret) { 940 if (ret) {
940 printk("Unable to find backref byte nr %Lu root %Lu gen %Lu " 941 printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
941 "owner %u\n", op->bytenr, 942 "root %llu gen %llu owner %u\n",
942 extent_root->root_key.objectid, op->orig_generation, 943 (unsigned long long)op->bytenr,
943 op->level); 944 (unsigned long long)extent_root->root_key.objectid,
945 (unsigned long long)op->orig_generation, op->level);
944 btrfs_print_leaf(extent_root, path->nodes[0]); 946 btrfs_print_leaf(extent_root, path->nodes[0]);
945 WARN_ON(1); 947 WARN_ON(1);
946 goto out; 948 goto out;
@@ -1282,7 +1284,9 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1282 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 1284 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1283 if (key.objectid != bytenr) { 1285 if (key.objectid != bytenr) {
1284 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]); 1286 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
1285 printk("wanted %Lu found %Lu\n", bytenr, key.objectid); 1287 printk(KERN_ERR "btrfs wanted %llu found %llu\n",
1288 (unsigned long long)bytenr,
1289 (unsigned long long)key.objectid);
1286 BUG(); 1290 BUG();
1287 } 1291 }
1288 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY); 1292 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
@@ -1353,7 +1357,8 @@ int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
1353 goto out; 1357 goto out;
1354 if (ret != 0) { 1358 if (ret != 0) {
1355 btrfs_print_leaf(root, path->nodes[0]); 1359 btrfs_print_leaf(root, path->nodes[0]);
1356 printk("failed to find block number %Lu\n", bytenr); 1360 printk(KERN_INFO "btrfs failed to find block number %llu\n",
1361 (unsigned long long)bytenr);
1357 BUG(); 1362 BUG();
1358 } 1363 }
1359 l = path->nodes[0]; 1364 l = path->nodes[0];
@@ -1738,7 +1743,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1738 if (!path) 1743 if (!path)
1739 return -ENOMEM; 1744 return -ENOMEM;
1740 1745
1741 while(1) { 1746 while (1) {
1742 cache = NULL; 1747 cache = NULL;
1743 spin_lock(&root->fs_info->block_group_cache_lock); 1748 spin_lock(&root->fs_info->block_group_cache_lock);
1744 for (n = rb_first(&root->fs_info->block_group_cache_tree); 1749 for (n = rb_first(&root->fs_info->block_group_cache_tree);
@@ -1921,10 +1926,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1921 spin_unlock(&space_info->lock); 1926 spin_unlock(&space_info->lock);
1922 1927
1923 ret = btrfs_alloc_chunk(trans, extent_root, flags); 1928 ret = btrfs_alloc_chunk(trans, extent_root, flags);
1924 if (ret) { 1929 if (ret)
1925printk("space info full %Lu\n", flags);
1926 space_info->full = 1; 1930 space_info->full = 1;
1927 }
1928out: 1931out:
1929 mutex_unlock(&extent_root->fs_info->chunk_mutex); 1932 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1930 return ret; 1933 return ret;
@@ -1941,7 +1944,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
1941 u64 old_val; 1944 u64 old_val;
1942 u64 byte_in_group; 1945 u64 byte_in_group;
1943 1946
1944 while(total) { 1947 while (total) {
1945 cache = btrfs_lookup_block_group(info, bytenr); 1948 cache = btrfs_lookup_block_group(info, bytenr);
1946 if (!cache) 1949 if (!cache)
1947 return -1; 1950 return -1;
@@ -2089,7 +2092,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2089 int ret; 2092 int ret;
2090 2093
2091 mutex_lock(&root->fs_info->pinned_mutex); 2094 mutex_lock(&root->fs_info->pinned_mutex);
2092 while(1) { 2095 while (1) {
2093 ret = find_first_extent_bit(pinned_extents, last, 2096 ret = find_first_extent_bit(pinned_extents, last,
2094 &start, &end, EXTENT_DIRTY); 2097 &start, &end, EXTENT_DIRTY);
2095 if (ret) 2098 if (ret)
@@ -2110,7 +2113,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2110 int ret; 2113 int ret;
2111 2114
2112 mutex_lock(&root->fs_info->pinned_mutex); 2115 mutex_lock(&root->fs_info->pinned_mutex);
2113 while(1) { 2116 while (1) {
2114 ret = find_first_extent_bit(unpin, 0, &start, &end, 2117 ret = find_first_extent_bit(unpin, 0, &start, &end,
2115 EXTENT_DIRTY); 2118 EXTENT_DIRTY);
2116 if (ret) 2119 if (ret)
@@ -2400,7 +2403,7 @@ static int __free_extent(struct btrfs_trans_handle *trans,
2400 if (ret == 0) { 2403 if (ret == 0) {
2401 struct btrfs_key found_key; 2404 struct btrfs_key found_key;
2402 extent_slot = path->slots[0]; 2405 extent_slot = path->slots[0];
2403 while(extent_slot > 0) { 2406 while (extent_slot > 0) {
2404 extent_slot--; 2407 extent_slot--;
2405 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2408 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2406 extent_slot); 2409 extent_slot);
@@ -2422,8 +2425,8 @@ static int __free_extent(struct btrfs_trans_handle *trans,
2422 &key, path, -1, 1); 2425 &key, path, -1, 1);
2423 if (ret) { 2426 if (ret) {
2424 printk(KERN_ERR "umm, got %d back from search" 2427 printk(KERN_ERR "umm, got %d back from search"
2425 ", was looking for %Lu\n", ret, 2428 ", was looking for %llu\n", ret,
2426 bytenr); 2429 (unsigned long long)bytenr);
2427 btrfs_print_leaf(extent_root, path->nodes[0]); 2430 btrfs_print_leaf(extent_root, path->nodes[0]);
2428 } 2431 }
2429 BUG_ON(ret); 2432 BUG_ON(ret);
@@ -2432,9 +2435,12 @@ static int __free_extent(struct btrfs_trans_handle *trans,
2432 } else { 2435 } else {
2433 btrfs_print_leaf(extent_root, path->nodes[0]); 2436 btrfs_print_leaf(extent_root, path->nodes[0]);
2434 WARN_ON(1); 2437 WARN_ON(1);
2435 printk("Unable to find ref byte nr %Lu root %Lu " 2438 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
2436 "gen %Lu owner %Lu\n", bytenr, 2439 "root %llu gen %llu owner %llu\n",
2437 root_objectid, ref_generation, owner_objectid); 2440 (unsigned long long)bytenr,
2441 (unsigned long long)root_objectid,
2442 (unsigned long long)ref_generation,
2443 (unsigned long long)owner_objectid);
2438 } 2444 }
2439 2445
2440 leaf = path->nodes[0]; 2446 leaf = path->nodes[0];
@@ -2517,8 +2523,8 @@ static int __free_extent(struct btrfs_trans_handle *trans,
2517 * find all the blocks marked as pending in the radix tree and remove 2523 * find all the blocks marked as pending in the radix tree and remove
2518 * them from the extent map 2524 * them from the extent map
2519 */ 2525 */
2520static int del_pending_extents(struct btrfs_trans_handle *trans, struct 2526static int del_pending_extents(struct btrfs_trans_handle *trans,
2521 btrfs_root *extent_root, int all) 2527 struct btrfs_root *extent_root, int all)
2522{ 2528{
2523 int ret; 2529 int ret;
2524 int err = 0; 2530 int err = 0;
@@ -2539,7 +2545,7 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct
2539 2545
2540again: 2546again:
2541 mutex_lock(&info->extent_ins_mutex); 2547 mutex_lock(&info->extent_ins_mutex);
2542 while(1) { 2548 while (1) {
2543 ret = find_first_extent_bit(pending_del, search, &start, &end, 2549 ret = find_first_extent_bit(pending_del, search, &start, &end,
2544 EXTENT_WRITEBACK); 2550 EXTENT_WRITEBACK);
2545 if (ret) { 2551 if (ret) {
@@ -2753,7 +2759,7 @@ static u64 stripe_align(struct btrfs_root *root, u64 val)
2753 * ins->offset == number of blocks 2759 * ins->offset == number of blocks
2754 * Any available blocks before search_start are skipped. 2760 * Any available blocks before search_start are skipped.
2755 */ 2761 */
2756static int noinline find_free_extent(struct btrfs_trans_handle *trans, 2762static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2757 struct btrfs_root *orig_root, 2763 struct btrfs_root *orig_root,
2758 u64 num_bytes, u64 empty_size, 2764 u64 num_bytes, u64 empty_size,
2759 u64 search_start, u64 search_end, 2765 u64 search_start, u64 search_end,
@@ -2762,7 +2768,7 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2762 int data) 2768 int data)
2763{ 2769{
2764 int ret = 0; 2770 int ret = 0;
2765 struct btrfs_root * root = orig_root->fs_info->extent_root; 2771 struct btrfs_root *root = orig_root->fs_info->extent_root;
2766 u64 total_needed = num_bytes; 2772 u64 total_needed = num_bytes;
2767 u64 *last_ptr = NULL; 2773 u64 *last_ptr = NULL;
2768 u64 last_wanted = 0; 2774 u64 last_wanted = 0;
@@ -2995,8 +3001,10 @@ loop_check:
2995 *last_ptr = ins->objectid + ins->offset; 3001 *last_ptr = ins->objectid + ins->offset;
2996 ret = 0; 3002 ret = 0;
2997 } else if (!ret) { 3003 } else if (!ret) {
2998 printk(KERN_ERR "we were searching for %Lu bytes, num_bytes %Lu," 3004 printk(KERN_ERR "btrfs searching for %llu bytes, "
2999 " loop %d, allowed_alloc %d\n", total_needed, num_bytes, 3005 "num_bytes %llu, loop %d, allowed_alloc %d\n",
3006 (unsigned long long)total_needed,
3007 (unsigned long long)num_bytes,
3000 loop, allowed_chunk_alloc); 3008 loop, allowed_chunk_alloc);
3001 ret = -ENOSPC; 3009 ret = -ENOSPC;
3002 } 3010 }
@@ -3012,19 +3020,22 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3012 struct btrfs_block_group_cache *cache; 3020 struct btrfs_block_group_cache *cache;
3013 struct list_head *l; 3021 struct list_head *l;
3014 3022
3015 printk(KERN_INFO "space_info has %Lu free, is %sfull\n", 3023 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3016 info->total_bytes - info->bytes_used - info->bytes_pinned - 3024 (unsigned long long)(info->total_bytes - info->bytes_used -
3017 info->bytes_reserved, (info->full) ? "" : "not "); 3025 info->bytes_pinned - info->bytes_reserved),
3026 (info->full) ? "" : "not ");
3018 3027
3019 down_read(&info->groups_sem); 3028 down_read(&info->groups_sem);
3020 list_for_each(l, &info->block_groups) { 3029 list_for_each(l, &info->block_groups) {
3021 cache = list_entry(l, struct btrfs_block_group_cache, list); 3030 cache = list_entry(l, struct btrfs_block_group_cache, list);
3022 spin_lock(&cache->lock); 3031 spin_lock(&cache->lock);
3023 printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used " 3032 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3024 "%Lu pinned %Lu reserved\n", 3033 "%llu pinned %llu reserved\n",
3025 cache->key.objectid, cache->key.offset, 3034 (unsigned long long)cache->key.objectid,
3026 btrfs_block_group_used(&cache->item), 3035 (unsigned long long)cache->key.offset,
3027 cache->pinned, cache->reserved); 3036 (unsigned long long)btrfs_block_group_used(&cache->item),
3037 (unsigned long long)cache->pinned,
3038 (unsigned long long)cache->reserved);
3028 btrfs_dump_free_space(cache, bytes); 3039 btrfs_dump_free_space(cache, bytes);
3029 spin_unlock(&cache->lock); 3040 spin_unlock(&cache->lock);
3030 } 3041 }
@@ -3045,15 +3056,15 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3045 3056
3046 if (data) { 3057 if (data) {
3047 alloc_profile = info->avail_data_alloc_bits & 3058 alloc_profile = info->avail_data_alloc_bits &
3048 info->data_alloc_profile; 3059 info->data_alloc_profile;
3049 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile; 3060 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
3050 } else if (root == root->fs_info->chunk_root) { 3061 } else if (root == root->fs_info->chunk_root) {
3051 alloc_profile = info->avail_system_alloc_bits & 3062 alloc_profile = info->avail_system_alloc_bits &
3052 info->system_alloc_profile; 3063 info->system_alloc_profile;
3053 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile; 3064 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
3054 } else { 3065 } else {
3055 alloc_profile = info->avail_metadata_alloc_bits & 3066 alloc_profile = info->avail_metadata_alloc_bits &
3056 info->metadata_alloc_profile; 3067 info->metadata_alloc_profile;
3057 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile; 3068 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
3058 } 3069 }
3059again: 3070again:
@@ -3092,8 +3103,9 @@ again:
3092 struct btrfs_space_info *sinfo; 3103 struct btrfs_space_info *sinfo;
3093 3104
3094 sinfo = __find_space_info(root->fs_info, data); 3105 sinfo = __find_space_info(root->fs_info, data);
3095 printk("allocation failed flags %Lu, wanted %Lu\n", 3106 printk(KERN_ERR "btrfs allocation failed flags %llu, "
3096 data, num_bytes); 3107 "wanted %llu\n", (unsigned long long)data,
3108 (unsigned long long)num_bytes);
3097 dump_space_info(sinfo, num_bytes); 3109 dump_space_info(sinfo, num_bytes);
3098 BUG(); 3110 BUG();
3099 } 3111 }
@@ -3108,7 +3120,8 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
3108 3120
3109 cache = btrfs_lookup_block_group(root->fs_info, start); 3121 cache = btrfs_lookup_block_group(root->fs_info, start);
3110 if (!cache) { 3122 if (!cache) {
3111 printk(KERN_ERR "Unable to find block group for %Lu\n", start); 3123 printk(KERN_ERR "Unable to find block group for %llu\n",
3124 (unsigned long long)start);
3112 return -ENOSPC; 3125 return -ENOSPC;
3113 } 3126 }
3114 3127
@@ -3235,10 +3248,12 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3235 } 3248 }
3236 3249
3237update_block: 3250update_block:
3238 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0); 3251 ret = update_block_group(trans, root, ins->objectid,
3252 ins->offset, 1, 0);
3239 if (ret) { 3253 if (ret) {
3240 printk("update block group failed for %Lu %Lu\n", 3254 printk(KERN_ERR "btrfs update block group failed for %llu "
3241 ins->objectid, ins->offset); 3255 "%llu\n", (unsigned long long)ins->objectid,
3256 (unsigned long long)ins->offset);
3242 BUG(); 3257 BUG();
3243 } 3258 }
3244out: 3259out:
@@ -3420,7 +3435,7 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3420 return 0; 3435 return 0;
3421} 3436}
3422 3437
3423static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans, 3438static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3424 struct btrfs_root *root, 3439 struct btrfs_root *root,
3425 struct btrfs_leaf_ref *ref) 3440 struct btrfs_leaf_ref *ref)
3426{ 3441{
@@ -3445,15 +3460,15 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3445 return 0; 3460 return 0;
3446} 3461}
3447 3462
3448static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len, 3463static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
3449 u32 *refs) 3464 u64 len, u32 *refs)
3450{ 3465{
3451 int ret; 3466 int ret;
3452 3467
3453 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs); 3468 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
3454 BUG_ON(ret); 3469 BUG_ON(ret);
3455 3470
3456#if 0 // some debugging code in case we see problems here 3471#if 0 /* some debugging code in case we see problems here */
3457 /* if the refs count is one, it won't get increased again. But 3472 /* if the refs count is one, it won't get increased again. But
3458 * if the ref count is > 1, someone may be decreasing it at 3473 * if the ref count is > 1, someone may be decreasing it at
3459 * the same time we are. 3474 * the same time we are.
@@ -3474,8 +3489,8 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len
3474 free_extent_buffer(eb); 3489 free_extent_buffer(eb);
3475 } 3490 }
3476 if (*refs == 1) { 3491 if (*refs == 1) {
3477 printk("block %llu went down to one during drop_snap\n", 3492 printk(KERN_ERR "btrfs block %llu went down to one "
3478 (unsigned long long)start); 3493 "during drop_snap\n", (unsigned long long)start);
3479 } 3494 }
3480 3495
3481 } 3496 }
@@ -3489,7 +3504,7 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len
3489 * helper function for drop_snapshot, this walks down the tree dropping ref 3504 * helper function for drop_snapshot, this walks down the tree dropping ref
3490 * counts as it goes. 3505 * counts as it goes.
3491 */ 3506 */
3492static int noinline walk_down_tree(struct btrfs_trans_handle *trans, 3507static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3493 struct btrfs_root *root, 3508 struct btrfs_root *root,
3494 struct btrfs_path *path, int *level) 3509 struct btrfs_path *path, int *level)
3495{ 3510{
@@ -3516,7 +3531,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
3516 /* 3531 /*
3517 * walk down to the last node level and free all the leaves 3532 * walk down to the last node level and free all the leaves
3518 */ 3533 */
3519 while(*level >= 0) { 3534 while (*level >= 0) {
3520 WARN_ON(*level < 0); 3535 WARN_ON(*level < 0);
3521 WARN_ON(*level >= BTRFS_MAX_LEVEL); 3536 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3522 cur = path->nodes[*level]; 3537 cur = path->nodes[*level];
@@ -3576,10 +3591,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
3576 *level = 0; 3591 *level = 0;
3577 break; 3592 break;
3578 } 3593 }
3579 if (printk_ratelimit()) {
3580 printk("leaf ref miss for bytenr %llu\n",
3581 (unsigned long long)bytenr);
3582 }
3583 } 3594 }
3584 next = btrfs_find_tree_block(root, bytenr, blocksize); 3595 next = btrfs_find_tree_block(root, bytenr, blocksize);
3585 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) { 3596 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
@@ -3641,7 +3652,7 @@ out:
3641 * walk_down_tree. The main difference is that it checks reference 3652 * walk_down_tree. The main difference is that it checks reference
3642 * counts while tree blocks are locked. 3653 * counts while tree blocks are locked.
3643 */ 3654 */
3644static int noinline walk_down_subtree(struct btrfs_trans_handle *trans, 3655static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3645 struct btrfs_root *root, 3656 struct btrfs_root *root,
3646 struct btrfs_path *path, int *level) 3657 struct btrfs_path *path, int *level)
3647{ 3658{
@@ -3730,7 +3741,7 @@ out:
3730 * to find the first node higher up where we haven't yet gone through 3741 * to find the first node higher up where we haven't yet gone through
3731 * all the slots 3742 * all the slots
3732 */ 3743 */
3733static int noinline walk_up_tree(struct btrfs_trans_handle *trans, 3744static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3734 struct btrfs_root *root, 3745 struct btrfs_root *root,
3735 struct btrfs_path *path, 3746 struct btrfs_path *path,
3736 int *level, int max_level) 3747 int *level, int max_level)
@@ -3839,7 +3850,7 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3839 } 3850 }
3840 } 3851 }
3841 } 3852 }
3842 while(1) { 3853 while (1) {
3843 wret = walk_down_tree(trans, root, path, &level); 3854 wret = walk_down_tree(trans, root, path, &level);
3844 if (wret > 0) 3855 if (wret > 0)
3845 break; 3856 break;
@@ -3920,7 +3931,7 @@ static unsigned long calc_ra(unsigned long start, unsigned long last,
3920 return min(last, start + nr - 1); 3931 return min(last, start + nr - 1);
3921} 3932}
3922 3933
3923static int noinline relocate_inode_pages(struct inode *inode, u64 start, 3934static noinline int relocate_inode_pages(struct inode *inode, u64 start,
3924 u64 len) 3935 u64 len)
3925{ 3936{
3926 u64 page_start; 3937 u64 page_start;
@@ -4011,7 +4022,7 @@ out_unlock:
4011 return ret; 4022 return ret;
4012} 4023}
4013 4024
4014static int noinline relocate_data_extent(struct inode *reloc_inode, 4025static noinline int relocate_data_extent(struct inode *reloc_inode,
4015 struct btrfs_key *extent_key, 4026 struct btrfs_key *extent_key,
4016 u64 offset) 4027 u64 offset)
4017{ 4028{
@@ -4087,7 +4098,7 @@ static int is_cowonly_root(u64 root_objectid)
4087 return 0; 4098 return 0;
4088} 4099}
4089 4100
4090static int noinline __next_ref_path(struct btrfs_trans_handle *trans, 4101static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
4091 struct btrfs_root *extent_root, 4102 struct btrfs_root *extent_root,
4092 struct btrfs_ref_path *ref_path, 4103 struct btrfs_ref_path *ref_path,
4093 int first_time) 4104 int first_time)
@@ -4119,11 +4130,10 @@ walk_down:
4119 if (level < ref_path->lowest_level) 4130 if (level < ref_path->lowest_level)
4120 break; 4131 break;
4121 4132
4122 if (level >= 0) { 4133 if (level >= 0)
4123 bytenr = ref_path->nodes[level]; 4134 bytenr = ref_path->nodes[level];
4124 } else { 4135 else
4125 bytenr = ref_path->extent_start; 4136 bytenr = ref_path->extent_start;
4126 }
4127 BUG_ON(bytenr == 0); 4137 BUG_ON(bytenr == 0);
4128 4138
4129 parent = ref_path->nodes[level + 1]; 4139 parent = ref_path->nodes[level + 1];
@@ -4170,11 +4180,12 @@ walk_up:
4170 level = ref_path->current_level; 4180 level = ref_path->current_level;
4171 while (level < BTRFS_MAX_LEVEL - 1) { 4181 while (level < BTRFS_MAX_LEVEL - 1) {
4172 u64 ref_objectid; 4182 u64 ref_objectid;
4173 if (level >= 0) { 4183
4184 if (level >= 0)
4174 bytenr = ref_path->nodes[level]; 4185 bytenr = ref_path->nodes[level];
4175 } else { 4186 else
4176 bytenr = ref_path->extent_start; 4187 bytenr = ref_path->extent_start;
4177 } 4188
4178 BUG_ON(bytenr == 0); 4189 BUG_ON(bytenr == 0);
4179 4190
4180 key.objectid = bytenr; 4191 key.objectid = bytenr;
@@ -4299,7 +4310,7 @@ static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4299 return __next_ref_path(trans, extent_root, ref_path, 0); 4310 return __next_ref_path(trans, extent_root, ref_path, 0);
4300} 4311}
4301 4312
4302static int noinline get_new_locations(struct inode *reloc_inode, 4313static noinline int get_new_locations(struct inode *reloc_inode,
4303 struct btrfs_key *extent_key, 4314 struct btrfs_key *extent_key,
4304 u64 offset, int no_fragment, 4315 u64 offset, int no_fragment,
4305 struct disk_extent **extents, 4316 struct disk_extent **extents,
@@ -4420,7 +4431,7 @@ out:
4420 return ret; 4431 return ret;
4421} 4432}
4422 4433
4423static int noinline replace_one_extent(struct btrfs_trans_handle *trans, 4434static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4424 struct btrfs_root *root, 4435 struct btrfs_root *root,
4425 struct btrfs_path *path, 4436 struct btrfs_path *path,
4426 struct btrfs_key *extent_key, 4437 struct btrfs_key *extent_key,
@@ -4778,7 +4789,7 @@ int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4778 return 0; 4789 return 0;
4779} 4790}
4780 4791
4781static int noinline invalidate_extent_cache(struct btrfs_root *root, 4792static noinline int invalidate_extent_cache(struct btrfs_root *root,
4782 struct extent_buffer *leaf, 4793 struct extent_buffer *leaf,
4783 struct btrfs_block_group_cache *group, 4794 struct btrfs_block_group_cache *group,
4784 struct btrfs_root *target_root) 4795 struct btrfs_root *target_root)
@@ -4826,7 +4837,7 @@ static int noinline invalidate_extent_cache(struct btrfs_root *root,
4826 return 0; 4837 return 0;
4827} 4838}
4828 4839
4829static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans, 4840static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4830 struct btrfs_root *root, 4841 struct btrfs_root *root,
4831 struct extent_buffer *leaf, 4842 struct extent_buffer *leaf,
4832 struct btrfs_block_group_cache *group, 4843 struct btrfs_block_group_cache *group,
@@ -5035,7 +5046,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
5035 return 0; 5046 return 0;
5036} 5047}
5037 5048
5038static int noinline init_reloc_tree(struct btrfs_trans_handle *trans, 5049static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
5039 struct btrfs_root *root) 5050 struct btrfs_root *root)
5040{ 5051{
5041 struct btrfs_root *reloc_root; 5052 struct btrfs_root *reloc_root;
@@ -5102,7 +5113,7 @@ static int noinline init_reloc_tree(struct btrfs_trans_handle *trans,
5102 * tree blocks are shared between reloc trees, so they are also shared 5113 * tree blocks are shared between reloc trees, so they are also shared
5103 * between subvols. 5114 * between subvols.
5104 */ 5115 */
5105static int noinline relocate_one_path(struct btrfs_trans_handle *trans, 5116static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
5106 struct btrfs_root *root, 5117 struct btrfs_root *root,
5107 struct btrfs_path *path, 5118 struct btrfs_path *path,
5108 struct btrfs_key *first_key, 5119 struct btrfs_key *first_key,
@@ -5199,7 +5210,7 @@ static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
5199 return 0; 5210 return 0;
5200} 5211}
5201 5212
5202static int noinline relocate_tree_block(struct btrfs_trans_handle *trans, 5213static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
5203 struct btrfs_root *root, 5214 struct btrfs_root *root,
5204 struct btrfs_path *path, 5215 struct btrfs_path *path,
5205 struct btrfs_key *first_key, 5216 struct btrfs_key *first_key,
@@ -5217,7 +5228,7 @@ static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
5217 return 0; 5228 return 0;
5218} 5229}
5219 5230
5220static int noinline del_extent_zero(struct btrfs_trans_handle *trans, 5231static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
5221 struct btrfs_root *extent_root, 5232 struct btrfs_root *extent_root,
5222 struct btrfs_path *path, 5233 struct btrfs_path *path,
5223 struct btrfs_key *extent_key) 5234 struct btrfs_key *extent_key)
@@ -5233,7 +5244,7 @@ out:
5233 return ret; 5244 return ret;
5234} 5245}
5235 5246
5236static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info, 5247static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
5237 struct btrfs_ref_path *ref_path) 5248 struct btrfs_ref_path *ref_path)
5238{ 5249{
5239 struct btrfs_key root_key; 5250 struct btrfs_key root_key;
@@ -5248,7 +5259,7 @@ static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info,
5248 return btrfs_read_fs_root_no_name(fs_info, &root_key); 5259 return btrfs_read_fs_root_no_name(fs_info, &root_key);
5249} 5260}
5250 5261
5251static int noinline relocate_one_extent(struct btrfs_root *extent_root, 5262static noinline int relocate_one_extent(struct btrfs_root *extent_root,
5252 struct btrfs_path *path, 5263 struct btrfs_path *path,
5253 struct btrfs_key *extent_key, 5264 struct btrfs_key *extent_key,
5254 struct btrfs_block_group_cache *group, 5265 struct btrfs_block_group_cache *group,
@@ -5276,8 +5287,8 @@ static int noinline relocate_one_extent(struct btrfs_root *extent_root,
5276 5287
5277 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); 5288 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5278 if (!ref_path) { 5289 if (!ref_path) {
5279 ret = -ENOMEM; 5290 ret = -ENOMEM;
5280 goto out; 5291 goto out;
5281 } 5292 }
5282 5293
5283 for (loops = 0; ; loops++) { 5294 for (loops = 0; ; loops++) {
@@ -5497,7 +5508,7 @@ out:
5497 return ret; 5508 return ret;
5498} 5509}
5499 5510
5500static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info, 5511static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
5501 struct btrfs_block_group_cache *group) 5512 struct btrfs_block_group_cache *group)
5502{ 5513{
5503 struct inode *inode = NULL; 5514 struct inode *inode = NULL;
@@ -5617,7 +5628,7 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5617 block_group = btrfs_lookup_block_group(info, group_start); 5628 block_group = btrfs_lookup_block_group(info, group_start);
5618 BUG_ON(!block_group); 5629 BUG_ON(!block_group);
5619 5630
5620 printk("btrfs relocating block group %llu flags %llu\n", 5631 printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5621 (unsigned long long)block_group->key.objectid, 5632 (unsigned long long)block_group->key.objectid,
5622 (unsigned long long)block_group->flags); 5633 (unsigned long long)block_group->flags);
5623 5634
@@ -5649,7 +5660,7 @@ again:
5649 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1); 5660 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5650 mutex_unlock(&root->fs_info->cleaner_mutex); 5661 mutex_unlock(&root->fs_info->cleaner_mutex);
5651 5662
5652 while(1) { 5663 while (1) {
5653 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5664 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5654 if (ret < 0) 5665 if (ret < 0)
5655 goto out; 5666 goto out;
@@ -5712,7 +5723,7 @@ next:
5712 } 5723 }
5713 5724
5714 if (total_found > 0) { 5725 if (total_found > 0) {
5715 printk("btrfs found %llu extents in pass %d\n", 5726 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
5716 (unsigned long long)total_found, pass); 5727 (unsigned long long)total_found, pass);
5717 pass++; 5728 pass++;
5718 if (total_found == skipped && pass > 2) { 5729 if (total_found == skipped && pass > 2) {
@@ -5754,7 +5765,7 @@ static int find_first_block_group(struct btrfs_root *root,
5754 if (ret < 0) 5765 if (ret < 0)
5755 goto out; 5766 goto out;
5756 5767
5757 while(1) { 5768 while (1) {
5758 slot = path->slots[0]; 5769 slot = path->slots[0];
5759 leaf = path->nodes[0]; 5770 leaf = path->nodes[0];
5760 if (slot >= btrfs_header_nritems(leaf)) { 5771 if (slot >= btrfs_header_nritems(leaf)) {
@@ -5825,7 +5836,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
5825 if (!path) 5836 if (!path)
5826 return -ENOMEM; 5837 return -ENOMEM;
5827 5838
5828 while(1) { 5839 while (1) {
5829 ret = find_first_block_group(root, path, &key); 5840 ret = find_first_block_group(root, path, &key);
5830 if (ret > 0) { 5841 if (ret > 0) {
5831 ret = 0; 5842 ret = 0;
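
Note: a large share of the extent-tree.c hunks above only reorder declaration specifiers: checkpatch wants inline/noinline between the storage class and the return type, so "static int noinline f(...)" becomes "static noinline int f(...)". Both orderings compile; the sketch below shows the preferred form, with a local noinline define standing in for the kernel's.

        #include <stdio.h>

        #define noinline __attribute__((noinline))  /* local stand-in for the kernel define */

        /* Preferred ordering: storage class, attribute, return type. */
        static noinline int double_it(int x)
        {
                return x * 2;
        }

        int main(void)
        {
                printf("%d\n", double_it(21));
                return 0;
        }
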
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0bf7684207aa..39edb551dca6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -32,7 +32,7 @@ static LIST_HEAD(states);
32 32
33#define LEAK_DEBUG 0 33#define LEAK_DEBUG 0
34#ifdef LEAK_DEBUG 34#ifdef LEAK_DEBUG
35static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED; 35static DEFINE_SPINLOCK(leak_lock);
36#endif 36#endif
37 37
38#define BUFFER_LRU_MAX 64 38#define BUFFER_LRU_MAX 64
@@ -81,7 +81,11 @@ void extent_io_exit(void)
81 81
82 while (!list_empty(&states)) { 82 while (!list_empty(&states)) {
83 state = list_entry(states.next, struct extent_state, leak_list); 83 state = list_entry(states.next, struct extent_state, leak_list);
84 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs)); 84 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
85 "state %lu in tree %p refs %d\n",
86 (unsigned long long)state->start,
87 (unsigned long long)state->end,
88 state->state, state->tree, atomic_read(&state->refs));
85 list_del(&state->leak_list); 89 list_del(&state->leak_list);
86 kmem_cache_free(extent_state_cache, state); 90 kmem_cache_free(extent_state_cache, state);
87 91
@@ -89,7 +93,9 @@ void extent_io_exit(void)
89 93
90 while (!list_empty(&buffers)) { 94 while (!list_empty(&buffers)) {
91 eb = list_entry(buffers.next, struct extent_buffer, leak_list); 95 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
92 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs)); 96 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
97 "refs %d\n", (unsigned long long)eb->start,
98 eb->len, atomic_read(&eb->refs));
93 list_del(&eb->leak_list); 99 list_del(&eb->leak_list);
94 kmem_cache_free(extent_buffer_cache, eb); 100 kmem_cache_free(extent_buffer_cache, eb);
95 } 101 }
@@ -158,11 +164,11 @@ EXPORT_SYMBOL(free_extent_state);
158static struct rb_node *tree_insert(struct rb_root *root, u64 offset, 164static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159 struct rb_node *node) 165 struct rb_node *node)
160{ 166{
161 struct rb_node ** p = &root->rb_node; 167 struct rb_node **p = &root->rb_node;
162 struct rb_node * parent = NULL; 168 struct rb_node *parent = NULL;
163 struct tree_entry *entry; 169 struct tree_entry *entry;
164 170
165 while(*p) { 171 while (*p) {
166 parent = *p; 172 parent = *p;
167 entry = rb_entry(parent, struct tree_entry, rb_node); 173 entry = rb_entry(parent, struct tree_entry, rb_node);
168 174
@@ -185,13 +191,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
185 struct rb_node **next_ret) 191 struct rb_node **next_ret)
186{ 192{
187 struct rb_root *root = &tree->state; 193 struct rb_root *root = &tree->state;
188 struct rb_node * n = root->rb_node; 194 struct rb_node *n = root->rb_node;
189 struct rb_node *prev = NULL; 195 struct rb_node *prev = NULL;
190 struct rb_node *orig_prev = NULL; 196 struct rb_node *orig_prev = NULL;
191 struct tree_entry *entry; 197 struct tree_entry *entry;
192 struct tree_entry *prev_entry = NULL; 198 struct tree_entry *prev_entry = NULL;
193 199
194 while(n) { 200 while (n) {
195 entry = rb_entry(n, struct tree_entry, rb_node); 201 entry = rb_entry(n, struct tree_entry, rb_node);
196 prev = n; 202 prev = n;
197 prev_entry = entry; 203 prev_entry = entry;
@@ -200,14 +206,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
200 n = n->rb_left; 206 n = n->rb_left;
201 else if (offset > entry->end) 207 else if (offset > entry->end)
202 n = n->rb_right; 208 n = n->rb_right;
203 else { 209 else
204 return n; 210 return n;
205 }
206 } 211 }
207 212
208 if (prev_ret) { 213 if (prev_ret) {
209 orig_prev = prev; 214 orig_prev = prev;
210 while(prev && offset > prev_entry->end) { 215 while (prev && offset > prev_entry->end) {
211 prev = rb_next(prev); 216 prev = rb_next(prev);
212 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 217 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
213 } 218 }
@@ -217,7 +222,7 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
217 222
218 if (next_ret) { 223 if (next_ret) {
219 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 224 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
220 while(prev && offset < prev_entry->start) { 225 while (prev && offset < prev_entry->start) {
221 prev = rb_prev(prev); 226 prev = rb_prev(prev);
222 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 227 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
223 } 228 }
@@ -233,9 +238,8 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
233 struct rb_node *ret; 238 struct rb_node *ret;
234 239
235 ret = __etree_search(tree, offset, &prev, NULL); 240 ret = __etree_search(tree, offset, &prev, NULL);
236 if (!ret) { 241 if (!ret)
237 return prev; 242 return prev;
238 }
239 return ret; 243 return ret;
240} 244}
241 245
@@ -243,11 +247,11 @@ static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
243 u64 offset, struct rb_node *node) 247 u64 offset, struct rb_node *node)
244{ 248{
245 struct rb_root *root = &tree->buffer; 249 struct rb_root *root = &tree->buffer;
246 struct rb_node ** p = &root->rb_node; 250 struct rb_node **p = &root->rb_node;
247 struct rb_node * parent = NULL; 251 struct rb_node *parent = NULL;
248 struct extent_buffer *eb; 252 struct extent_buffer *eb;
249 253
250 while(*p) { 254 while (*p) {
251 parent = *p; 255 parent = *p;
252 eb = rb_entry(parent, struct extent_buffer, rb_node); 256 eb = rb_entry(parent, struct extent_buffer, rb_node);
253 257
@@ -268,10 +272,10 @@ static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
268 u64 offset) 272 u64 offset)
269{ 273{
270 struct rb_root *root = &tree->buffer; 274 struct rb_root *root = &tree->buffer;
271 struct rb_node * n = root->rb_node; 275 struct rb_node *n = root->rb_node;
272 struct extent_buffer *eb; 276 struct extent_buffer *eb;
273 277
274 while(n) { 278 while (n) {
275 eb = rb_entry(n, struct extent_buffer, rb_node); 279 eb = rb_entry(n, struct extent_buffer, rb_node);
276 if (offset < eb->start) 280 if (offset < eb->start)
277 n = n->rb_left; 281 n = n->rb_left;
@@ -363,7 +367,9 @@ static int insert_state(struct extent_io_tree *tree,
363 struct rb_node *node; 367 struct rb_node *node;
364 368
365 if (end < start) { 369 if (end < start) {
366 printk("end < start %Lu %Lu\n", end, start); 370 printk(KERN_ERR "btrfs end < start %llu %llu\n",
371 (unsigned long long)end,
372 (unsigned long long)start);
367 WARN_ON(1); 373 WARN_ON(1);
368 } 374 }
369 if (bits & EXTENT_DIRTY) 375 if (bits & EXTENT_DIRTY)
@@ -376,7 +382,10 @@ static int insert_state(struct extent_io_tree *tree,
376 if (node) { 382 if (node) {
377 struct extent_state *found; 383 struct extent_state *found;
378 found = rb_entry(node, struct extent_state, rb_node); 384 found = rb_entry(node, struct extent_state, rb_node);
379 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end); 385 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
386 "%llu %llu\n", (unsigned long long)found->start,
387 (unsigned long long)found->end,
388 (unsigned long long)start, (unsigned long long)end);
380 free_extent_state(state); 389 free_extent_state(state);
381 return -EEXIST; 390 return -EEXIST;
382 } 391 }
@@ -412,7 +421,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
412 if (node) { 421 if (node) {
413 struct extent_state *found; 422 struct extent_state *found;
414 found = rb_entry(node, struct extent_state, rb_node); 423 found = rb_entry(node, struct extent_state, rb_node);
415 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
416 free_extent_state(prealloc); 424 free_extent_state(prealloc);
417 return -EEXIST; 425 return -EEXIST;
418 } 426 }
@@ -661,8 +669,9 @@ static void set_state_bits(struct extent_io_tree *tree,
661 * [start, end] is inclusive 669 * [start, end] is inclusive
662 * This takes the tree lock. 670 * This takes the tree lock.
663 */ 671 */
664static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, 672static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
665 int exclusive, u64 *failed_start, gfp_t mask) 673 int bits, int exclusive, u64 *failed_start,
674 gfp_t mask)
666{ 675{
667 struct extent_state *state; 676 struct extent_state *state;
668 struct extent_state *prealloc = NULL; 677 struct extent_state *prealloc = NULL;
@@ -763,7 +772,7 @@ again:
763 if (end < last_start) 772 if (end < last_start)
764 this_end = end; 773 this_end = end;
765 else 774 else
766 this_end = last_start -1; 775 this_end = last_start - 1;
767 err = insert_state(tree, prealloc, start, this_end, 776 err = insert_state(tree, prealloc, start, this_end,
768 bits); 777 bits);
769 prealloc = NULL; 778 prealloc = NULL;
@@ -891,8 +900,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
891} 900}
892EXPORT_SYMBOL(set_extent_uptodate); 901EXPORT_SYMBOL(set_extent_uptodate);
893 902
894static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 903static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
895 gfp_t mask) 904 u64 end, gfp_t mask)
896{ 905{
897 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask); 906 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
898} 907}
@@ -904,8 +913,8 @@ static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
904 0, NULL, mask); 913 0, NULL, mask);
905} 914}
906 915
907static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, 916static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
908 gfp_t mask) 917 u64 end, gfp_t mask)
909{ 918{
910 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask); 919 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
911} 920}
@@ -1025,11 +1034,10 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1025 * our range starts. 1034 * our range starts.
1026 */ 1035 */
1027 node = tree_search(tree, start); 1036 node = tree_search(tree, start);
1028 if (!node) { 1037 if (!node)
1029 goto out; 1038 goto out;
1030 }
1031 1039
1032 while(1) { 1040 while (1) {
1033 state = rb_entry(node, struct extent_state, rb_node); 1041 state = rb_entry(node, struct extent_state, rb_node);
1034 if (state->end >= start && (state->state & bits)) { 1042 if (state->end >= start && (state->state & bits)) {
1035 *start_ret = state->start; 1043 *start_ret = state->start;
@@ -1062,15 +1070,14 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1062 * our range starts. 1070 * our range starts.
1063 */ 1071 */
1064 node = tree_search(tree, start); 1072 node = tree_search(tree, start);
1065 if (!node) { 1073 if (!node)
1066 goto out; 1074 goto out;
1067 }
1068 1075
1069 while(1) { 1076 while (1) {
1070 state = rb_entry(node, struct extent_state, rb_node); 1077 state = rb_entry(node, struct extent_state, rb_node);
1071 if (state->end >= start && (state->state & bits)) { 1078 if (state->end >= start && (state->state & bits))
1072 return state; 1079 return state;
1073 } 1080
1074 node = rb_next(node); 1081 node = rb_next(node);
1075 if (!node) 1082 if (!node)
1076 break; 1083 break;
@@ -1108,7 +1115,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1108 goto out; 1115 goto out;
1109 } 1116 }
1110 1117
1111 while(1) { 1118 while (1) {
1112 state = rb_entry(node, struct extent_state, rb_node); 1119 state = rb_entry(node, struct extent_state, rb_node);
1113 if (found && (state->start != cur_start || 1120 if (found && (state->start != cur_start ||
1114 (state->state & EXTENT_BOUNDARY))) { 1121 (state->state & EXTENT_BOUNDARY))) {
@@ -1150,7 +1157,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
1150 if (index == locked_page->index && end_index == index) 1157 if (index == locked_page->index && end_index == index)
1151 return 0; 1158 return 0;
1152 1159
1153 while(nr_pages > 0) { 1160 while (nr_pages > 0) {
1154 ret = find_get_pages_contig(inode->i_mapping, index, 1161 ret = find_get_pages_contig(inode->i_mapping, index,
1155 min_t(unsigned long, nr_pages, 1162 min_t(unsigned long, nr_pages,
1156 ARRAY_SIZE(pages)), pages); 1163 ARRAY_SIZE(pages)), pages);
@@ -1186,7 +1193,7 @@ static noinline int lock_delalloc_pages(struct inode *inode,
1186 1193
1187 /* skip the page at the start index */ 1194 /* skip the page at the start index */
1188 nrpages = end_index - index + 1; 1195 nrpages = end_index - index + 1;
1189 while(nrpages > 0) { 1196 while (nrpages > 0) {
1190 ret = find_get_pages_contig(inode->i_mapping, index, 1197 ret = find_get_pages_contig(inode->i_mapping, index,
1191 min_t(unsigned long, 1198 min_t(unsigned long,
1192 nrpages, ARRAY_SIZE(pages)), pages); 1199 nrpages, ARRAY_SIZE(pages)), pages);
@@ -1263,17 +1270,16 @@ again:
1263 * pages in order, so we can't process delalloc bytes before 1270 * pages in order, so we can't process delalloc bytes before
1264 * locked_page 1271 * locked_page
1265 */ 1272 */
1266 if (delalloc_start < *start) { 1273 if (delalloc_start < *start)
1267 delalloc_start = *start; 1274 delalloc_start = *start;
1268 }
1269 1275
1270 /* 1276 /*
1271 * make sure to limit the number of pages we try to lock down 1277 * make sure to limit the number of pages we try to lock down
1272 * if we're looping. 1278 * if we're looping.
1273 */ 1279 */
1274 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) { 1280 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1275 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; 1281 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1276 } 1282
1277 /* step two, lock all the pages after the page that has start */ 1283 /* step two, lock all the pages after the page that has start */
1278 ret = lock_delalloc_pages(inode, locked_page, 1284 ret = lock_delalloc_pages(inode, locked_page,
1279 delalloc_start, delalloc_end); 1285 delalloc_start, delalloc_end);
@@ -1341,7 +1347,7 @@ int extent_clear_unlock_delalloc(struct inode *inode,
1341 if (!(unlock_pages || clear_dirty || set_writeback || end_writeback)) 1347 if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
1342 return 0; 1348 return 0;
1343 1349
1344 while(nr_pages > 0) { 1350 while (nr_pages > 0) {
1345 ret = find_get_pages_contig(inode->i_mapping, index, 1351 ret = find_get_pages_contig(inode->i_mapping, index,
1346 min_t(unsigned long, 1352 min_t(unsigned long,
1347 nr_pages, ARRAY_SIZE(pages)), pages); 1353 nr_pages, ARRAY_SIZE(pages)), pages);
@@ -1384,7 +1390,6 @@ u64 count_range_bits(struct extent_io_tree *tree,
1384 int found = 0; 1390 int found = 0;
1385 1391
1386 if (search_end <= cur_start) { 1392 if (search_end <= cur_start) {
1387 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1388 WARN_ON(1); 1393 WARN_ON(1);
1389 return 0; 1394 return 0;
1390 } 1395 }
@@ -1399,11 +1404,10 @@ u64 count_range_bits(struct extent_io_tree *tree,
1399 * our range starts. 1404 * our range starts.
1400 */ 1405 */
1401 node = tree_search(tree, cur_start); 1406 node = tree_search(tree, cur_start);
1402 if (!node) { 1407 if (!node)
1403 goto out; 1408 goto out;
1404 }
1405 1409
1406 while(1) { 1410 while (1) {
1407 state = rb_entry(node, struct extent_state, rb_node); 1411 state = rb_entry(node, struct extent_state, rb_node);
1408 if (state->start > search_end) 1412 if (state->start > search_end)
1409 break; 1413 break;
@@ -1927,19 +1931,15 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
1927 nr = bio_get_nr_vecs(bdev); 1931 nr = bio_get_nr_vecs(bdev);
1928 1932
1929 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); 1933 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1930 if (!bio) {
1931 printk("failed to allocate bio nr %d\n", nr);
1932 }
1933 1934
1934 bio_add_page(bio, page, page_size, offset); 1935 bio_add_page(bio, page, page_size, offset);
1935 bio->bi_end_io = end_io_func; 1936 bio->bi_end_io = end_io_func;
1936 bio->bi_private = tree; 1937 bio->bi_private = tree;
1937 1938
1938 if (bio_ret) { 1939 if (bio_ret)
1939 *bio_ret = bio; 1940 *bio_ret = bio;
1940 } else { 1941 else
1941 ret = submit_one_bio(rw, bio, mirror_num, bio_flags); 1942 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1942 }
1943 1943
1944 return ret; 1944 return ret;
1945} 1945}
@@ -2028,13 +2028,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2028 break; 2028 break;
2029 } 2029 }
2030 extent_offset = cur - em->start; 2030 extent_offset = cur - em->start;
2031 if (extent_map_end(em) <= cur) {
2032printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
2033 }
2034 BUG_ON(extent_map_end(em) <= cur); 2031 BUG_ON(extent_map_end(em) <= cur);
2035 if (end < cur) {
2036printk("2bad mapping end %Lu cur %Lu\n", end, cur);
2037 }
2038 BUG_ON(end < cur); 2032 BUG_ON(end < cur);
2039 2033
2040 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 2034 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
@@ -2199,7 +2193,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2199 delalloc_end = 0; 2193 delalloc_end = 0;
2200 page_started = 0; 2194 page_started = 0;
2201 if (!epd->extent_locked) { 2195 if (!epd->extent_locked) {
2202 while(delalloc_end < page_end) { 2196 while (delalloc_end < page_end) {
2203 nr_delalloc = find_lock_delalloc_range(inode, tree, 2197 nr_delalloc = find_lock_delalloc_range(inode, tree,
2204 page, 2198 page,
2205 &delalloc_start, 2199 &delalloc_start,
@@ -2242,9 +2236,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2242 nr_written++; 2236 nr_written++;
2243 2237
2244 end = page_end; 2238 end = page_end;
2245 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { 2239 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
2246 printk("found delalloc bits after lock_extent\n"); 2240 printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
2247 }
2248 2241
2249 if (last_byte <= start) { 2242 if (last_byte <= start) {
2250 clear_extent_dirty(tree, start, page_end, GFP_NOFS); 2243 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
@@ -2297,7 +2290,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2297 clear_extent_dirty(tree, cur, 2290 clear_extent_dirty(tree, cur,
2298 cur + iosize - 1, GFP_NOFS); 2291 cur + iosize - 1, GFP_NOFS);
2299 2292
2300 unlock_extent(tree, unlock_start, cur + iosize -1, 2293 unlock_extent(tree, unlock_start, cur + iosize - 1,
2301 GFP_NOFS); 2294 GFP_NOFS);
2302 2295
2303 /* 2296 /*
@@ -2344,9 +2337,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2344 2337
2345 set_range_writeback(tree, cur, cur + iosize - 1); 2338 set_range_writeback(tree, cur, cur + iosize - 1);
2346 if (!PageWriteback(page)) { 2339 if (!PageWriteback(page)) {
2347 printk("warning page %lu not writeback, " 2340 printk(KERN_ERR "btrfs warning page %lu not "
2348 "cur %llu end %llu\n", page->index, 2341 "writeback, cur %llu end %llu\n",
2349 (unsigned long long)cur, 2342 page->index, (unsigned long long)cur,
2350 (unsigned long long)end); 2343 (unsigned long long)end);
2351 } 2344 }
2352 2345
@@ -2430,8 +2423,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
2430retry: 2423retry:
2431 while (!done && (index <= end) && 2424 while (!done && (index <= end) &&
2432 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, 2425 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2433 PAGECACHE_TAG_DIRTY, 2426 PAGECACHE_TAG_DIRTY, min(end - index,
2434 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { 2427 (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2435 unsigned i; 2428 unsigned i;
2436 2429
2437 scanned = 1; 2430 scanned = 1;
@@ -2536,9 +2529,8 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2536 2529
2537 extent_write_cache_pages(tree, mapping, &wbc_writepages, 2530 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2538 __extent_writepage, &epd, flush_write_bio); 2531 __extent_writepage, &epd, flush_write_bio);
2539 if (epd.bio) { 2532 if (epd.bio)
2540 submit_one_bio(WRITE, epd.bio, 0, 0); 2533 submit_one_bio(WRITE, epd.bio, 0, 0);
2541 }
2542 return ret; 2534 return ret;
2543} 2535}
2544EXPORT_SYMBOL(extent_write_full_page); 2536EXPORT_SYMBOL(extent_write_full_page);
@@ -2568,7 +2560,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2568 .range_end = end + 1, 2560 .range_end = end + 1,
2569 }; 2561 };
2570 2562
2571 while(start <= end) { 2563 while (start <= end) {
2572 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); 2564 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2573 if (clear_page_dirty_for_io(page)) 2565 if (clear_page_dirty_for_io(page))
2574 ret = __extent_writepage(page, &wbc_writepages, &epd); 2566 ret = __extent_writepage(page, &wbc_writepages, &epd);
@@ -2606,9 +2598,8 @@ int extent_writepages(struct extent_io_tree *tree,
2606 ret = extent_write_cache_pages(tree, mapping, wbc, 2598 ret = extent_write_cache_pages(tree, mapping, wbc,
2607 __extent_writepage, &epd, 2599 __extent_writepage, &epd,
2608 flush_write_bio); 2600 flush_write_bio);
2609 if (epd.bio) { 2601 if (epd.bio)
2610 submit_one_bio(WRITE, epd.bio, 0, 0); 2602 submit_one_bio(WRITE, epd.bio, 0, 0);
2611 }
2612 return ret; 2603 return ret;
2613} 2604}
2614EXPORT_SYMBOL(extent_writepages); 2605EXPORT_SYMBOL(extent_writepages);
@@ -2666,7 +2657,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
2666 u64 end = start + PAGE_CACHE_SIZE - 1; 2657 u64 end = start + PAGE_CACHE_SIZE - 1;
2667 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 2658 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2668 2659
2669 start += (offset + blocksize -1) & ~(blocksize - 1); 2660 start += (offset + blocksize - 1) & ~(blocksize - 1);
2670 if (start > end) 2661 if (start > end)
2671 return 0; 2662 return 0;
2672 2663
@@ -2727,12 +2718,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
2727 orig_block_start = block_start; 2718 orig_block_start = block_start;
2728 2719
2729 lock_extent(tree, page_start, page_end, GFP_NOFS); 2720 lock_extent(tree, page_start, page_end, GFP_NOFS);
2730 while(block_start <= block_end) { 2721 while (block_start <= block_end) {
2731 em = get_extent(inode, page, page_offset, block_start, 2722 em = get_extent(inode, page, page_offset, block_start,
2732 block_end - block_start + 1, 1); 2723 block_end - block_start + 1, 1);
2733 if (IS_ERR(em) || !em) { 2724 if (IS_ERR(em) || !em)
2734 goto err; 2725 goto err;
2735 } 2726
2736 cur_end = min(block_end, extent_map_end(em) - 1); 2727 cur_end = min(block_end, extent_map_end(em) - 1);
2737 block_off_start = block_start & (PAGE_CACHE_SIZE - 1); 2728 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2738 block_off_end = block_off_start + blocksize; 2729 block_off_end = block_off_start + blocksize;
@@ -3170,7 +3161,7 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
3170 } 3161 }
3171 __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); 3162 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3172 set_extent_dirty(tree, page_offset(page), 3163 set_extent_dirty(tree, page_offset(page),
3173 page_offset(page) + PAGE_CACHE_SIZE -1, 3164 page_offset(page) + PAGE_CACHE_SIZE - 1,
3174 GFP_NOFS); 3165 GFP_NOFS);
3175 unlock_page(page); 3166 unlock_page(page);
3176 } 3167 }
@@ -3235,7 +3226,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
3235 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1); 3226 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3236 if (ret) 3227 if (ret)
3237 return 1; 3228 return 1;
3238 while(start <= end) { 3229 while (start <= end) {
3239 index = start >> PAGE_CACHE_SHIFT; 3230 index = start >> PAGE_CACHE_SHIFT;
3240 page = find_get_page(tree->mapping, index); 3231 page = find_get_page(tree->mapping, index);
3241 uptodate = PageUptodate(page); 3232 uptodate = PageUptodate(page);
@@ -3321,16 +3312,12 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3321 lock_page(page); 3312 lock_page(page);
3322 } 3313 }
3323 locked_pages++; 3314 locked_pages++;
3324 if (!PageUptodate(page)) { 3315 if (!PageUptodate(page))
3325 all_uptodate = 0; 3316 all_uptodate = 0;
3326 }
3327 } 3317 }
3328 if (all_uptodate) { 3318 if (all_uptodate) {
3329 if (start_i == 0) 3319 if (start_i == 0)
3330 eb->flags |= EXTENT_UPTODATE; 3320 eb->flags |= EXTENT_UPTODATE;
3331 if (ret) {
3332 printk("all up to date but ret is %d\n", ret);
3333 }
3334 goto unlock_exit; 3321 goto unlock_exit;
3335 } 3322 }
3336 3323
@@ -3345,10 +3332,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3345 err = __extent_read_full_page(tree, page, 3332 err = __extent_read_full_page(tree, page,
3346 get_extent, &bio, 3333 get_extent, &bio,
3347 mirror_num, &bio_flags); 3334 mirror_num, &bio_flags);
3348 if (err) { 3335 if (err)
3349 ret = err; 3336 ret = err;
3350 printk("err %d from __extent_read_full_page\n", ret);
3351 }
3352 } else { 3337 } else {
3353 unlock_page(page); 3338 unlock_page(page);
3354 } 3339 }
@@ -3357,26 +3342,23 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3357 if (bio) 3342 if (bio)
3358 submit_one_bio(READ, bio, mirror_num, bio_flags); 3343 submit_one_bio(READ, bio, mirror_num, bio_flags);
3359 3344
3360 if (ret || !wait) { 3345 if (ret || !wait)
3361 if (ret)
3362 printk("ret %d wait %d returning\n", ret, wait);
3363 return ret; 3346 return ret;
3364 } 3347
3365 for (i = start_i; i < num_pages; i++) { 3348 for (i = start_i; i < num_pages; i++) {
3366 page = extent_buffer_page(eb, i); 3349 page = extent_buffer_page(eb, i);
3367 wait_on_page_locked(page); 3350 wait_on_page_locked(page);
3368 if (!PageUptodate(page)) { 3351 if (!PageUptodate(page))
3369 printk("page not uptodate after wait_on_page_locked\n");
3370 ret = -EIO; 3352 ret = -EIO;
3371 }
3372 } 3353 }
3354
3373 if (!ret) 3355 if (!ret)
3374 eb->flags |= EXTENT_UPTODATE; 3356 eb->flags |= EXTENT_UPTODATE;
3375 return ret; 3357 return ret;
3376 3358
3377unlock_exit: 3359unlock_exit:
3378 i = start_i; 3360 i = start_i;
3379 while(locked_pages > 0) { 3361 while (locked_pages > 0) {
3380 page = extent_buffer_page(eb, i); 3362 page = extent_buffer_page(eb, i);
3381 i++; 3363 i++;
3382 unlock_page(page); 3364 unlock_page(page);
@@ -3403,7 +3385,7 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3403 3385
3404 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); 3386 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3405 3387
3406 while(len > 0) { 3388 while (len > 0) {
3407 page = extent_buffer_page(eb, i); 3389 page = extent_buffer_page(eb, i);
3408 3390
3409 cur = min(len, (PAGE_CACHE_SIZE - offset)); 3391 cur = min(len, (PAGE_CACHE_SIZE - offset));
@@ -3442,8 +3424,11 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3442 offset = 0; 3424 offset = 0;
3443 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; 3425 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3444 } 3426 }
3427
3445 if (start + min_len > eb->len) { 3428 if (start + min_len > eb->len) {
3446printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len); 3429 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3430 "wanted %lu %lu\n", (unsigned long long)eb->start,
3431 eb->len, start, min_len);
3447 WARN_ON(1); 3432 WARN_ON(1);
3448 } 3433 }
3449 3434
@@ -3506,7 +3491,7 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3506 3491
3507 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); 3492 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3508 3493
3509 while(len > 0) { 3494 while (len > 0) {
3510 page = extent_buffer_page(eb, i); 3495 page = extent_buffer_page(eb, i);
3511 3496
3512 cur = min(len, (PAGE_CACHE_SIZE - offset)); 3497 cur = min(len, (PAGE_CACHE_SIZE - offset));
@@ -3542,7 +3527,7 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3542 3527
3543 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); 3528 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3544 3529
3545 while(len > 0) { 3530 while (len > 0) {
3546 page = extent_buffer_page(eb, i); 3531 page = extent_buffer_page(eb, i);
3547 WARN_ON(!PageUptodate(page)); 3532 WARN_ON(!PageUptodate(page));
3548 3533
@@ -3574,7 +3559,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
3574 3559
3575 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); 3560 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3576 3561
3577 while(len > 0) { 3562 while (len > 0) {
3578 page = extent_buffer_page(eb, i); 3563 page = extent_buffer_page(eb, i);
3579 WARN_ON(!PageUptodate(page)); 3564 WARN_ON(!PageUptodate(page));
3580 3565
@@ -3607,7 +3592,7 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3607 offset = (start_offset + dst_offset) & 3592 offset = (start_offset + dst_offset) &
3608 ((unsigned long)PAGE_CACHE_SIZE - 1); 3593 ((unsigned long)PAGE_CACHE_SIZE - 1);
3609 3594
3610 while(len > 0) { 3595 while (len > 0) {
3611 page = extent_buffer_page(dst, i); 3596 page = extent_buffer_page(dst, i);
3612 WARN_ON(!PageUptodate(page)); 3597 WARN_ON(!PageUptodate(page));
3613 3598
@@ -3674,17 +3659,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3674 unsigned long src_i; 3659 unsigned long src_i;
3675 3660
3676 if (src_offset + len > dst->len) { 3661 if (src_offset + len > dst->len) {
3677 printk("memmove bogus src_offset %lu move len %lu len %lu\n", 3662 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3678 src_offset, len, dst->len); 3663 "len %lu dst len %lu\n", src_offset, len, dst->len);
3679 BUG_ON(1); 3664 BUG_ON(1);
3680 } 3665 }
3681 if (dst_offset + len > dst->len) { 3666 if (dst_offset + len > dst->len) {
3682 printk("memmove bogus dst_offset %lu move len %lu len %lu\n", 3667 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3683 dst_offset, len, dst->len); 3668 "len %lu dst len %lu\n", dst_offset, len, dst->len);
3684 BUG_ON(1); 3669 BUG_ON(1);
3685 } 3670 }
3686 3671
3687 while(len > 0) { 3672 while (len > 0) {
3688 dst_off_in_page = (start_offset + dst_offset) & 3673 dst_off_in_page = (start_offset + dst_offset) &
3689 ((unsigned long)PAGE_CACHE_SIZE - 1); 3674 ((unsigned long)PAGE_CACHE_SIZE - 1);
3690 src_off_in_page = (start_offset + src_offset) & 3675 src_off_in_page = (start_offset + src_offset) &
@@ -3722,20 +3707,20 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3722 unsigned long src_i; 3707 unsigned long src_i;
3723 3708
3724 if (src_offset + len > dst->len) { 3709 if (src_offset + len > dst->len) {
3725 printk("memmove bogus src_offset %lu move len %lu len %lu\n", 3710 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3726 src_offset, len, dst->len); 3711 "len %lu len %lu\n", src_offset, len, dst->len);
3727 BUG_ON(1); 3712 BUG_ON(1);
3728 } 3713 }
3729 if (dst_offset + len > dst->len) { 3714 if (dst_offset + len > dst->len) {
3730 printk("memmove bogus dst_offset %lu move len %lu len %lu\n", 3715 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3731 dst_offset, len, dst->len); 3716 "len %lu len %lu\n", dst_offset, len, dst->len);
3732 BUG_ON(1); 3717 BUG_ON(1);
3733 } 3718 }
3734 if (dst_offset < src_offset) { 3719 if (dst_offset < src_offset) {
3735 memcpy_extent_buffer(dst, dst_offset, src_offset, len); 3720 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3736 return; 3721 return;
3737 } 3722 }
3738 while(len > 0) { 3723 while (len > 0) {
3739 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; 3724 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3740 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; 3725 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3741 3726
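The extent_io.c hunks above are almost entirely checkpatch.pl style fixes: a space after while, no braces around single-statement bodies, debugging printk()s either dropped or given a KERN_ level plus a "btrfs" prefix, and 64-bit values printed with %llu behind an explicit (unsigned long long) cast instead of the non-portable %Lu. A minimal userspace sketch of those conventions (illustrative only, not code from the patch):

#include <stdio.h>
#include <stdint.h>

static void check_range(uint64_t start, uint64_t end)
{
        /* single statement: no braces, and the value gets an explicit cast */
        if (end < start)
                fprintf(stderr, "range: end < start %llu %llu\n",
                        (unsigned long long)end,
                        (unsigned long long)start);
}

int main(void)
{
        uint64_t cur = 0;

        while (cur < 3) {       /* space between "while" and "(" */
                check_range(cur, cur + 1);
                cur++;
        }
        return 0;
}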
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index fd3ebfb8c3c5..4a83e33ada32 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -89,11 +89,11 @@ EXPORT_SYMBOL(free_extent_map);
89static struct rb_node *tree_insert(struct rb_root *root, u64 offset, 89static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
90 struct rb_node *node) 90 struct rb_node *node)
91{ 91{
92 struct rb_node ** p = &root->rb_node; 92 struct rb_node **p = &root->rb_node;
93 struct rb_node * parent = NULL; 93 struct rb_node *parent = NULL;
94 struct extent_map *entry; 94 struct extent_map *entry;
95 95
96 while(*p) { 96 while (*p) {
97 parent = *p; 97 parent = *p;
98 entry = rb_entry(parent, struct extent_map, rb_node); 98 entry = rb_entry(parent, struct extent_map, rb_node);
99 99
@@ -122,13 +122,13 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
122 struct rb_node **prev_ret, 122 struct rb_node **prev_ret,
123 struct rb_node **next_ret) 123 struct rb_node **next_ret)
124{ 124{
125 struct rb_node * n = root->rb_node; 125 struct rb_node *n = root->rb_node;
126 struct rb_node *prev = NULL; 126 struct rb_node *prev = NULL;
127 struct rb_node *orig_prev = NULL; 127 struct rb_node *orig_prev = NULL;
128 struct extent_map *entry; 128 struct extent_map *entry;
129 struct extent_map *prev_entry = NULL; 129 struct extent_map *prev_entry = NULL;
130 130
131 while(n) { 131 while (n) {
132 entry = rb_entry(n, struct extent_map, rb_node); 132 entry = rb_entry(n, struct extent_map, rb_node);
133 prev = n; 133 prev = n;
134 prev_entry = entry; 134 prev_entry = entry;
@@ -145,7 +145,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
145 145
146 if (prev_ret) { 146 if (prev_ret) {
147 orig_prev = prev; 147 orig_prev = prev;
148 while(prev && offset >= extent_map_end(prev_entry)) { 148 while (prev && offset >= extent_map_end(prev_entry)) {
149 prev = rb_next(prev); 149 prev = rb_next(prev);
150 prev_entry = rb_entry(prev, struct extent_map, rb_node); 150 prev_entry = rb_entry(prev, struct extent_map, rb_node);
151 } 151 }
@@ -155,7 +155,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
155 155
156 if (next_ret) { 156 if (next_ret) {
157 prev_entry = rb_entry(prev, struct extent_map, rb_node); 157 prev_entry = rb_entry(prev, struct extent_map, rb_node);
158 while(prev && offset < prev_entry->start) { 158 while (prev && offset < prev_entry->start) {
159 prev = rb_prev(prev); 159 prev = rb_prev(prev);
160 prev_entry = rb_entry(prev, struct extent_map, rb_node); 160 prev_entry = rb_entry(prev, struct extent_map, rb_node);
161 } 161 }
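extent_map.c gets the same treatment: "struct rb_node ** p" becomes "struct rb_node **p" and "while(" becomes "while (". A hypothetical userspace sketch of the pointer-to-pointer descent that tree_insert() above relies on, written with that spacing (struct and field names here are invented for illustration):

#include <stddef.h>

struct node {
        long key;
        struct node *left;      /* the asterisk binds to the name, no extra space */
        struct node *right;
};

static void tree_insert_sketch(struct node **root, struct node *n)
{
        struct node **p = root;

        while (*p) {
                if (n->key < (*p)->key)
                        p = &(*p)->left;
                else
                        p = &(*p)->right;
        }
        n->left = NULL;
        n->right = NULL;
        *p = n;
}

int main(void)
{
        struct node *root = NULL;
        struct node a = { .key = 2 }, b = { .key = 1 };

        tree_insert_sketch(&root, &a);
        tree_insert_sketch(&root, &b);
        return 0;
}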
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index cc6e0b6de949..b11abfad81a5 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -24,7 +24,7 @@
24#include "transaction.h" 24#include "transaction.h"
25#include "print-tree.h" 25#include "print-tree.h"
26 26
27#define MAX_CSUM_ITEMS(r,size) ((((BTRFS_LEAF_DATA_SIZE(r) - \ 27#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
28 sizeof(struct btrfs_item) * 2) / \ 28 sizeof(struct btrfs_item) * 2) / \
29 size) - 1)) 29 size) - 1))
30int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 30int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
@@ -166,7 +166,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
166 WARN_ON(bio->bi_vcnt <= 0); 166 WARN_ON(bio->bi_vcnt <= 0);
167 167
168 disk_bytenr = (u64)bio->bi_sector << 9; 168 disk_bytenr = (u64)bio->bi_sector << 9;
169 while(bio_index < bio->bi_vcnt) { 169 while (bio_index < bio->bi_vcnt) {
170 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 170 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
171 ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); 171 ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
172 if (ret == 0) 172 if (ret == 0)
@@ -192,8 +192,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
192 offset + bvec->bv_len - 1, 192 offset + bvec->bv_len - 1,
193 EXTENT_NODATASUM, GFP_NOFS); 193 EXTENT_NODATASUM, GFP_NOFS);
194 } else { 194 } else {
195 printk("no csum found for inode %lu " 195 printk(KERN_INFO "btrfs no csum found "
196 "start %llu\n", inode->i_ino, 196 "for inode %lu start %llu\n",
197 inode->i_ino,
197 (unsigned long long)offset); 198 (unsigned long long)offset);
198 } 199 }
199 item = NULL; 200 item = NULL;
@@ -373,7 +374,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
373 BUG_ON(!ordered); 374 BUG_ON(!ordered);
374 sums->bytenr = ordered->start; 375 sums->bytenr = ordered->start;
375 376
376 while(bio_index < bio->bi_vcnt) { 377 while (bio_index < bio->bi_vcnt) {
377 if (!contig) 378 if (!contig)
378 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 379 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
379 380
@@ -507,7 +508,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
507 508
508 path = btrfs_alloc_path(); 509 path = btrfs_alloc_path();
509 510
510 while(1) { 511 while (1) {
511 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 512 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
512 key.offset = end_byte - 1; 513 key.offset = end_byte - 1;
513 key.type = BTRFS_EXTENT_CSUM_KEY; 514 key.type = BTRFS_EXTENT_CSUM_KEY;
@@ -715,9 +716,8 @@ again:
715 goto csum; 716 goto csum;
716 717
717 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); 718 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
718 if (diff != csum_size) { 719 if (diff != csum_size)
719 goto insert; 720 goto insert;
720 }
721 721
722 ret = btrfs_extend_item(trans, root, path, diff); 722 ret = btrfs_extend_item(trans, root, path, diff);
723 BUG_ON(ret); 723 BUG_ON(ret);
@@ -732,7 +732,7 @@ insert:
732 u64 next_sector = sector_sum->bytenr; 732 u64 next_sector = sector_sum->bytenr;
733 struct btrfs_sector_sum *next = sector_sum + 1; 733 struct btrfs_sector_sum *next = sector_sum + 1;
734 734
735 while(tmp < sums->len) { 735 while (tmp < sums->len) {
736 if (next_sector + root->sectorsize != next->bytenr) 736 if (next_sector + root->sectorsize != next->bytenr)
737 break; 737 break;
738 tmp += root->sectorsize; 738 tmp += root->sectorsize;
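In file-item.c the only non-printk change is cosmetic: MAX_CSUM_ITEMS(r,size) gains a space after the comma. A toy macro showing the convention checkpatch.pl asks for (the name and numbers are invented, not the kernel macro):

#include <stdio.h>

/* space after each comma in the parameter list and in the expansion */
#define ITEMS_PER_LEAF(leaf_size, item_size) \
        (((leaf_size) - 2 * sizeof(long)) / (item_size))

int main(void)
{
        printf("%zu\n", ITEMS_PER_LEAF(4096, 32));
        return 0;
}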
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5908521922fb..0e3a13a45653 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -44,10 +44,10 @@
44/* simple helper to fault in pages and copy. This should go away 44/* simple helper to fault in pages and copy. This should go away
45 * and be replaced with calls into generic code. 45 * and be replaced with calls into generic code.
46 */ 46 */
47static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, 47static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
48 int write_bytes, 48 int write_bytes,
49 struct page **prepared_pages, 49 struct page **prepared_pages,
50 const char __user * buf) 50 const char __user *buf)
51{ 51{
52 long page_fault = 0; 52 long page_fault = 0;
53 int i; 53 int i;
@@ -78,7 +78,7 @@ static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
78/* 78/*
79 * unlocks pages after btrfs_file_write is done with them 79 * unlocks pages after btrfs_file_write is done with them
80 */ 80 */
81static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) 81static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
82{ 82{
83 size_t i; 83 size_t i;
84 for (i = 0; i < num_pages; i++) { 84 for (i = 0; i < num_pages; i++) {
@@ -103,7 +103,7 @@ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
103 * this also makes the decision about creating an inline extent vs 103 * this also makes the decision about creating an inline extent vs
104 * doing real data extents, marking pages dirty and delalloc as required. 104 * doing real data extents, marking pages dirty and delalloc as required.
105 */ 105 */
106static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, 106static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root, 107 struct btrfs_root *root,
108 struct file *file, 108 struct file *file,
109 struct page **pages, 109 struct page **pages,
@@ -137,9 +137,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
137 btrfs_set_trans_block_group(trans, inode); 137 btrfs_set_trans_block_group(trans, inode);
138 hint_byte = 0; 138 hint_byte = 0;
139 139
140 if ((end_of_last_block & 4095) == 0) {
141 printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
142 }
143 set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS); 140 set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
144 141
145 /* check for reserved extents on each page, we don't want 142 /* check for reserved extents on each page, we don't want
@@ -185,7 +182,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
185 len = (u64)-1; 182 len = (u64)-1;
186 testend = 0; 183 testend = 0;
187 } 184 }
188 while(1) { 185 while (1) {
189 if (!split) 186 if (!split)
190 split = alloc_extent_map(GFP_NOFS); 187 split = alloc_extent_map(GFP_NOFS);
191 if (!split2) 188 if (!split2)
@@ -295,7 +292,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
295 path = btrfs_alloc_path(); 292 path = btrfs_alloc_path();
296 ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino, 293 ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
297 last_offset, 0); 294 last_offset, 0);
298 while(1) { 295 while (1) {
299 nritems = btrfs_header_nritems(path->nodes[0]); 296 nritems = btrfs_header_nritems(path->nodes[0]);
300 if (path->slots[0] >= nritems) { 297 if (path->slots[0] >= nritems) {
301 ret = btrfs_next_leaf(root, path); 298 ret = btrfs_next_leaf(root, path);
@@ -314,8 +311,10 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
314 if (found_key.offset < last_offset) { 311 if (found_key.offset < last_offset) {
315 WARN_ON(1); 312 WARN_ON(1);
316 btrfs_print_leaf(root, leaf); 313 btrfs_print_leaf(root, leaf);
317 printk("inode %lu found offset %Lu expected %Lu\n", 314 printk(KERN_ERR "inode %lu found offset %llu "
318 inode->i_ino, found_key.offset, last_offset); 315 "expected %llu\n", inode->i_ino,
316 (unsigned long long)found_key.offset,
317 (unsigned long long)last_offset);
319 err = 1; 318 err = 1;
320 goto out; 319 goto out;
321 } 320 }
@@ -331,7 +330,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
331 extent_end = found_key.offset + 330 extent_end = found_key.offset +
332 btrfs_file_extent_inline_len(leaf, extent); 331 btrfs_file_extent_inline_len(leaf, extent);
333 extent_end = (extent_end + root->sectorsize - 1) & 332 extent_end = (extent_end + root->sectorsize - 1) &
334 ~((u64)root->sectorsize -1 ); 333 ~((u64)root->sectorsize - 1);
335 } 334 }
336 last_offset = extent_end; 335 last_offset = extent_end;
337 path->slots[0]++; 336 path->slots[0]++;
@@ -339,8 +338,9 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
339 if (0 && last_offset < inode->i_size) { 338 if (0 && last_offset < inode->i_size) {
340 WARN_ON(1); 339 WARN_ON(1);
341 btrfs_print_leaf(root, leaf); 340 btrfs_print_leaf(root, leaf);
342 printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino, 341 printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
343 last_offset, inode->i_size); 342 inode->i_ino, (unsigned long long)last_offset,
343 (unsigned long long)inode->i_size);
344 err = 1; 344 err = 1;
345 345
346 } 346 }
@@ -362,7 +362,7 @@ out:
362 * inline_limit is used to tell this code which offsets in the file to keep 362 * inline_limit is used to tell this code which offsets in the file to keep
363 * if they contain inline extents. 363 * if they contain inline extents.
364 */ 364 */
365int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, 365noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
366 struct btrfs_root *root, struct inode *inode, 366 struct btrfs_root *root, struct inode *inode,
367 u64 start, u64 end, u64 inline_limit, u64 *hint_byte) 367 u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
368{ 368{
@@ -398,7 +398,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
398 path = btrfs_alloc_path(); 398 path = btrfs_alloc_path();
399 if (!path) 399 if (!path)
400 return -ENOMEM; 400 return -ENOMEM;
401 while(1) { 401 while (1) {
402 recow = 0; 402 recow = 0;
403 btrfs_release_path(root, path); 403 btrfs_release_path(root, path);
404 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 404 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
@@ -649,16 +649,15 @@ next_slot:
649 if (disk_bytenr != 0) { 649 if (disk_bytenr != 0) {
650 ret = btrfs_update_extent_ref(trans, root, 650 ret = btrfs_update_extent_ref(trans, root,
651 disk_bytenr, orig_parent, 651 disk_bytenr, orig_parent,
652 leaf->start, 652 leaf->start,
653 root->root_key.objectid, 653 root->root_key.objectid,
654 trans->transid, ins.objectid); 654 trans->transid, ins.objectid);
655 655
656 BUG_ON(ret); 656 BUG_ON(ret);
657 } 657 }
658 btrfs_release_path(root, path); 658 btrfs_release_path(root, path);
659 if (disk_bytenr != 0) { 659 if (disk_bytenr != 0)
660 inode_add_bytes(inode, extent_end - end); 660 inode_add_bytes(inode, extent_end - end);
661 }
662 } 661 }
663 662
664 if (found_extent && !keep) { 663 if (found_extent && !keep) {
@@ -944,7 +943,7 @@ done:
944 * waits for data=ordered extents to finish before allowing the pages to be 943 * waits for data=ordered extents to finish before allowing the pages to be
945 * modified. 944 * modified.
946 */ 945 */
947static int noinline prepare_pages(struct btrfs_root *root, struct file *file, 946static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
948 struct page **pages, size_t num_pages, 947 struct page **pages, size_t num_pages,
949 loff_t pos, unsigned long first_index, 948 loff_t pos, unsigned long first_index,
950 unsigned long last_index, size_t write_bytes) 949 unsigned long last_index, size_t write_bytes)
@@ -979,7 +978,8 @@ again:
979 struct btrfs_ordered_extent *ordered; 978 struct btrfs_ordered_extent *ordered;
980 lock_extent(&BTRFS_I(inode)->io_tree, 979 lock_extent(&BTRFS_I(inode)->io_tree,
981 start_pos, last_pos - 1, GFP_NOFS); 980 start_pos, last_pos - 1, GFP_NOFS);
982 ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1); 981 ordered = btrfs_lookup_first_ordered_extent(inode,
982 last_pos - 1);
983 if (ordered && 983 if (ordered &&
984 ordered->file_offset + ordered->len > start_pos && 984 ordered->file_offset + ordered->len > start_pos &&
985 ordered->file_offset < last_pos) { 985 ordered->file_offset < last_pos) {
@@ -1085,7 +1085,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1085 } 1085 }
1086 } 1086 }
1087 1087
1088 while(count > 0) { 1088 while (count > 0) {
1089 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 1089 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1090 size_t write_bytes = min(count, nrptrs * 1090 size_t write_bytes = min(count, nrptrs *
1091 (size_t)PAGE_CACHE_SIZE - 1091 (size_t)PAGE_CACHE_SIZE -
@@ -1178,7 +1178,7 @@ out_nolock:
1178 return num_written ? num_written : err; 1178 return num_written ? num_written : err;
1179} 1179}
1180 1180
1181int btrfs_release_file(struct inode * inode, struct file * filp) 1181int btrfs_release_file(struct inode *inode, struct file *filp)
1182{ 1182{
1183 if (filp->private_data) 1183 if (filp->private_data)
1184 btrfs_ioctl_trans_end(filp); 1184 btrfs_ioctl_trans_end(filp);
@@ -1237,9 +1237,8 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1237 } 1237 }
1238 1238
1239 ret = btrfs_log_dentry_safe(trans, root, file->f_dentry); 1239 ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
1240 if (ret < 0) { 1240 if (ret < 0)
1241 goto out; 1241 goto out;
1242 }
1243 1242
1244 /* we've logged all the items and now have a consistent 1243 /* we've logged all the items and now have a consistent
1245 * version of the file in the log. It is possible that 1244 * version of the file in the log. It is possible that
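file.c mostly reorders "static int noinline" to "static noinline int" and tightens pointer spacing in prototypes. Outside the kernel, noinline is not a keyword; the sketch below assumes it expands to the GCC attribute, the way the kernel's compiler headers define it, and the function name is made up for illustration:

#include <stdio.h>

#define noinline __attribute__((noinline))      /* assumption: GCC-style attribute */

/* the attribute sits between the storage class and the return type */
static noinline int copy_from_user_sketch(char *dst, const char *src, int n)
{
        int i;

        for (i = 0; i < n && src[i]; i++)
                dst[i] = src[i];
        dst[i] = '\0';
        return i;
}

int main(void)
{
        char buf[16];

        printf("%d\n", copy_from_user_sketch(buf, "btrfs", sizeof(buf) - 1));
        return 0;
}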
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 2e69b9c30437..d1e5f0e84c58 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -213,10 +213,13 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
213 info->offset = offset; 213 info->offset = offset;
214 info->bytes += bytes; 214 info->bytes += bytes;
215 } else if (right_info && right_info->offset != offset+bytes) { 215 } else if (right_info && right_info->offset != offset+bytes) {
216 printk(KERN_ERR "adding space in the middle of an existing " 216 printk(KERN_ERR "btrfs adding space in the middle of an "
217 "free space area. existing: offset=%Lu, bytes=%Lu. " 217 "existing free space area. existing: "
218 "new: offset=%Lu, bytes=%Lu\n", right_info->offset, 218 "offset=%llu, bytes=%llu. new: offset=%llu, "
219 right_info->bytes, offset, bytes); 219 "bytes=%llu\n", (unsigned long long)right_info->offset,
220 (unsigned long long)right_info->bytes,
221 (unsigned long long)offset,
222 (unsigned long long)bytes);
220 BUG(); 223 BUG();
221 } 224 }
222 225
@@ -225,11 +228,14 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
225 228
226 if (unlikely((left_info->offset + left_info->bytes) != 229 if (unlikely((left_info->offset + left_info->bytes) !=
227 offset)) { 230 offset)) {
228 printk(KERN_ERR "free space to the left of new free " 231 printk(KERN_ERR "btrfs free space to the left "
229 "space isn't quite right. existing: offset=%Lu," 232 "of new free space isn't "
230 " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n", 233 "quite right. existing: offset=%llu, "
231 left_info->offset, left_info->bytes, offset, 234 "bytes=%llu. new: offset=%llu, bytes=%llu\n",
232 bytes); 235 (unsigned long long)left_info->offset,
236 (unsigned long long)left_info->bytes,
237 (unsigned long long)offset,
238 (unsigned long long)bytes);
233 BUG(); 239 BUG();
234 } 240 }
235 241
@@ -265,8 +271,7 @@ out:
265 BUG(); 271 BUG();
266 } 272 }
267 273
268 if (alloc_info) 274 kfree(alloc_info);
269 kfree(alloc_info);
270 275
271 return ret; 276 return ret;
272} 277}
@@ -283,9 +288,11 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
283 288
284 if (info && info->offset == offset) { 289 if (info && info->offset == offset) {
285 if (info->bytes < bytes) { 290 if (info->bytes < bytes) {
286 printk(KERN_ERR "Found free space at %Lu, size %Lu," 291 printk(KERN_ERR "Found free space at %llu, size %llu,"
287 "trying to use %Lu\n", 292 "trying to use %llu\n",
288 info->offset, info->bytes, bytes); 293 (unsigned long long)info->offset,
294 (unsigned long long)info->bytes,
295 (unsigned long long)bytes);
289 WARN_ON(1); 296 WARN_ON(1);
290 ret = -EINVAL; 297 ret = -EINVAL;
291 goto out; 298 goto out;
@@ -401,8 +408,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
401 info = rb_entry(n, struct btrfs_free_space, offset_index); 408 info = rb_entry(n, struct btrfs_free_space, offset_index);
402 if (info->bytes >= bytes) 409 if (info->bytes >= bytes)
403 count++; 410 count++;
404 //printk(KERN_INFO "offset=%Lu, bytes=%Lu\n", info->offset,
405 // info->bytes);
406 } 411 }
407 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" 412 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
408 "\n", count); 413 "\n", count);
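Besides the long printk strings being re-wrapped with %llu casts, free-space-cache.c drops the "if (alloc_info)" guard because kfree(NULL) is defined to do nothing. Standard C gives free() the same guarantee, so a hedged userspace equivalent of that cleanup looks like this (the struct is a placeholder, not the kernel type):

#include <stdlib.h>

struct alloc_info_sketch {
        unsigned long long offset;
        unsigned long long bytes;
};

int main(void)
{
        struct alloc_info_sketch *info = NULL;

        /* no "if (info)" needed: free(NULL), like kfree(NULL), is a no-op */
        free(info);

        info = malloc(sizeof(*info));
        free(info);
        return 0;
}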
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 80038c5ef7cf..2aa79873eb46 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -129,7 +129,6 @@ int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
129 last_ino = key.objectid + 1; 129 last_ino = key.objectid + 1;
130 path->slots[0]++; 130 path->slots[0]++;
131 } 131 }
132 // FIXME -ENOSPC
133 BUG_ON(1); 132 BUG_ON(1);
134found: 133found:
135 btrfs_release_path(root, path); 134 btrfs_release_path(root, path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 068bad463387..1b35ea63b6ce 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -124,7 +124,7 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
124 * the btree. The caller should have done a btrfs_drop_extents so that 124 * the btree. The caller should have done a btrfs_drop_extents so that
125 * no overlapping inline items exist in the btree 125 * no overlapping inline items exist in the btree
126 */ 126 */
127static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, 127static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
128 struct btrfs_root *root, struct inode *inode, 128 struct btrfs_root *root, struct inode *inode,
129 u64 start, size_t size, size_t compressed_size, 129 u64 start, size_t size, size_t compressed_size,
130 struct page **compressed_pages) 130 struct page **compressed_pages)
@@ -148,7 +148,8 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
148 cur_size = compressed_size; 148 cur_size = compressed_size;
149 } 149 }
150 150
151 path = btrfs_alloc_path(); if (!path) 151 path = btrfs_alloc_path();
152 if (!path)
152 return -ENOMEM; 153 return -ENOMEM;
153 154
154 btrfs_set_trans_block_group(trans, inode); 155 btrfs_set_trans_block_group(trans, inode);
@@ -165,7 +166,6 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
165 BUG_ON(ret); 166 BUG_ON(ret);
166 if (ret) { 167 if (ret) {
167 err = ret; 168 err = ret;
168 printk("got bad ret %d\n", ret);
169 goto fail; 169 goto fail;
170 } 170 }
171 leaf = path->nodes[0]; 171 leaf = path->nodes[0];
@@ -181,7 +181,7 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
181 if (use_compress) { 181 if (use_compress) {
182 struct page *cpage; 182 struct page *cpage;
183 int i = 0; 183 int i = 0;
184 while(compressed_size > 0) { 184 while (compressed_size > 0) {
185 cpage = compressed_pages[i]; 185 cpage = compressed_pages[i];
186 cur_size = min_t(unsigned long, compressed_size, 186 cur_size = min_t(unsigned long, compressed_size,
187 PAGE_CACHE_SIZE); 187 PAGE_CACHE_SIZE);
@@ -519,8 +519,7 @@ free_pages_out:
519 WARN_ON(pages[i]->mapping); 519 WARN_ON(pages[i]->mapping);
520 page_cache_release(pages[i]); 520 page_cache_release(pages[i]);
521 } 521 }
522 if (pages) 522 kfree(pages);
523 kfree(pages);
524 523
525 goto out; 524 goto out;
526} 525}
@@ -549,7 +548,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
549 548
550 trans = btrfs_join_transaction(root, 1); 549 trans = btrfs_join_transaction(root, 1);
551 550
552 while(!list_empty(&async_cow->extents)) { 551 while (!list_empty(&async_cow->extents)) {
553 async_extent = list_entry(async_cow->extents.next, 552 async_extent = list_entry(async_cow->extents.next,
554 struct async_extent, list); 553 struct async_extent, list);
555 list_del(&async_extent->list); 554 list_del(&async_extent->list);
@@ -562,8 +561,8 @@ static noinline int submit_compressed_extents(struct inode *inode,
562 unsigned long nr_written = 0; 561 unsigned long nr_written = 0;
563 562
564 lock_extent(io_tree, async_extent->start, 563 lock_extent(io_tree, async_extent->start,
565 async_extent->start + async_extent->ram_size - 1, 564 async_extent->start +
566 GFP_NOFS); 565 async_extent->ram_size - 1, GFP_NOFS);
567 566
568 /* allocate blocks */ 567 /* allocate blocks */
569 cow_file_range(inode, async_cow->locked_page, 568 cow_file_range(inode, async_cow->locked_page,
@@ -581,7 +580,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
581 if (!page_started) 580 if (!page_started)
582 extent_write_locked_range(io_tree, 581 extent_write_locked_range(io_tree,
583 inode, async_extent->start, 582 inode, async_extent->start,
584 async_extent->start + 583 async_extent->start +
585 async_extent->ram_size - 1, 584 async_extent->ram_size - 1,
586 btrfs_get_extent, 585 btrfs_get_extent,
587 WB_SYNC_ALL); 586 WB_SYNC_ALL);
@@ -618,7 +617,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
618 set_bit(EXTENT_FLAG_PINNED, &em->flags); 617 set_bit(EXTENT_FLAG_PINNED, &em->flags);
619 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 618 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
620 619
621 while(1) { 620 while (1) {
622 spin_lock(&em_tree->lock); 621 spin_lock(&em_tree->lock);
623 ret = add_extent_mapping(em_tree, em); 622 ret = add_extent_mapping(em_tree, em);
624 spin_unlock(&em_tree->lock); 623 spin_unlock(&em_tree->lock);
@@ -651,11 +650,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
651 NULL, 1, 1, 0, 1, 1, 0); 650 NULL, 1, 1, 0, 1, 1, 0);
652 651
653 ret = btrfs_submit_compressed_write(inode, 652 ret = btrfs_submit_compressed_write(inode,
654 async_extent->start, 653 async_extent->start,
655 async_extent->ram_size, 654 async_extent->ram_size,
656 ins.objectid, 655 ins.objectid,
657 ins.offset, async_extent->pages, 656 ins.offset, async_extent->pages,
658 async_extent->nr_pages); 657 async_extent->nr_pages);
659 658
660 BUG_ON(ret); 659 BUG_ON(ret);
661 trans = btrfs_join_transaction(root, 1); 660 trans = btrfs_join_transaction(root, 1);
@@ -735,14 +734,13 @@ static noinline int cow_file_range(struct inode *inode,
735 734
736 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 735 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
737 736
738 while(disk_num_bytes > 0) { 737 while (disk_num_bytes > 0) {
739 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); 738 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
740 ret = btrfs_reserve_extent(trans, root, cur_alloc_size, 739 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
741 root->sectorsize, 0, alloc_hint, 740 root->sectorsize, 0, alloc_hint,
742 (u64)-1, &ins, 1); 741 (u64)-1, &ins, 1);
743 if (ret) { 742 BUG_ON(ret);
744 BUG(); 743
745 }
746 em = alloc_extent_map(GFP_NOFS); 744 em = alloc_extent_map(GFP_NOFS);
747 em->start = start; 745 em->start = start;
748 em->orig_start = em->start; 746 em->orig_start = em->start;
@@ -755,7 +753,7 @@ static noinline int cow_file_range(struct inode *inode,
755 em->bdev = root->fs_info->fs_devices->latest_bdev; 753 em->bdev = root->fs_info->fs_devices->latest_bdev;
756 set_bit(EXTENT_FLAG_PINNED, &em->flags); 754 set_bit(EXTENT_FLAG_PINNED, &em->flags);
757 755
758 while(1) { 756 while (1) {
759 spin_lock(&em_tree->lock); 757 spin_lock(&em_tree->lock);
760 ret = add_extent_mapping(em_tree, em); 758 ret = add_extent_mapping(em_tree, em);
761 spin_unlock(&em_tree->lock); 759 spin_unlock(&em_tree->lock);
@@ -779,11 +777,9 @@ static noinline int cow_file_range(struct inode *inode,
779 BUG_ON(ret); 777 BUG_ON(ret);
780 } 778 }
781 779
782 if (disk_num_bytes < cur_alloc_size) { 780 if (disk_num_bytes < cur_alloc_size)
783 printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes,
784 cur_alloc_size);
785 break; 781 break;
786 } 782
787 /* we're not doing compressed IO, don't unlock the first 783 /* we're not doing compressed IO, don't unlock the first
788 * page (which the caller expects to stay locked), don't 784 * page (which the caller expects to stay locked), don't
789 * clear any dirty bits and don't set any writeback bits 785 * clear any dirty bits and don't set any writeback bits
@@ -842,9 +838,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
842 waitqueue_active(&root->fs_info->async_submit_wait)) 838 waitqueue_active(&root->fs_info->async_submit_wait))
843 wake_up(&root->fs_info->async_submit_wait); 839 wake_up(&root->fs_info->async_submit_wait);
844 840
845 if (async_cow->inode) { 841 if (async_cow->inode)
846 submit_compressed_extents(async_cow->inode, async_cow); 842 submit_compressed_extents(async_cow->inode, async_cow);
847 }
848} 843}
849 844
850static noinline void async_cow_free(struct btrfs_work *work) 845static noinline void async_cow_free(struct btrfs_work *work)
@@ -871,7 +866,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
871 866
872 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | 867 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
873 EXTENT_DELALLOC, 1, 0, GFP_NOFS); 868 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
874 while(start < end) { 869 while (start < end) {
875 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 870 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
876 async_cow->inode = inode; 871 async_cow->inode = inode;
877 async_cow->root = root; 872 async_cow->root = root;
@@ -904,7 +899,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
904 limit)); 899 limit));
905 } 900 }
906 901
907 while(atomic_read(&root->fs_info->async_submit_draining) && 902 while (atomic_read(&root->fs_info->async_submit_draining) &&
908 atomic_read(&root->fs_info->async_delalloc_pages)) { 903 atomic_read(&root->fs_info->async_delalloc_pages)) {
909 wait_event(root->fs_info->async_submit_wait, 904 wait_event(root->fs_info->async_submit_wait,
910 (atomic_read(&root->fs_info->async_delalloc_pages) == 905 (atomic_read(&root->fs_info->async_delalloc_pages) ==
@@ -918,7 +913,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
918 return 0; 913 return 0;
919} 914}
920 915
921static int noinline csum_exist_in_range(struct btrfs_root *root, 916static noinline int csum_exist_in_range(struct btrfs_root *root,
922 u64 bytenr, u64 num_bytes) 917 u64 bytenr, u64 num_bytes)
923{ 918{
924 int ret; 919 int ret;
@@ -1146,13 +1141,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1146 1141
1147 if (btrfs_test_flag(inode, NODATACOW)) 1142 if (btrfs_test_flag(inode, NODATACOW))
1148 ret = run_delalloc_nocow(inode, locked_page, start, end, 1143 ret = run_delalloc_nocow(inode, locked_page, start, end,
1149 page_started, 1, nr_written); 1144 page_started, 1, nr_written);
1150 else if (btrfs_test_flag(inode, PREALLOC)) 1145 else if (btrfs_test_flag(inode, PREALLOC))
1151 ret = run_delalloc_nocow(inode, locked_page, start, end, 1146 ret = run_delalloc_nocow(inode, locked_page, start, end,
1152 page_started, 0, nr_written); 1147 page_started, 0, nr_written);
1153 else 1148 else
1154 ret = cow_file_range_async(inode, locked_page, start, end, 1149 ret = cow_file_range_async(inode, locked_page, start, end,
1155 page_started, nr_written); 1150 page_started, nr_written);
1156 1151
1157 return ret; 1152 return ret;
1158} 1153}
@@ -1200,8 +1195,11 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1200 1195
1201 spin_lock(&root->fs_info->delalloc_lock); 1196 spin_lock(&root->fs_info->delalloc_lock);
1202 if (end - start + 1 > root->fs_info->delalloc_bytes) { 1197 if (end - start + 1 > root->fs_info->delalloc_bytes) {
1203 printk("warning: delalloc account %Lu %Lu\n", 1198 printk(KERN_INFO "btrfs warning: delalloc account "
1204 end - start + 1, root->fs_info->delalloc_bytes); 1199 "%llu %llu\n",
1200 (unsigned long long)end - start + 1,
1201 (unsigned long long)
1202 root->fs_info->delalloc_bytes);
1205 root->fs_info->delalloc_bytes = 0; 1203 root->fs_info->delalloc_bytes = 0;
1206 BTRFS_I(inode)->delalloc_bytes = 0; 1204 BTRFS_I(inode)->delalloc_bytes = 0;
1207 } else { 1205 } else {
@@ -1241,9 +1239,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1241 ret = btrfs_map_block(map_tree, READ, logical, 1239 ret = btrfs_map_block(map_tree, READ, logical,
1242 &map_length, NULL, 0); 1240 &map_length, NULL, 0);
1243 1241
1244 if (map_length < length + size) { 1242 if (map_length < length + size)
1245 return 1; 1243 return 1;
1246 }
1247 return 0; 1244 return 0;
1248} 1245}
1249 1246
@@ -1255,8 +1252,9 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1255 * At IO completion time the cums attached on the ordered extent record 1252 * At IO completion time the cums attached on the ordered extent record
1256 * are inserted into the btree 1253 * are inserted into the btree
1257 */ 1254 */
1258static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, 1255static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1259 int mirror_num, unsigned long bio_flags) 1256 struct bio *bio, int mirror_num,
1257 unsigned long bio_flags)
1260{ 1258{
1261 struct btrfs_root *root = BTRFS_I(inode)->root; 1259 struct btrfs_root *root = BTRFS_I(inode)->root;
1262 int ret = 0; 1260 int ret = 0;
@@ -1341,9 +1339,8 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1341 1339
1342int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) 1340int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1343{ 1341{
1344 if ((end & (PAGE_CACHE_SIZE - 1)) == 0) { 1342 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1345 WARN_ON(1); 1343 WARN_ON(1);
1346 }
1347 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1344 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1348 GFP_NOFS); 1345 GFP_NOFS);
1349} 1346}
@@ -1755,14 +1752,14 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1755 } 1752 }
1756 local_irq_save(flags); 1753 local_irq_save(flags);
1757 kaddr = kmap_atomic(page, KM_IRQ0); 1754 kaddr = kmap_atomic(page, KM_IRQ0);
1758 if (ret) { 1755 if (ret)
1759 goto zeroit; 1756 goto zeroit;
1760 } 1757
1761 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); 1758 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1762 btrfs_csum_final(csum, (char *)&csum); 1759 btrfs_csum_final(csum, (char *)&csum);
1763 if (csum != private) { 1760 if (csum != private)
1764 goto zeroit; 1761 goto zeroit;
1765 } 1762
1766 kunmap_atomic(kaddr, KM_IRQ0); 1763 kunmap_atomic(kaddr, KM_IRQ0);
1767 local_irq_restore(flags); 1764 local_irq_restore(flags);
1768good: 1765good:
@@ -1773,9 +1770,10 @@ good:
1773 return 0; 1770 return 0;
1774 1771
1775zeroit: 1772zeroit:
1776 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n", 1773 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1777 page->mapping->host->i_ino, (unsigned long long)start, csum, 1774 "private %llu\n", page->mapping->host->i_ino,
1778 private); 1775 (unsigned long long)start, csum,
1776 (unsigned long long)private);
1779 memset(kaddr + offset, 1, end - start + 1); 1777 memset(kaddr + offset, 1, end - start + 1);
1780 flush_dcache_page(page); 1778 flush_dcache_page(page);
1781 kunmap_atomic(kaddr, KM_IRQ0); 1779 kunmap_atomic(kaddr, KM_IRQ0);
@@ -2097,9 +2095,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2097/* 2095/*
2098 * copy everything in the in-memory inode into the btree. 2096 * copy everything in the in-memory inode into the btree.
2099 */ 2097 */
2100int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, 2098noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2101 struct btrfs_root *root, 2099 struct btrfs_root *root, struct inode *inode)
2102 struct inode *inode)
2103{ 2100{
2104 struct btrfs_inode_item *inode_item; 2101 struct btrfs_inode_item *inode_item;
2105 struct btrfs_path *path; 2102 struct btrfs_path *path;
@@ -2174,7 +2171,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2174 inode->i_ino, 2171 inode->i_ino,
2175 dir->i_ino, &index); 2172 dir->i_ino, &index);
2176 if (ret) { 2173 if (ret) {
2177 printk("failed to delete reference to %.*s, " 2174 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2178 "inode %lu parent %lu\n", name_len, name, 2175 "inode %lu parent %lu\n", name_len, name,
2179 inode->i_ino, dir->i_ino); 2176 inode->i_ino, dir->i_ino);
2180 goto err; 2177 goto err;
@@ -2280,9 +2277,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2280 /* now the directory is empty */ 2277 /* now the directory is empty */
2281 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 2278 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2282 dentry->d_name.name, dentry->d_name.len); 2279 dentry->d_name.name, dentry->d_name.len);
2283 if (!err) { 2280 if (!err)
2284 btrfs_i_size_write(inode, 0); 2281 btrfs_i_size_write(inode, 0);
2285 }
2286 2282
2287fail_trans: 2283fail_trans:
2288 nr = trans->blocks_used; 2284 nr = trans->blocks_used;
@@ -2516,9 +2512,9 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2516 2512
2517search_again: 2513search_again:
2518 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 2514 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2519 if (ret < 0) { 2515 if (ret < 0)
2520 goto error; 2516 goto error;
2521 } 2517
2522 if (ret > 0) { 2518 if (ret > 0) {
2523 /* there are no items in the tree for us to truncate, we're 2519 /* there are no items in the tree for us to truncate, we're
2524 * done 2520 * done
@@ -2530,7 +2526,7 @@ search_again:
2530 path->slots[0]--; 2526 path->slots[0]--;
2531 } 2527 }
2532 2528
2533 while(1) { 2529 while (1) {
2534 fi = NULL; 2530 fi = NULL;
2535 leaf = path->nodes[0]; 2531 leaf = path->nodes[0];
2536 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2532 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
@@ -2562,19 +2558,18 @@ search_again:
2562 item_end--; 2558 item_end--;
2563 } 2559 }
2564 if (item_end < new_size) { 2560 if (item_end < new_size) {
2565 if (found_type == BTRFS_DIR_ITEM_KEY) { 2561 if (found_type == BTRFS_DIR_ITEM_KEY)
2566 found_type = BTRFS_INODE_ITEM_KEY; 2562 found_type = BTRFS_INODE_ITEM_KEY;
2567 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { 2563 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2568 found_type = BTRFS_EXTENT_DATA_KEY; 2564 found_type = BTRFS_EXTENT_DATA_KEY;
2569 } else if (found_type == BTRFS_EXTENT_DATA_KEY) { 2565 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2570 found_type = BTRFS_XATTR_ITEM_KEY; 2566 found_type = BTRFS_XATTR_ITEM_KEY;
2571 } else if (found_type == BTRFS_XATTR_ITEM_KEY) { 2567 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2572 found_type = BTRFS_INODE_REF_KEY; 2568 found_type = BTRFS_INODE_REF_KEY;
2573 } else if (found_type) { 2569 else if (found_type)
2574 found_type--; 2570 found_type--;
2575 } else { 2571 else
2576 break; 2572 break;
2577 }
2578 btrfs_set_key_type(&key, found_type); 2573 btrfs_set_key_type(&key, found_type);
2579 goto next; 2574 goto next;
2580 } 2575 }
@@ -2656,7 +2651,7 @@ delete:
2656 pending_del_nr++; 2651 pending_del_nr++;
2657 pending_del_slot = path->slots[0]; 2652 pending_del_slot = path->slots[0];
2658 } else { 2653 } else {
2659 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot); 2654 BUG();
2660 } 2655 }
2661 } else { 2656 } else {
2662 break; 2657 break;
@@ -2938,9 +2933,10 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2938 namelen, 0); 2933 namelen, 0);
2939 if (IS_ERR(di)) 2934 if (IS_ERR(di))
2940 ret = PTR_ERR(di); 2935 ret = PTR_ERR(di);
2941 if (!di || IS_ERR(di)) { 2936
2937 if (!di || IS_ERR(di))
2942 goto out_err; 2938 goto out_err;
2943 } 2939
2944 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 2940 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2945out: 2941out:
2946 btrfs_free_path(path); 2942 btrfs_free_path(path);
@@ -3020,8 +3016,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
3020static int btrfs_find_actor(struct inode *inode, void *opaque) 3016static int btrfs_find_actor(struct inode *inode, void *opaque)
3021{ 3017{
3022 struct btrfs_iget_args *args = opaque; 3018 struct btrfs_iget_args *args = opaque;
3023 return (args->ino == inode->i_ino && 3019 return args->ino == inode->i_ino &&
3024 args->root == BTRFS_I(inode)->root); 3020 args->root == BTRFS_I(inode)->root;
3025} 3021}
3026 3022
3027struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, 3023struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
@@ -3085,7 +3081,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3085 3081
3086struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 3082struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3087{ 3083{
3088 struct inode * inode; 3084 struct inode *inode;
3089 struct btrfs_inode *bi = BTRFS_I(dir); 3085 struct btrfs_inode *bi = BTRFS_I(dir);
3090 struct btrfs_root *root = bi->root; 3086 struct btrfs_root *root = bi->root;
3091 struct btrfs_root *sub_root = root; 3087 struct btrfs_root *sub_root = root;
@@ -3385,9 +3381,8 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
3385 3381
3386 if (BTRFS_I(dir)->index_cnt == (u64)-1) { 3382 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3387 ret = btrfs_set_inode_index_count(dir); 3383 ret = btrfs_set_inode_index_count(dir);
3388 if (ret) { 3384 if (ret)
3389 return ret; 3385 return ret;
3390 }
3391 } 3386 }
3392 3387
3393 *index = BTRFS_I(dir)->index_cnt; 3388 *index = BTRFS_I(dir)->index_cnt;
@@ -3879,12 +3874,13 @@ static noinline int uncompress_inline(struct btrfs_path *path,
3879 3874
3880/* 3875/*
3881 * a bit scary, this does extent mapping from logical file offset to the disk. 3876 * a bit scary, this does extent mapping from logical file offset to the disk.
3882 * the ugly parts come from merging extents from the disk with the 3877 * the ugly parts come from merging extents from the disk with the in-ram
3883 * in-ram representation. This gets more complex because of the data=ordered code, 3878 * representation. This gets more complex because of the data=ordered code,
3884 * where the in-ram extents might be locked pending data=ordered completion. 3879 * where the in-ram extents might be locked pending data=ordered completion.
3885 * 3880 *
3886 * This also copies inline extents directly into the page. 3881 * This also copies inline extents directly into the page.
3887 */ 3882 */
3883
3888struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 3884struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3889 size_t pg_offset, u64 start, u64 len, 3885 size_t pg_offset, u64 start, u64 len,
3890 int create) 3886 int create)
@@ -4081,7 +4077,7 @@ again:
4081 extent_map_end(em) - 1, GFP_NOFS); 4077 extent_map_end(em) - 1, GFP_NOFS);
4082 goto insert; 4078 goto insert;
4083 } else { 4079 } else {
4084 printk("unkknown found_type %d\n", found_type); 4080 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4085 WARN_ON(1); 4081 WARN_ON(1);
4086 } 4082 }
4087not_found: 4083not_found:
@@ -4093,7 +4089,11 @@ not_found_em:
4093insert: 4089insert:
4094 btrfs_release_path(root, path); 4090 btrfs_release_path(root, path);
4095 if (em->start > start || extent_map_end(em) <= start) { 4091 if (em->start > start || extent_map_end(em) <= start) {
4096 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len); 4092 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4093 "[%llu %llu]\n", (unsigned long long)em->start,
4094 (unsigned long long)em->len,
4095 (unsigned long long)start,
4096 (unsigned long long)len);
4097 err = -EIO; 4097 err = -EIO;
4098 goto out; 4098 goto out;
4099 } 4099 }
@@ -4130,8 +4130,6 @@ insert:
4130 } 4130 }
4131 } else { 4131 } else {
4132 err = -EIO; 4132 err = -EIO;
4133 printk("failing to insert %Lu %Lu\n",
4134 start, len);
4135 free_extent_map(em); 4133 free_extent_map(em);
4136 em = NULL; 4134 em = NULL;
4137 } 4135 }
@@ -4147,9 +4145,8 @@ out:
4147 btrfs_free_path(path); 4145 btrfs_free_path(path);
4148 if (trans) { 4146 if (trans) {
4149 ret = btrfs_end_transaction(trans, root); 4147 ret = btrfs_end_transaction(trans, root);
4150 if (!err) { 4148 if (!err)
4151 err = ret; 4149 err = ret;
4152 }
4153 } 4150 }
4154 if (err) { 4151 if (err) {
4155 free_extent_map(em); 4152 free_extent_map(em);
@@ -4482,13 +4479,15 @@ void btrfs_destroy_inode(struct inode *inode)
4482 } 4479 }
4483 spin_unlock(&BTRFS_I(inode)->root->list_lock); 4480 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4484 4481
4485 while(1) { 4482 while (1) {
4486 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 4483 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4487 if (!ordered) 4484 if (!ordered)
4488 break; 4485 break;
4489 else { 4486 else {
4490 printk("found ordered extent %Lu %Lu\n", 4487 printk(KERN_ERR "btrfs found ordered "
4491 ordered->file_offset, ordered->len); 4488 "extent %llu %llu on inode cleanup\n",
4489 (unsigned long long)ordered->file_offset,
4490 (unsigned long long)ordered->len);
4492 btrfs_remove_ordered_extent(inode, ordered); 4491 btrfs_remove_ordered_extent(inode, ordered);
4493 btrfs_put_ordered_extent(ordered); 4492 btrfs_put_ordered_extent(ordered);
4494 btrfs_put_ordered_extent(ordered); 4493 btrfs_put_ordered_extent(ordered);
@@ -4572,8 +4571,8 @@ static int btrfs_getattr(struct vfsmount *mnt,
4572 return 0; 4571 return 0;
4573} 4572}
4574 4573
4575static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry, 4574static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4576 struct inode * new_dir,struct dentry *new_dentry) 4575 struct inode *new_dir, struct dentry *new_dentry)
4577{ 4576{
4578 struct btrfs_trans_handle *trans; 4577 struct btrfs_trans_handle *trans;
4579 struct btrfs_root *root = BTRFS_I(old_dir)->root; 4578 struct btrfs_root *root = BTRFS_I(old_dir)->root;
@@ -4663,7 +4662,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4663 return -EROFS; 4662 return -EROFS;
4664 4663
4665 spin_lock(&root->fs_info->delalloc_lock); 4664 spin_lock(&root->fs_info->delalloc_lock);
4666 while(!list_empty(head)) { 4665 while (!list_empty(head)) {
4667 binode = list_entry(head->next, struct btrfs_inode, 4666 binode = list_entry(head->next, struct btrfs_inode,
4668 delalloc_inodes); 4667 delalloc_inodes);
4669 inode = igrab(&binode->vfs_inode); 4668 inode = igrab(&binode->vfs_inode);
@@ -4684,7 +4683,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4684 * ordered extents get created before we return 4683 * ordered extents get created before we return
4685 */ 4684 */
4686 atomic_inc(&root->fs_info->async_submit_draining); 4685 atomic_inc(&root->fs_info->async_submit_draining);
4687 while(atomic_read(&root->fs_info->nr_async_submits) || 4686 while (atomic_read(&root->fs_info->nr_async_submits) ||
4688 atomic_read(&root->fs_info->async_delalloc_pages)) { 4687 atomic_read(&root->fs_info->async_delalloc_pages)) {
4689 wait_event(root->fs_info->async_submit_wait, 4688 wait_event(root->fs_info->async_submit_wait,
4690 (atomic_read(&root->fs_info->nr_async_submits) == 0 && 4689 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
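
The inode.c hunks above repeatedly make the same two fixes: printk calls gain an explicit KERN_ level, and u64 values are printed with %llu after a cast to unsigned long long, since %Lu is not a standard specifier and u64 is not guaranteed to be unsigned long long on every architecture. A minimal user-space sketch of the printing idiom, using printf in place of printk; the my_u64 typedef and the values are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's u64; the real typedef varies by arch. */
typedef uint64_t my_u64;

int main(void)
{
        my_u64 start = 4096;
        my_u64 end = 1048575;

        /*
         * %llu is checked against exactly unsigned long long, so the values
         * are cast explicitly; a 64-bit type may be plain unsigned long on
         * LP64 systems, which would otherwise trigger format warnings.
         */
        printf("delalloc range %llu %llu\n",
               (unsigned long long)start,
               (unsigned long long)(end - start + 1));
        return 0;
}

The cast matters even when both types are 64 bits wide, because format checking compares the argument's exact type, not its size.
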
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ba484aac1b9c..c2aa33e3feb5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -311,7 +311,7 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name,
311 * to see if is references the subvolume where we are 311 * to see if is references the subvolume where we are
312 * placing this new snapshot. 312 * placing this new snapshot.
313 */ 313 */
314 while(1) { 314 while (1) {
315 if (!test || 315 if (!test ||
316 dir == snap_src->fs_info->sb->s_root || 316 dir == snap_src->fs_info->sb->s_root ||
317 test == snap_src->fs_info->sb->s_root || 317 test == snap_src->fs_info->sb->s_root ||
@@ -319,7 +319,8 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name,
319 break; 319 break;
320 } 320 }
321 if (S_ISLNK(test->d_inode->i_mode)) { 321 if (S_ISLNK(test->d_inode->i_mode)) {
322 printk("Symlink in snapshot path, failed\n"); 322 printk(KERN_INFO "Btrfs symlink in snapshot "
323 "path, failed\n");
323 error = -EMLINK; 324 error = -EMLINK;
324 btrfs_free_path(path); 325 btrfs_free_path(path);
325 goto out_drop_write; 326 goto out_drop_write;
@@ -329,7 +330,8 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name,
329 ret = btrfs_find_root_ref(snap_src->fs_info->tree_root, 330 ret = btrfs_find_root_ref(snap_src->fs_info->tree_root,
330 path, test_oid, parent_oid); 331 path, test_oid, parent_oid);
331 if (ret == 0) { 332 if (ret == 0) {
332 printk("Snapshot creation failed, looping\n"); 333 printk(KERN_INFO "Btrfs snapshot creation "
334 "failed, looping\n");
333 error = -EMLINK; 335 error = -EMLINK;
334 btrfs_free_path(path); 336 btrfs_free_path(path);
335 goto out_drop_write; 337 goto out_drop_write;
@@ -617,7 +619,8 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
617 619
618 src_inode = src_file->f_path.dentry->d_inode; 620 src_inode = src_file->f_path.dentry->d_inode;
619 if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) { 621 if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
620 printk("btrfs: Snapshot src from another FS\n"); 622 printk(KERN_INFO "btrfs: Snapshot src from "
623 "another FS\n");
621 ret = -EINVAL; 624 ret = -EINVAL;
622 fput(src_file); 625 fput(src_file);
623 goto out; 626 goto out;
@@ -810,9 +813,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
810 ((off + len) & (bs-1))) 813 ((off + len) & (bs-1)))
811 goto out_unlock; 814 goto out_unlock;
812 815
813 printk("final src extent is %llu~%llu\n", off, len);
814 printk("final dst extent is %llu~%llu\n", destoff, len);
815
816 /* do any pending delalloc/csum calc on src, one way or 816 /* do any pending delalloc/csum calc on src, one way or
817 another, and lock file content */ 817 another, and lock file content */
818 while (1) { 818 while (1) {
@@ -883,10 +883,13 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
883 comp = btrfs_file_extent_compression(leaf, extent); 883 comp = btrfs_file_extent_compression(leaf, extent);
884 type = btrfs_file_extent_type(leaf, extent); 884 type = btrfs_file_extent_type(leaf, extent);
885 if (type == BTRFS_FILE_EXTENT_REG) { 885 if (type == BTRFS_FILE_EXTENT_REG) {
886 disko = btrfs_file_extent_disk_bytenr(leaf, extent); 886 disko = btrfs_file_extent_disk_bytenr(leaf,
887 diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); 887 extent);
888 diskl = btrfs_file_extent_disk_num_bytes(leaf,
889 extent);
888 datao = btrfs_file_extent_offset(leaf, extent); 890 datao = btrfs_file_extent_offset(leaf, extent);
889 datal = btrfs_file_extent_num_bytes(leaf, extent); 891 datal = btrfs_file_extent_num_bytes(leaf,
892 extent);
890 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 893 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
891 /* take upper bound, may be compressed */ 894 /* take upper bound, may be compressed */
892 datal = btrfs_file_extent_ram_bytes(leaf, 895 datal = btrfs_file_extent_ram_bytes(leaf,
@@ -916,8 +919,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
916 919
917 extent = btrfs_item_ptr(leaf, slot, 920 extent = btrfs_item_ptr(leaf, slot,
918 struct btrfs_file_extent_item); 921 struct btrfs_file_extent_item);
919 printk(" orig disk %llu~%llu data %llu~%llu\n",
920 disko, diskl, datao, datal);
921 922
922 if (off > key.offset) { 923 if (off > key.offset) {
923 datao += off - key.offset; 924 datao += off - key.offset;
@@ -929,8 +930,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
929 /* disko == 0 means it's a hole */ 930 /* disko == 0 means it's a hole */
930 if (!disko) 931 if (!disko)
931 datao = 0; 932 datao = 0;
932 printk(" final disk %llu~%llu data %llu~%llu\n",
933 disko, diskl, datao, datal);
934 933
935 btrfs_set_file_extent_offset(leaf, extent, 934 btrfs_set_file_extent_offset(leaf, extent,
936 datao); 935 datao);
@@ -952,12 +951,11 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
952 skip = off - key.offset; 951 skip = off - key.offset;
953 new_key.offset += skip; 952 new_key.offset += skip;
954 } 953 }
954
955 if (key.offset + datal > off+len) 955 if (key.offset + datal > off+len)
956 trim = key.offset + datal - (off+len); 956 trim = key.offset + datal - (off+len);
957 printk("len %lld skip %lld trim %lld\n", 957
958 datal, skip, trim);
959 if (comp && (skip || trim)) { 958 if (comp && (skip || trim)) {
960 printk("btrfs clone_range can't split compressed inline extents yet\n");
961 ret = -EINVAL; 959 ret = -EINVAL;
962 goto out; 960 goto out;
963 } 961 }
@@ -969,7 +967,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
969 goto out; 967 goto out;
970 968
971 if (skip) { 969 if (skip) {
972 u32 start = btrfs_file_extent_calc_inline_size(0); 970 u32 start =
971 btrfs_file_extent_calc_inline_size(0);
973 memmove(buf+start, buf+start+skip, 972 memmove(buf+start, buf+start+skip,
974 datal); 973 datal);
975 } 974 }
@@ -985,7 +984,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
985 btrfs_mark_buffer_dirty(leaf); 984 btrfs_mark_buffer_dirty(leaf);
986 } 985 }
987 986
988 next: 987next:
989 btrfs_release_path(root, path); 988 btrfs_release_path(root, path);
990 key.offset++; 989 key.offset++;
991 } 990 }
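
Several of the ioctl.c messages above are split across two source lines to stay under the 80-column limit; adjacent C string literals are concatenated at compile time, so the logged text is unchanged. A small stand-alone illustration, with printf standing in for printk and an invented message:

#include <stdio.h>

int main(void)
{
        int ret = -22;

        /*
         * "btrfs: snapshot src from " and "another FS (ret %d)\n" are
         * adjacent literals, so the compiler joins them into one format
         * string; only the source line gets shorter, not the output.
         */
        printf("btrfs: snapshot src from "
               "another FS (ret %d)\n", ret);
        return 0;
}
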
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index e30aa6e2958f..39bae7761db6 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -31,9 +31,10 @@
31 * difference in almost every workload, but spinning for the right amount of 31 * difference in almost every workload, but spinning for the right amount of
32 * time needs some help. 32 * time needs some help.
33 * 33 *
34 * In general, we want to spin as long as the lock holder is doing btree searches, 34 * In general, we want to spin as long as the lock holder is doing btree
35 * and we should give up if they are in more expensive code. 35 * searches, and we should give up if they are in more expensive code.
36 */ 36 */
37
37int btrfs_tree_lock(struct extent_buffer *eb) 38int btrfs_tree_lock(struct extent_buffer *eb)
38{ 39{
39 int i; 40 int i;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index d9e232227da4..a20940170274 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -39,11 +39,11 @@ static u64 entry_end(struct btrfs_ordered_extent *entry)
39static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, 39static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
40 struct rb_node *node) 40 struct rb_node *node)
41{ 41{
42 struct rb_node ** p = &root->rb_node; 42 struct rb_node **p = &root->rb_node;
43 struct rb_node * parent = NULL; 43 struct rb_node *parent = NULL;
44 struct btrfs_ordered_extent *entry; 44 struct btrfs_ordered_extent *entry;
45 45
46 while(*p) { 46 while (*p) {
47 parent = *p; 47 parent = *p;
48 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); 48 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
49 49
@@ -67,13 +67,13 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
67static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, 67static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
68 struct rb_node **prev_ret) 68 struct rb_node **prev_ret)
69{ 69{
70 struct rb_node * n = root->rb_node; 70 struct rb_node *n = root->rb_node;
71 struct rb_node *prev = NULL; 71 struct rb_node *prev = NULL;
72 struct rb_node *test; 72 struct rb_node *test;
73 struct btrfs_ordered_extent *entry; 73 struct btrfs_ordered_extent *entry;
74 struct btrfs_ordered_extent *prev_entry = NULL; 74 struct btrfs_ordered_extent *prev_entry = NULL;
75 75
76 while(n) { 76 while (n) {
77 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); 77 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
78 prev = n; 78 prev = n;
79 prev_entry = entry; 79 prev_entry = entry;
@@ -88,7 +88,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
88 if (!prev_ret) 88 if (!prev_ret)
89 return NULL; 89 return NULL;
90 90
91 while(prev && file_offset >= entry_end(prev_entry)) { 91 while (prev && file_offset >= entry_end(prev_entry)) {
92 test = rb_next(prev); 92 test = rb_next(prev);
93 if (!test) 93 if (!test)
94 break; 94 break;
@@ -102,7 +102,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
102 if (prev) 102 if (prev)
103 prev_entry = rb_entry(prev, struct btrfs_ordered_extent, 103 prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
104 rb_node); 104 rb_node);
105 while(prev && file_offset < entry_end(prev_entry)) { 105 while (prev && file_offset < entry_end(prev_entry)) {
106 test = rb_prev(prev); 106 test = rb_prev(prev);
107 if (!test) 107 if (!test)
108 break; 108 break;
@@ -193,10 +193,8 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
193 193
194 node = tree_insert(&tree->tree, file_offset, 194 node = tree_insert(&tree->tree, file_offset,
195 &entry->rb_node); 195 &entry->rb_node);
196 if (node) { 196 BUG_ON(node);
197 printk("warning dup entry from add_ordered_extent\n"); 197
198 BUG();
199 }
200 set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, 198 set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
201 entry_end(entry) - 1, GFP_NOFS); 199 entry_end(entry) - 1, GFP_NOFS);
202 200
@@ -282,7 +280,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
282 struct btrfs_ordered_sum *sum; 280 struct btrfs_ordered_sum *sum;
283 281
284 if (atomic_dec_and_test(&entry->refs)) { 282 if (atomic_dec_and_test(&entry->refs)) {
285 while(!list_empty(&entry->list)) { 283 while (!list_empty(&entry->list)) {
286 cur = entry->list.next; 284 cur = entry->list.next;
287 sum = list_entry(cur, struct btrfs_ordered_sum, list); 285 sum = list_entry(cur, struct btrfs_ordered_sum, list);
288 list_del(&sum->list); 286 list_del(&sum->list);
@@ -432,11 +430,10 @@ again:
432 orig_end >> PAGE_CACHE_SHIFT); 430 orig_end >> PAGE_CACHE_SHIFT);
433 431
434 end = orig_end; 432 end = orig_end;
435 while(1) { 433 while (1) {
436 ordered = btrfs_lookup_first_ordered_extent(inode, end); 434 ordered = btrfs_lookup_first_ordered_extent(inode, end);
437 if (!ordered) { 435 if (!ordered)
438 break; 436 break;
439 }
440 if (ordered->file_offset > orig_end) { 437 if (ordered->file_offset > orig_end) {
441 btrfs_put_ordered_extent(ordered); 438 btrfs_put_ordered_extent(ordered);
442 break; 439 break;
@@ -492,7 +489,7 @@ out:
492 * if none is found 489 * if none is found
493 */ 490 */
494struct btrfs_ordered_extent * 491struct btrfs_ordered_extent *
495btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset) 492btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
496{ 493{
497 struct btrfs_ordered_inode_tree *tree; 494 struct btrfs_ordered_inode_tree *tree;
498 struct rb_node *node; 495 struct rb_node *node;
@@ -553,7 +550,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
553 * yet 550 * yet
554 */ 551 */
555 node = &ordered->rb_node; 552 node = &ordered->rb_node;
556 while(1) { 553 while (1) {
557 node = rb_prev(node); 554 node = rb_prev(node);
558 if (!node) 555 if (!node)
559 break; 556 break;
@@ -581,9 +578,8 @@ int btrfs_ordered_update_i_size(struct inode *inode,
581 * between our ordered extent and the next one. 578 * between our ordered extent and the next one.
582 */ 579 */
583 test = rb_entry(node, struct btrfs_ordered_extent, rb_node); 580 test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
584 if (test->file_offset > entry_end(ordered)) { 581 if (test->file_offset > entry_end(ordered))
585 i_size_test = test->file_offset; 582 i_size_test = test->file_offset;
586 }
587 } else { 583 } else {
588 i_size_test = i_size_read(inode); 584 i_size_test = i_size_read(inode);
589 } 585 }
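
The ordered-data.c hunks are purely stylistic: the asterisk in a declaration is written against the name (struct rb_node **p rather than struct rb_node ** p), and keywords such as while take a space before the opening parenthesis. A tiny list walk written in that style; the node type and values are made up for the example:

#include <stdio.h>
#include <stddef.h>

struct node {
        int val;
        struct node *next;      /* asterisk sits with the name, not the type */
};

int main(void)
{
        struct node c = { 3, NULL };
        struct node b = { 2, &c };
        struct node a = { 1, &b };
        struct node *n = &a;    /* not "struct node * n" */

        while (n) {             /* space between the keyword and "(" */
                printf("%d\n", n->val);
                n = n->next;
        }
        return 0;
}
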
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 64725c13aa11..5f8f218c1005 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -24,13 +24,14 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
24{ 24{
25 int num_stripes = btrfs_chunk_num_stripes(eb, chunk); 25 int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
26 int i; 26 int i;
27 printk("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n", 27 printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu "
28 "num_stripes %d\n",
28 (unsigned long long)btrfs_chunk_length(eb, chunk), 29 (unsigned long long)btrfs_chunk_length(eb, chunk),
29 (unsigned long long)btrfs_chunk_owner(eb, chunk), 30 (unsigned long long)btrfs_chunk_owner(eb, chunk),
30 (unsigned long long)btrfs_chunk_type(eb, chunk), 31 (unsigned long long)btrfs_chunk_type(eb, chunk),
31 num_stripes); 32 num_stripes);
32 for (i = 0 ; i < num_stripes ; i++) { 33 for (i = 0 ; i < num_stripes ; i++) {
33 printk("\t\t\tstripe %d devid %llu offset %llu\n", i, 34 printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i,
34 (unsigned long long)btrfs_stripe_devid_nr(eb, chunk, i), 35 (unsigned long long)btrfs_stripe_devid_nr(eb, chunk, i),
35 (unsigned long long)btrfs_stripe_offset_nr(eb, chunk, i)); 36 (unsigned long long)btrfs_stripe_offset_nr(eb, chunk, i));
36 } 37 }
@@ -38,8 +39,8 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
38static void print_dev_item(struct extent_buffer *eb, 39static void print_dev_item(struct extent_buffer *eb,
39 struct btrfs_dev_item *dev_item) 40 struct btrfs_dev_item *dev_item)
40{ 41{
41 printk("\t\tdev item devid %llu " 42 printk(KERN_INFO "\t\tdev item devid %llu "
42 "total_bytes %llu bytes used %Lu\n", 43 "total_bytes %llu bytes used %llu\n",
43 (unsigned long long)btrfs_device_id(eb, dev_item), 44 (unsigned long long)btrfs_device_id(eb, dev_item),
44 (unsigned long long)btrfs_device_total_bytes(eb, dev_item), 45 (unsigned long long)btrfs_device_total_bytes(eb, dev_item),
45 (unsigned long long)btrfs_device_bytes_used(eb, dev_item)); 46 (unsigned long long)btrfs_device_bytes_used(eb, dev_item));
@@ -61,14 +62,15 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
61 struct btrfs_dev_extent *dev_extent; 62 struct btrfs_dev_extent *dev_extent;
62 u32 type; 63 u32 type;
63 64
64 printk("leaf %llu total ptrs %d free space %d\n", 65 printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n",
65 (unsigned long long)btrfs_header_bytenr(l), nr, 66 (unsigned long long)btrfs_header_bytenr(l), nr,
66 btrfs_leaf_free_space(root, l)); 67 btrfs_leaf_free_space(root, l));
67 for (i = 0 ; i < nr ; i++) { 68 for (i = 0 ; i < nr ; i++) {
68 item = btrfs_item_nr(l, i); 69 item = btrfs_item_nr(l, i);
69 btrfs_item_key_to_cpu(l, &key, i); 70 btrfs_item_key_to_cpu(l, &key, i);
70 type = btrfs_key_type(&key); 71 type = btrfs_key_type(&key);
71 printk("\titem %d key (%llu %x %llu) itemoff %d itemsize %d\n", 72 printk(KERN_INFO "\titem %d key (%llu %x %llu) itemoff %d "
73 "itemsize %d\n",
72 i, 74 i,
73 (unsigned long long)key.objectid, type, 75 (unsigned long long)key.objectid, type,
74 (unsigned long long)key.offset, 76 (unsigned long long)key.offset,
@@ -76,33 +78,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
76 switch (type) { 78 switch (type) {
77 case BTRFS_INODE_ITEM_KEY: 79 case BTRFS_INODE_ITEM_KEY:
78 ii = btrfs_item_ptr(l, i, struct btrfs_inode_item); 80 ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
79 printk("\t\tinode generation %llu size %llu mode %o\n", 81 printk(KERN_INFO "\t\tinode generation %llu size %llu "
80 (unsigned long long)btrfs_inode_generation(l, ii), 82 "mode %o\n",
83 (unsigned long long)
84 btrfs_inode_generation(l, ii),
81 (unsigned long long)btrfs_inode_size(l, ii), 85 (unsigned long long)btrfs_inode_size(l, ii),
82 btrfs_inode_mode(l, ii)); 86 btrfs_inode_mode(l, ii));
83 break; 87 break;
84 case BTRFS_DIR_ITEM_KEY: 88 case BTRFS_DIR_ITEM_KEY:
85 di = btrfs_item_ptr(l, i, struct btrfs_dir_item); 89 di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
86 btrfs_dir_item_key_to_cpu(l, di, &found_key); 90 btrfs_dir_item_key_to_cpu(l, di, &found_key);
87 printk("\t\tdir oid %llu type %u\n", 91 printk(KERN_INFO "\t\tdir oid %llu type %u\n",
88 (unsigned long long)found_key.objectid, 92 (unsigned long long)found_key.objectid,
89 btrfs_dir_type(l, di)); 93 btrfs_dir_type(l, di));
90 break; 94 break;
91 case BTRFS_ROOT_ITEM_KEY: 95 case BTRFS_ROOT_ITEM_KEY:
92 ri = btrfs_item_ptr(l, i, struct btrfs_root_item); 96 ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
93 printk("\t\troot data bytenr %llu refs %u\n", 97 printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n",
94 (unsigned long long)btrfs_disk_root_bytenr(l, ri), 98 (unsigned long long)
99 btrfs_disk_root_bytenr(l, ri),
95 btrfs_disk_root_refs(l, ri)); 100 btrfs_disk_root_refs(l, ri));
96 break; 101 break;
97 case BTRFS_EXTENT_ITEM_KEY: 102 case BTRFS_EXTENT_ITEM_KEY:
98 ei = btrfs_item_ptr(l, i, struct btrfs_extent_item); 103 ei = btrfs_item_ptr(l, i, struct btrfs_extent_item);
99 printk("\t\textent data refs %u\n", 104 printk(KERN_INFO "\t\textent data refs %u\n",
100 btrfs_extent_refs(l, ei)); 105 btrfs_extent_refs(l, ei));
101 break; 106 break;
102 case BTRFS_EXTENT_REF_KEY: 107 case BTRFS_EXTENT_REF_KEY:
103 ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref); 108 ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref);
104 printk("\t\textent back ref root %llu gen %llu " 109 printk(KERN_INFO "\t\textent back ref root %llu "
105 "owner %llu num_refs %lu\n", 110 "gen %llu owner %llu num_refs %lu\n",
106 (unsigned long long)btrfs_ref_root(l, ref), 111 (unsigned long long)btrfs_ref_root(l, ref),
107 (unsigned long long)btrfs_ref_generation(l, ref), 112 (unsigned long long)btrfs_ref_generation(l, ref),
108 (unsigned long long)btrfs_ref_objectid(l, ref), 113 (unsigned long long)btrfs_ref_objectid(l, ref),
@@ -114,26 +119,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
114 struct btrfs_file_extent_item); 119 struct btrfs_file_extent_item);
115 if (btrfs_file_extent_type(l, fi) == 120 if (btrfs_file_extent_type(l, fi) ==
116 BTRFS_FILE_EXTENT_INLINE) { 121 BTRFS_FILE_EXTENT_INLINE) {
117 printk("\t\tinline extent data size %u\n", 122 printk(KERN_INFO "\t\tinline extent data "
118 btrfs_file_extent_inline_len(l, fi)); 123 "size %u\n",
124 btrfs_file_extent_inline_len(l, fi));
119 break; 125 break;
120 } 126 }
121 printk("\t\textent data disk bytenr %llu nr %llu\n", 127 printk(KERN_INFO "\t\textent data disk bytenr %llu "
122 (unsigned long long)btrfs_file_extent_disk_bytenr(l, fi), 128 "nr %llu\n",
123 (unsigned long long)btrfs_file_extent_disk_num_bytes(l, fi)); 129 (unsigned long long)
124 printk("\t\textent data offset %llu nr %llu ram %llu\n", 130 btrfs_file_extent_disk_bytenr(l, fi),
125 (unsigned long long)btrfs_file_extent_offset(l, fi), 131 (unsigned long long)
126 (unsigned long long)btrfs_file_extent_num_bytes(l, fi), 132 btrfs_file_extent_disk_num_bytes(l, fi));
127 (unsigned long long)btrfs_file_extent_ram_bytes(l, fi)); 133 printk(KERN_INFO "\t\textent data offset %llu "
134 "nr %llu ram %llu\n",
135 (unsigned long long)
136 btrfs_file_extent_offset(l, fi),
137 (unsigned long long)
138 btrfs_file_extent_num_bytes(l, fi),
139 (unsigned long long)
140 btrfs_file_extent_ram_bytes(l, fi));
128 break; 141 break;
129 case BTRFS_BLOCK_GROUP_ITEM_KEY: 142 case BTRFS_BLOCK_GROUP_ITEM_KEY:
130 bi = btrfs_item_ptr(l, i, 143 bi = btrfs_item_ptr(l, i,
131 struct btrfs_block_group_item); 144 struct btrfs_block_group_item);
132 printk("\t\tblock group used %llu\n", 145 printk(KERN_INFO "\t\tblock group used %llu\n",
133 (unsigned long long)btrfs_disk_block_group_used(l, bi)); 146 (unsigned long long)
147 btrfs_disk_block_group_used(l, bi));
134 break; 148 break;
135 case BTRFS_CHUNK_ITEM_KEY: 149 case BTRFS_CHUNK_ITEM_KEY:
136 print_chunk(l, btrfs_item_ptr(l, i, struct btrfs_chunk)); 150 print_chunk(l, btrfs_item_ptr(l, i,
151 struct btrfs_chunk));
137 break; 152 break;
138 case BTRFS_DEV_ITEM_KEY: 153 case BTRFS_DEV_ITEM_KEY:
139 print_dev_item(l, btrfs_item_ptr(l, i, 154 print_dev_item(l, btrfs_item_ptr(l, i,
@@ -142,7 +157,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
142 case BTRFS_DEV_EXTENT_KEY: 157 case BTRFS_DEV_EXTENT_KEY:
143 dev_extent = btrfs_item_ptr(l, i, 158 dev_extent = btrfs_item_ptr(l, i,
144 struct btrfs_dev_extent); 159 struct btrfs_dev_extent);
145 printk("\t\tdev extent chunk_tree %llu\n" 160 printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n"
146 "\t\tchunk objectid %llu chunk offset %llu " 161 "\t\tchunk objectid %llu chunk offset %llu "
147 "length %llu\n", 162 "length %llu\n",
148 (unsigned long long) 163 (unsigned long long)
@@ -171,13 +186,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
171 btrfs_print_leaf(root, c); 186 btrfs_print_leaf(root, c);
172 return; 187 return;
173 } 188 }
174 printk("node %llu level %d total ptrs %d free spc %u\n", 189 printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
175 (unsigned long long)btrfs_header_bytenr(c), 190 (unsigned long long)btrfs_header_bytenr(c),
176 btrfs_header_level(c), nr, 191 btrfs_header_level(c), nr,
177 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); 192 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
178 for (i = 0; i < nr; i++) { 193 for (i = 0; i < nr; i++) {
179 btrfs_node_key_to_cpu(c, &key, i); 194 btrfs_node_key_to_cpu(c, &key, i);
180 printk("\tkey %d (%llu %u %llu) block %llu\n", 195 printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
181 i, 196 i,
182 (unsigned long long)key.objectid, 197 (unsigned long long)key.objectid,
183 key.type, 198 key.type,
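
print-tree.c gains a KERN_INFO level on every printk. In the kernel these levels are short string literals that get concatenated onto the front of the format string; the exact prefix text has varied across kernel versions, so the macros below are only a user-space approximation of the mechanism, not the kernel's definitions:

#include <stdio.h>

/* Illustrative prefixes only; the kernel's real KERN_* literals differ. */
#define MY_KERN_ERR  "<3>"
#define MY_KERN_INFO "<6>"

#define my_printk(...) printf(__VA_ARGS__)

int main(void)
{
        unsigned long long bytenr = 29360128ULL;

        /*
         * The level macro and the format are adjacent string literals, so
         * MY_KERN_INFO "leaf %llu ..." becomes one string "<6>leaf %llu ...".
         */
        my_printk(MY_KERN_INFO "leaf %llu total ptrs %d\n", bytenr, 42);
        my_printk(MY_KERN_ERR "bad extent at %llu\n", bytenr);
        return 0;
}
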
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index a50ebb67055d..6f0acc4c9eab 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -74,11 +74,11 @@ void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
74static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, 74static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
75 struct rb_node *node) 75 struct rb_node *node)
76{ 76{
77 struct rb_node ** p = &root->rb_node; 77 struct rb_node **p = &root->rb_node;
78 struct rb_node * parent = NULL; 78 struct rb_node *parent = NULL;
79 struct btrfs_leaf_ref *entry; 79 struct btrfs_leaf_ref *entry;
80 80
81 while(*p) { 81 while (*p) {
82 parent = *p; 82 parent = *p;
83 entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node); 83 entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
84 84
@@ -98,10 +98,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
98 98
99static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) 99static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
100{ 100{
101 struct rb_node * n = root->rb_node; 101 struct rb_node *n = root->rb_node;
102 struct btrfs_leaf_ref *entry; 102 struct btrfs_leaf_ref *entry;
103 103
104 while(n) { 104 while (n) {
105 entry = rb_entry(n, struct btrfs_leaf_ref, rb_node); 105 entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
106 WARN_ON(!entry->in_tree); 106 WARN_ON(!entry->in_tree);
107 107
@@ -127,7 +127,7 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
127 return 0; 127 return 0;
128 128
129 spin_lock(&tree->lock); 129 spin_lock(&tree->lock);
130 while(!list_empty(&tree->list)) { 130 while (!list_empty(&tree->list)) {
131 ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); 131 ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
132 BUG_ON(ref->tree != tree); 132 BUG_ON(ref->tree != tree);
133 if (ref->root_gen > max_root_gen) 133 if (ref->root_gen > max_root_gen)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f99335a999d6..b48650de4472 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -132,8 +132,9 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
132 132
133 if (ret != 0) { 133 if (ret != 0) {
134 btrfs_print_leaf(root, path->nodes[0]); 134 btrfs_print_leaf(root, path->nodes[0]);
135 printk("unable to update root key %Lu %u %Lu\n", 135 printk(KERN_CRIT "unable to update root key %llu %u %llu\n",
136 key->objectid, key->type, key->offset); 136 (unsigned long long)key->objectid, key->type,
137 (unsigned long long)key->offset);
137 BUG_ON(1); 138 BUG_ON(1);
138 } 139 }
139 140
@@ -159,9 +160,9 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
159 160
160/* 161/*
161 * at mount time we want to find all the old transaction snapshots that were in 162 * at mount time we want to find all the old transaction snapshots that were in
162 * the process of being deleted if we crashed. This is any root item with an offset 163 * the process of being deleted if we crashed. This is any root item with an
163 * lower than the latest root. They need to be queued for deletion to finish 164 * offset lower than the latest root. They need to be queued for deletion to
164 * what was happening when we crashed. 165 * finish what was happening when we crashed.
165 */ 166 */
166int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid, 167int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
167 struct btrfs_root *latest) 168 struct btrfs_root *latest)
@@ -188,7 +189,7 @@ again:
188 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 189 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
189 if (ret < 0) 190 if (ret < 0)
190 goto err; 191 goto err;
191 while(1) { 192 while (1) {
192 leaf = path->nodes[0]; 193 leaf = path->nodes[0];
193 nritems = btrfs_header_nritems(leaf); 194 nritems = btrfs_header_nritems(leaf);
194 slot = path->slots[0]; 195 slot = path->slots[0];
@@ -258,11 +259,7 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
258 ret = btrfs_search_slot(trans, root, key, path, -1, 1); 259 ret = btrfs_search_slot(trans, root, key, path, -1, 1);
259 if (ret < 0) 260 if (ret < 0)
260 goto out; 261 goto out;
261 if (ret) {
262btrfs_print_leaf(root, path->nodes[0]);
263printk("failed to del %Lu %u %Lu\n", key->objectid, key->type, key->offset);
264 262
265 }
266 BUG_ON(ret != 0); 263 BUG_ON(ret != 0);
267 leaf = path->nodes[0]; 264 leaf = path->nodes[0];
268 ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item); 265 ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);
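
In root-tree.c, the unindented debug dump and printk on a failed delete are dropped and the existing BUG_ON(ret != 0) is left to catch the condition on its own. The closest user-space analogue is a bare assert, shown here purely as an analogy; the helper and its return convention are invented:

#include <assert.h>
#include <stdio.h>

/* Pretend 0 means "found and deleted", nonzero means "not found". */
static int del_root_item(int present)
{
        return present ? 0 : 1;
}

int main(void)
{
        int ret = del_root_item(1);

        /* One hard check replaces a hand-rolled "dump then continue" path. */
        assert(ret == 0);
        printf("root item deleted\n");
        return 0;
}
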
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index 8d7f568009c9..c0f7ecaf1e79 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
66 unsigned long map_len; \ 66 unsigned long map_len; \
67 u##bits res; \ 67 u##bits res; \
68 err = map_extent_buffer(eb, offset, \ 68 err = map_extent_buffer(eb, offset, \
69 sizeof(((type *)0)->member), \ 69 sizeof(((type *)0)->member), \
70 &map_token, &kaddr, \ 70 &map_token, &kaddr, \
71 &map_start, &map_len, KM_USER1); \ 71 &map_start, &map_len, KM_USER1); \
72 if (err) { \ 72 if (err) { \
@@ -103,7 +103,7 @@ void btrfs_set_##name(struct extent_buffer *eb, \
103 unsigned long map_start; \ 103 unsigned long map_start; \
104 unsigned long map_len; \ 104 unsigned long map_len; \
105 err = map_extent_buffer(eb, offset, \ 105 err = map_extent_buffer(eb, offset, \
106 sizeof(((type *)0)->member), \ 106 sizeof(((type *)0)->member), \
107 &map_token, &kaddr, \ 107 &map_token, &kaddr, \
108 &map_start, &map_len, KM_USER1); \ 108 &map_start, &map_len, KM_USER1); \
109 if (err) { \ 109 if (err) { \
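
The struct-funcs.c change only re-indents a continuation line inside the macro that stamps out extent-buffer accessors. The general shape, a function-generating macro whose continuation lines end in a backslash, can be sketched on its own; the type, field names and getter below are invented and far simpler than the real macro:

#include <stdio.h>

/*
 * Generate a trivial getter per field. Every continuation line ends in a
 * backslash, and the wrapped arguments are indented to line up with the
 * expression they continue.
 */
#define DEFINE_GETTER(type, member) \
static unsigned long long get_##member(const struct type *p) \
{ \
        return (unsigned long long)p->member; \
}

struct item {
        unsigned long long offset;
        unsigned long long size;
};

DEFINE_GETTER(item, offset)
DEFINE_GETTER(item, size)

int main(void)
{
        struct item it = { 4096, 512 };

        printf("offset %llu size %llu\n", get_offset(&it), get_size(&it));
        return 0;
}
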
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index ccdcb7bb7ad8..b4c101d9322c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -55,18 +55,12 @@
55 55
56static struct super_operations btrfs_super_ops; 56static struct super_operations btrfs_super_ops;
57 57
58static void btrfs_put_super (struct super_block * sb) 58static void btrfs_put_super(struct super_block *sb)
59{ 59{
60 struct btrfs_root *root = btrfs_sb(sb); 60 struct btrfs_root *root = btrfs_sb(sb);
61 int ret; 61 int ret;
62 62
63 ret = close_ctree(root); 63 ret = close_ctree(root);
64 if (ret) {
65 printk("close ctree returns %d\n", ret);
66 }
67#if 0
68 btrfs_sysfs_del_super(root->fs_info);
69#endif
70 sb->s_fs_info = NULL; 64 sb->s_fs_info = NULL;
71} 65}
72 66
@@ -299,12 +293,12 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
299 return error; 293 return error;
300} 294}
301 295
302static int btrfs_fill_super(struct super_block * sb, 296static int btrfs_fill_super(struct super_block *sb,
303 struct btrfs_fs_devices *fs_devices, 297 struct btrfs_fs_devices *fs_devices,
304 void * data, int silent) 298 void *data, int silent)
305{ 299{
306 struct inode * inode; 300 struct inode *inode;
307 struct dentry * root_dentry; 301 struct dentry *root_dentry;
308 struct btrfs_super_block *disk_super; 302 struct btrfs_super_block *disk_super;
309 struct btrfs_root *tree_root; 303 struct btrfs_root *tree_root;
310 struct btrfs_inode *bi; 304 struct btrfs_inode *bi;
@@ -479,8 +473,10 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
479 root = dget(s->s_root); 473 root = dget(s->s_root);
480 else { 474 else {
481 mutex_lock(&s->s_root->d_inode->i_mutex); 475 mutex_lock(&s->s_root->d_inode->i_mutex);
482 root = lookup_one_len(subvol_name, s->s_root, strlen(subvol_name)); 476 root = lookup_one_len(subvol_name, s->s_root,
477 strlen(subvol_name));
483 mutex_unlock(&s->s_root->d_inode->i_mutex); 478 mutex_unlock(&s->s_root->d_inode->i_mutex);
479
484 if (IS_ERR(root)) { 480 if (IS_ERR(root)) {
485 up_write(&s->s_umount); 481 up_write(&s->s_umount);
486 deactivate_super(s); 482 deactivate_super(s);
@@ -557,8 +553,9 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
557 buf->f_bavail = buf->f_bfree; 553 buf->f_bavail = buf->f_bfree;
558 buf->f_bsize = dentry->d_sb->s_blocksize; 554 buf->f_bsize = dentry->d_sb->s_blocksize;
559 buf->f_type = BTRFS_SUPER_MAGIC; 555 buf->f_type = BTRFS_SUPER_MAGIC;
556
560 /* We treat it as constant endianness (it doesn't matter _which_) 557 /* We treat it as constant endianness (it doesn't matter _which_)
561 because we want the fsid to come out the same whether mounted 558 because we want the fsid to come out the same whether mounted
562 on a big-endian or little-endian host */ 559 on a big-endian or little-endian host */
563 buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]); 560 buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
564 buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]); 561 buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
@@ -658,7 +655,7 @@ static int btrfs_interface_init(void)
658static void btrfs_interface_exit(void) 655static void btrfs_interface_exit(void)
659{ 656{
660 if (misc_deregister(&btrfs_misc) < 0) 657 if (misc_deregister(&btrfs_misc) < 0)
661 printk("misc_deregister failed for control device"); 658 printk(KERN_INFO "misc_deregister failed for control device");
662} 659}
663 660
664static int __init init_btrfs_fs(void) 661static int __init init_btrfs_fs(void)
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 04087c020845..a240b6fa81df 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -67,7 +67,8 @@ struct btrfs_root_attr {
67}; 67};
68 68
69#define ROOT_ATTR(name, mode, show, store) \ 69#define ROOT_ATTR(name, mode, show, store) \
70static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, show, store) 70static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
71 show, store)
71 72
72ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL); 73ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
73ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL); 74ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
@@ -86,7 +87,8 @@ struct btrfs_super_attr {
86}; 87};
87 88
88#define SUPER_ATTR(name, mode, show, store) \ 89#define SUPER_ATTR(name, mode, show, store) \
89static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, show, store) 90static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
91 show, store)
90 92
91SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL); 93SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
92SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL); 94SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4e7b56e9d3a5..56ab1f5ea11b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,9 +28,6 @@
28#include "ref-cache.h" 28#include "ref-cache.h"
29#include "tree-log.h" 29#include "tree-log.h"
30 30
31extern struct kmem_cache *btrfs_trans_handle_cachep;
32extern struct kmem_cache *btrfs_transaction_cachep;
33
34#define BTRFS_ROOT_TRANS_TAG 0 31#define BTRFS_ROOT_TRANS_TAG 0
35 32
36static noinline void put_transaction(struct btrfs_transaction *transaction) 33static noinline void put_transaction(struct btrfs_transaction *transaction)
@@ -85,10 +82,10 @@ static noinline int join_transaction(struct btrfs_root *root)
85} 82}
86 83
87/* 84/*
88 * this does all the record keeping required to make sure that a 85 * this does all the record keeping required to make sure that a reference
89 * reference counted root is properly recorded in a given transaction. 86 * counted root is properly recorded in a given transaction. This is required
90 * This is required to make sure the old root from before we joined the transaction 87 * to make sure the old root from before we joined the transaction is deleted
91 * is deleted when the transaction commits 88 * when the transaction commits
92 */ 89 */
93noinline int btrfs_record_root_in_trans(struct btrfs_root *root) 90noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
94{ 91{
@@ -144,7 +141,7 @@ static void wait_current_trans(struct btrfs_root *root)
144 if (cur_trans && cur_trans->blocked) { 141 if (cur_trans && cur_trans->blocked) {
145 DEFINE_WAIT(wait); 142 DEFINE_WAIT(wait);
146 cur_trans->use_count++; 143 cur_trans->use_count++;
147 while(1) { 144 while (1) {
148 prepare_to_wait(&root->fs_info->transaction_wait, &wait, 145 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
149 TASK_UNINTERRUPTIBLE); 146 TASK_UNINTERRUPTIBLE);
150 if (cur_trans->blocked) { 147 if (cur_trans->blocked) {
@@ -213,7 +210,7 @@ static noinline int wait_for_commit(struct btrfs_root *root,
213{ 210{
214 DEFINE_WAIT(wait); 211 DEFINE_WAIT(wait);
215 mutex_lock(&root->fs_info->trans_mutex); 212 mutex_lock(&root->fs_info->trans_mutex);
216 while(!commit->commit_done) { 213 while (!commit->commit_done) {
217 prepare_to_wait(&commit->commit_wait, &wait, 214 prepare_to_wait(&commit->commit_wait, &wait,
218 TASK_UNINTERRUPTIBLE); 215 TASK_UNINTERRUPTIBLE);
219 if (commit->commit_done) 216 if (commit->commit_done)
@@ -228,8 +225,8 @@ static noinline int wait_for_commit(struct btrfs_root *root,
228} 225}
229 226
230/* 227/*
231 * rate limit against the drop_snapshot code. This helps to slow down new operations 228 * rate limit against the drop_snapshot code. This helps to slow down new
232 * if the drop_snapshot code isn't able to keep up. 229 * operations if the drop_snapshot code isn't able to keep up.
233 */ 230 */
234static void throttle_on_drops(struct btrfs_root *root) 231static void throttle_on_drops(struct btrfs_root *root)
235{ 232{
@@ -332,12 +329,12 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
332 u64 end; 329 u64 end;
333 unsigned long index; 330 unsigned long index;
334 331
335 while(1) { 332 while (1) {
336 ret = find_first_extent_bit(dirty_pages, start, &start, &end, 333 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
337 EXTENT_DIRTY); 334 EXTENT_DIRTY);
338 if (ret) 335 if (ret)
339 break; 336 break;
340 while(start <= end) { 337 while (start <= end) {
341 cond_resched(); 338 cond_resched();
342 339
343 index = start >> PAGE_CACHE_SHIFT; 340 index = start >> PAGE_CACHE_SHIFT;
@@ -368,14 +365,14 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
368 page_cache_release(page); 365 page_cache_release(page);
369 } 366 }
370 } 367 }
371 while(1) { 368 while (1) {
372 ret = find_first_extent_bit(dirty_pages, 0, &start, &end, 369 ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
373 EXTENT_DIRTY); 370 EXTENT_DIRTY);
374 if (ret) 371 if (ret)
375 break; 372 break;
376 373
377 clear_extent_dirty(dirty_pages, start, end, GFP_NOFS); 374 clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
378 while(start <= end) { 375 while (start <= end) {
379 index = start >> PAGE_CACHE_SHIFT; 376 index = start >> PAGE_CACHE_SHIFT;
380 start = (u64)(index + 1) << PAGE_CACHE_SHIFT; 377 start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
381 page = find_get_page(btree_inode->i_mapping, index); 378 page = find_get_page(btree_inode->i_mapping, index);
@@ -431,7 +428,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
431 btrfs_write_dirty_block_groups(trans, root); 428 btrfs_write_dirty_block_groups(trans, root);
432 btrfs_extent_post_op(trans, root); 429 btrfs_extent_post_op(trans, root);
433 430
434 while(1) { 431 while (1) {
435 old_root_bytenr = btrfs_root_bytenr(&root->root_item); 432 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
436 if (old_root_bytenr == root->node->start) 433 if (old_root_bytenr == root->node->start)
437 break; 434 break;
@@ -472,7 +469,7 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
472 469
473 btrfs_extent_post_op(trans, fs_info->tree_root); 470 btrfs_extent_post_op(trans, fs_info->tree_root);
474 471
475 while(!list_empty(&fs_info->dirty_cowonly_roots)) { 472 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
476 next = fs_info->dirty_cowonly_roots.next; 473 next = fs_info->dirty_cowonly_roots.next;
477 list_del_init(next); 474 list_del_init(next);
478 root = list_entry(next, struct btrfs_root, dirty_list); 475 root = list_entry(next, struct btrfs_root, dirty_list);
@@ -521,7 +518,7 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
521 int err = 0; 518 int err = 0;
522 u32 refs; 519 u32 refs;
523 520
524 while(1) { 521 while (1) {
525 ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0, 522 ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
526 ARRAY_SIZE(gang), 523 ARRAY_SIZE(gang),
527 BTRFS_ROOT_TRANS_TAG); 524 BTRFS_ROOT_TRANS_TAG);
@@ -653,7 +650,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
653 int ret = 0; 650 int ret = 0;
654 int err; 651 int err;
655 652
656 while(!list_empty(list)) { 653 while (!list_empty(list)) {
657 struct btrfs_root *root; 654 struct btrfs_root *root;
658 655
659 dirty = list_entry(list->prev, struct btrfs_dirty_root, list); 656 dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
@@ -663,13 +660,12 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
663 root = dirty->latest_root; 660 root = dirty->latest_root;
664 atomic_inc(&root->fs_info->throttles); 661 atomic_inc(&root->fs_info->throttles);
665 662
666 while(1) { 663 while (1) {
667 trans = btrfs_start_transaction(tree_root, 1); 664 trans = btrfs_start_transaction(tree_root, 1);
668 mutex_lock(&root->fs_info->drop_mutex); 665 mutex_lock(&root->fs_info->drop_mutex);
669 ret = btrfs_drop_snapshot(trans, dirty->root); 666 ret = btrfs_drop_snapshot(trans, dirty->root);
670 if (ret != -EAGAIN) { 667 if (ret != -EAGAIN)
671 break; 668 break;
672 }
673 mutex_unlock(&root->fs_info->drop_mutex); 669 mutex_unlock(&root->fs_info->drop_mutex);
674 670
675 err = btrfs_update_root(trans, 671 err = btrfs_update_root(trans,
@@ -874,7 +870,7 @@ static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
874 struct list_head *head = &trans->transaction->pending_snapshots; 870 struct list_head *head = &trans->transaction->pending_snapshots;
875 int ret; 871 int ret;
876 872
877 while(!list_empty(head)) { 873 while (!list_empty(head)) {
878 pending = list_entry(head->next, 874 pending = list_entry(head->next,
879 struct btrfs_pending_snapshot, list); 875 struct btrfs_pending_snapshot, list);
880 ret = finish_pending_snapshot(fs_info, pending); 876 ret = finish_pending_snapshot(fs_info, pending);
@@ -1076,9 +1072,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1076 1072
1077 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1073 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1078 1074
1079 if (root->fs_info->closing) { 1075 if (root->fs_info->closing)
1080 drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots); 1076 drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
1081 }
1082 return ret; 1077 return ret;
1083} 1078}
1084 1079
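
transaction.c drops its local extern declarations of the transaction caches; the usual convention is that a shared header carries the single extern declaration and exactly one .c file defines the variable, so other files never repeat the extern by hand. A toy single-file sketch of that split, with the two halves marked by comments; the struct and names are stand-ins, not the kernel's kmem_cache:

#include <stdio.h>

/* What would live in a shared header: a forward declaration and one extern. */
struct kmem_cache;
extern struct kmem_cache *trans_handle_cachep;

/* What would live in exactly one .c file: the real type and the definition. */
struct kmem_cache {
        const char *name;
};

static struct kmem_cache handle_cache = { "trans_handle" };
struct kmem_cache *trans_handle_cachep = &handle_cache;

int main(void)
{
        /* Every other file would include the header instead of re-declaring. */
        printf("cache: %s\n", trans_handle_cachep->name);
        return 0;
}

Keeping the declaration in one header means the compiler checks every user against the same type, which a stray extern in a .c file would bypass.
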
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index ffe7f639732b..ea292117f882 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -66,9 +66,9 @@ static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
66 trans->block_group = BTRFS_I(inode)->block_group; 66 trans->block_group = BTRFS_I(inode)->block_group;
67} 67}
68 68
69static inline void btrfs_update_inode_block_group(struct 69static inline void btrfs_update_inode_block_group(
70 btrfs_trans_handle *trans, 70 struct btrfs_trans_handle *trans,
71 struct inode *inode) 71 struct inode *inode)
72{ 72{
73 BTRFS_I(inode)->block_group = trans->block_group; 73 BTRFS_I(inode)->block_group = trans->block_group;
74} 74}
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index a6a3956cedfb..3e8358c36165 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -23,10 +23,11 @@
23#include "transaction.h" 23#include "transaction.h"
24#include "locking.h" 24#include "locking.h"
25 25
26/* defrag all the leaves in a given btree. If cache_only == 1, don't read things 26/* defrag all the leaves in a given btree. If cache_only == 1, don't read
27 * from disk, otherwise read all the leaves and try to get key order to 27 * things from disk, otherwise read all the leaves and try to get key order to
28 * better reflect disk order 28 * better reflect disk order
29 */ 29 */
30
30int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, 31int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
31 struct btrfs_root *root, int cache_only) 32 struct btrfs_root *root, int cache_only)
32{ 33{
@@ -65,9 +66,9 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
65 level = btrfs_header_level(root->node); 66 level = btrfs_header_level(root->node);
66 orig_level = level; 67 orig_level = level;
67 68
68 if (level == 0) { 69 if (level == 0)
69 goto out; 70 goto out;
70 } 71
71 if (root->defrag_progress.objectid == 0) { 72 if (root->defrag_progress.objectid == 0) {
72 struct extent_buffer *root_node; 73 struct extent_buffer *root_node;
73 u32 nritems; 74 u32 nritems;
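
tree-defrag.c, like many hunks above, drops the braces around single-statement if bodies, the CodingStyle rule that checkpatch flags most often in this patch; braces stay only where a branch needs more than one statement. A compact illustration with arbitrary values:

#include <stdio.h>

static int defrag_level(int level)
{
        if (level == 0)                 /* single statement: no braces */
                return 0;

        if (level > 8) {                /* multiple statements: keep braces */
                fprintf(stderr, "unexpected level %d\n", level);
                return -1;
        }

        printf("defragging %d level(s)\n", level);
        return level;
}

int main(void)
{
        defrag_level(0);
        defrag_level(3);
        defrag_level(9);
        return 0;
}
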
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index b1c2921f5bef..3a72a1b6c247 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -829,7 +829,7 @@ conflict_again:
829 */ 829 */
830 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 830 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
831 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); 831 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
832 while(ptr < ptr_end) { 832 while (ptr < ptr_end) {
833 victim_ref = (struct btrfs_inode_ref *)ptr; 833 victim_ref = (struct btrfs_inode_ref *)ptr;
834 victim_name_len = btrfs_inode_ref_name_len(leaf, 834 victim_name_len = btrfs_inode_ref_name_len(leaf,
835 victim_ref); 835 victim_ref);
@@ -938,9 +938,8 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans,
938 938
939 file_bytes = (item_size / csum_size) * root->sectorsize; 939 file_bytes = (item_size / csum_size) * root->sectorsize;
940 sums = kzalloc(btrfs_ordered_sum_size(root, file_bytes), GFP_NOFS); 940 sums = kzalloc(btrfs_ordered_sum_size(root, file_bytes), GFP_NOFS);
941 if (!sums) { 941 if (!sums)
942 return -ENOMEM; 942 return -ENOMEM;
943 }
944 943
945 INIT_LIST_HEAD(&sums->list); 944 INIT_LIST_HEAD(&sums->list);
946 sums->len = file_bytes; 945 sums->len = file_bytes;
@@ -952,7 +951,7 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans,
952 sector_sum = sums->sums; 951 sector_sum = sums->sums;
953 cur_offset = key->offset; 952 cur_offset = key->offset;
954 ptr = btrfs_item_ptr_offset(eb, slot); 953 ptr = btrfs_item_ptr_offset(eb, slot);
955 while(item_size > 0) { 954 while (item_size > 0) {
956 sector_sum->bytenr = cur_offset; 955 sector_sum->bytenr = cur_offset;
957 read_extent_buffer(eb, &sector_sum->sum, ptr, csum_size); 956 read_extent_buffer(eb, &sector_sum->sum, ptr, csum_size);
958 sector_sum++; 957 sector_sum++;
@@ -995,7 +994,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
995 994
996 path = btrfs_alloc_path(); 995 path = btrfs_alloc_path();
997 996
998 while(1) { 997 while (1) {
999 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 998 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1000 if (ret < 0) 999 if (ret < 0)
1001 break; 1000 break;
@@ -1012,7 +1011,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1012 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 1011 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1013 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], 1012 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1014 path->slots[0]); 1013 path->slots[0]);
1015 while(ptr < ptr_end) { 1014 while (ptr < ptr_end) {
1016 struct btrfs_inode_ref *ref; 1015 struct btrfs_inode_ref *ref;
1017 1016
1018 ref = (struct btrfs_inode_ref *)ptr; 1017 ref = (struct btrfs_inode_ref *)ptr;
@@ -1048,7 +1047,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1048 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1047 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1049 key.type = BTRFS_ORPHAN_ITEM_KEY; 1048 key.type = BTRFS_ORPHAN_ITEM_KEY;
1050 key.offset = (u64)-1; 1049 key.offset = (u64)-1;
1051 while(1) { 1050 while (1) {
1052 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1051 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1053 if (ret < 0) 1052 if (ret < 0)
1054 break; 1053 break;
@@ -1206,8 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1206 if (key->type == BTRFS_DIR_ITEM_KEY) { 1205 if (key->type == BTRFS_DIR_ITEM_KEY) {
1207 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, 1206 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1208 name, name_len, 1); 1207 name, name_len, 1);
1209 } 1208 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1210 else if (key->type == BTRFS_DIR_INDEX_KEY) {
1211 dst_di = btrfs_lookup_dir_index_item(trans, root, path, 1209 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1212 key->objectid, 1210 key->objectid,
1213 key->offset, name, 1211 key->offset, name,
@@ -1282,7 +1280,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1282 1280
1283 ptr = btrfs_item_ptr_offset(eb, slot); 1281 ptr = btrfs_item_ptr_offset(eb, slot);
1284 ptr_end = ptr + item_size; 1282 ptr_end = ptr + item_size;
1285 while(ptr < ptr_end) { 1283 while (ptr < ptr_end) {
1286 di = (struct btrfs_dir_item *)ptr; 1284 di = (struct btrfs_dir_item *)ptr;
1287 name_len = btrfs_dir_name_len(eb, di); 1285 name_len = btrfs_dir_name_len(eb, di);
1288 ret = replay_one_name(trans, root, path, eb, di, key); 1286 ret = replay_one_name(trans, root, path, eb, di, key);
@@ -1408,7 +1406,7 @@ again:
1408 item_size = btrfs_item_size_nr(eb, slot); 1406 item_size = btrfs_item_size_nr(eb, slot);
1409 ptr = btrfs_item_ptr_offset(eb, slot); 1407 ptr = btrfs_item_ptr_offset(eb, slot);
1410 ptr_end = ptr + item_size; 1408 ptr_end = ptr + item_size;
1411 while(ptr < ptr_end) { 1409 while (ptr < ptr_end) {
1412 di = (struct btrfs_dir_item *)ptr; 1410 di = (struct btrfs_dir_item *)ptr;
1413 name_len = btrfs_dir_name_len(eb, di); 1411 name_len = btrfs_dir_name_len(eb, di);
1414 name = kmalloc(name_len, GFP_NOFS); 1412 name = kmalloc(name_len, GFP_NOFS);
@@ -1513,14 +1511,14 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1513again: 1511again:
1514 range_start = 0; 1512 range_start = 0;
1515 range_end = 0; 1513 range_end = 0;
1516 while(1) { 1514 while (1) {
1517 ret = find_dir_range(log, path, dirid, key_type, 1515 ret = find_dir_range(log, path, dirid, key_type,
1518 &range_start, &range_end); 1516 &range_start, &range_end);
1519 if (ret != 0) 1517 if (ret != 0)
1520 break; 1518 break;
1521 1519
1522 dir_key.offset = range_start; 1520 dir_key.offset = range_start;
1523 while(1) { 1521 while (1) {
1524 int nritems; 1522 int nritems;
1525 ret = btrfs_search_slot(NULL, root, &dir_key, path, 1523 ret = btrfs_search_slot(NULL, root, &dir_key, path,
1526 0, 0); 1524 0, 0);
@@ -1676,7 +1674,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
1676 return 0; 1674 return 0;
1677} 1675}
1678 1676
1679static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans, 1677static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1680 struct btrfs_root *root, 1678 struct btrfs_root *root,
1681 struct btrfs_path *path, int *level, 1679 struct btrfs_path *path, int *level,
1682 struct walk_control *wc) 1680 struct walk_control *wc)
@@ -1694,7 +1692,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
1694 WARN_ON(*level < 0); 1692 WARN_ON(*level < 0);
1695 WARN_ON(*level >= BTRFS_MAX_LEVEL); 1693 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1696 1694
1697 while(*level > 0) { 1695 while (*level > 0) {
1698 WARN_ON(*level < 0); 1696 WARN_ON(*level < 0);
1699 WARN_ON(*level >= BTRFS_MAX_LEVEL); 1697 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1700 cur = path->nodes[*level]; 1698 cur = path->nodes[*level];
@@ -1753,11 +1751,11 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
1753 WARN_ON(*level < 0); 1751 WARN_ON(*level < 0);
1754 WARN_ON(*level >= BTRFS_MAX_LEVEL); 1752 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1755 1753
1756 if (path->nodes[*level] == root->node) { 1754 if (path->nodes[*level] == root->node)
1757 parent = path->nodes[*level]; 1755 parent = path->nodes[*level];
1758 } else { 1756 else
1759 parent = path->nodes[*level + 1]; 1757 parent = path->nodes[*level + 1];
1760 } 1758
1761 bytenr = path->nodes[*level]->start; 1759 bytenr = path->nodes[*level]->start;
1762 1760
1763 blocksize = btrfs_level_size(root, *level); 1761 blocksize = btrfs_level_size(root, *level);
@@ -1790,7 +1788,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
1790 return 0; 1788 return 0;
1791} 1789}
1792 1790
1793static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans, 1791static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1794 struct btrfs_root *root, 1792 struct btrfs_root *root,
1795 struct btrfs_path *path, int *level, 1793 struct btrfs_path *path, int *level,
1796 struct walk_control *wc) 1794 struct walk_control *wc)
@@ -1801,7 +1799,7 @@ static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans,
1801 int slot; 1799 int slot;
1802 int ret; 1800 int ret;
1803 1801
1804 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 1802 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1805 slot = path->slots[i]; 1803 slot = path->slots[i];
1806 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) { 1804 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
1807 struct extent_buffer *node; 1805 struct extent_buffer *node;
@@ -1875,7 +1873,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1875 extent_buffer_get(log->node); 1873 extent_buffer_get(log->node);
1876 path->slots[level] = 0; 1874 path->slots[level] = 0;
1877 1875
1878 while(1) { 1876 while (1) {
1879 wret = walk_down_log_tree(trans, log, path, &level, wc); 1877 wret = walk_down_log_tree(trans, log, path, &level, wc);
1880 if (wret > 0) 1878 if (wret > 0)
1881 break; 1879 break;
@@ -1941,7 +1939,7 @@ static int wait_log_commit(struct btrfs_root *log)
1941 schedule(); 1939 schedule();
1942 finish_wait(&log->fs_info->tree_log_wait, &wait); 1940 finish_wait(&log->fs_info->tree_log_wait, &wait);
1943 mutex_lock(&log->fs_info->tree_log_mutex); 1941 mutex_lock(&log->fs_info->tree_log_mutex);
1944 } while(transid == log->fs_info->tree_log_transid && 1942 } while (transid == log->fs_info->tree_log_transid &&
1945 atomic_read(&log->fs_info->tree_log_commit)); 1943 atomic_read(&log->fs_info->tree_log_commit));
1946 return 0; 1944 return 0;
1947} 1945}
@@ -1965,13 +1963,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1965 } 1963 }
1966 atomic_set(&log->fs_info->tree_log_commit, 1); 1964 atomic_set(&log->fs_info->tree_log_commit, 1);
1967 1965
1968 while(1) { 1966 while (1) {
1969 batch = log->fs_info->tree_log_batch; 1967 batch = log->fs_info->tree_log_batch;
1970 mutex_unlock(&log->fs_info->tree_log_mutex); 1968 mutex_unlock(&log->fs_info->tree_log_mutex);
1971 schedule_timeout_uninterruptible(1); 1969 schedule_timeout_uninterruptible(1);
1972 mutex_lock(&log->fs_info->tree_log_mutex); 1970 mutex_lock(&log->fs_info->tree_log_mutex);
1973 1971
1974 while(atomic_read(&log->fs_info->tree_log_writers)) { 1972 while (atomic_read(&log->fs_info->tree_log_writers)) {
1975 DEFINE_WAIT(wait); 1973 DEFINE_WAIT(wait);
1976 prepare_to_wait(&log->fs_info->tree_log_wait, &wait, 1974 prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
1977 TASK_UNINTERRUPTIBLE); 1975 TASK_UNINTERRUPTIBLE);
@@ -2030,7 +2028,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2030 ret = walk_log_tree(trans, log, &wc); 2028 ret = walk_log_tree(trans, log, &wc);
2031 BUG_ON(ret); 2029 BUG_ON(ret);
2032 2030
2033 while(1) { 2031 while (1) {
2034 ret = find_first_extent_bit(&log->dirty_log_pages, 2032 ret = find_first_extent_bit(&log->dirty_log_pages,
2035 0, &start, &end, EXTENT_DIRTY); 2033 0, &start, &end, EXTENT_DIRTY);
2036 if (ret) 2034 if (ret)
@@ -2287,9 +2285,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2287 struct btrfs_key tmp; 2285 struct btrfs_key tmp;
2288 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 2286 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
2289 path->slots[0]); 2287 path->slots[0]);
2290 if (key_type == tmp.type) { 2288 if (key_type == tmp.type)
2291 first_offset = max(min_offset, tmp.offset) + 1; 2289 first_offset = max(min_offset, tmp.offset) + 1;
2292 }
2293 } 2290 }
2294 goto done; 2291 goto done;
2295 } 2292 }
@@ -2319,7 +2316,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2319 * we have a block from this transaction, log every item in it 2316 * we have a block from this transaction, log every item in it
2320 * from our directory 2317 * from our directory
2321 */ 2318 */
2322 while(1) { 2319 while (1) {
2323 struct btrfs_key tmp; 2320 struct btrfs_key tmp;
2324 src = path->nodes[0]; 2321 src = path->nodes[0];
2325 nritems = btrfs_header_nritems(src); 2322 nritems = btrfs_header_nritems(src);
@@ -2396,7 +2393,7 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
2396again: 2393again:
2397 min_key = 0; 2394 min_key = 0;
2398 max_key = 0; 2395 max_key = 0;
2399 while(1) { 2396 while (1) {
2400 ret = log_dir_items(trans, root, inode, path, 2397 ret = log_dir_items(trans, root, inode, path,
2401 dst_path, key_type, min_key, 2398 dst_path, key_type, min_key,
2402 &max_key); 2399 &max_key);
@@ -2432,7 +2429,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
2432 key.type = max_key_type; 2429 key.type = max_key_type;
2433 key.offset = (u64)-1; 2430 key.offset = (u64)-1;
2434 2431
2435 while(1) { 2432 while (1) {
2436 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 2433 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2437 2434
2438 if (ret != 1) 2435 if (ret != 1)
@@ -2481,7 +2478,7 @@ static noinline int copy_extent_csums(struct btrfs_trans_handle *trans,
2481 list_add_tail(&sums->list, list); 2478 list_add_tail(&sums->list, list);
2482 2479
2483 path = btrfs_alloc_path(); 2480 path = btrfs_alloc_path();
2484 while(disk_bytenr < end) { 2481 while (disk_bytenr < end) {
2485 if (!item || disk_bytenr < item_start_offset || 2482 if (!item || disk_bytenr < item_start_offset ||
2486 disk_bytenr >= item_last_offset) { 2483 disk_bytenr >= item_last_offset) {
2487 struct btrfs_key found_key; 2484 struct btrfs_key found_key;
@@ -2496,7 +2493,8 @@ static noinline int copy_extent_csums(struct btrfs_trans_handle *trans,
2496 if (ret == -ENOENT || ret == -EFBIG) 2493 if (ret == -ENOENT || ret == -EFBIG)
2497 ret = 0; 2494 ret = 0;
2498 sum = 0; 2495 sum = 0;
2499 printk("log no csum found for byte %llu\n", 2496 printk(KERN_INFO "log no csum found for "
2497 "byte %llu\n",
2500 (unsigned long long)disk_bytenr); 2498 (unsigned long long)disk_bytenr);
2501 item = NULL; 2499 item = NULL;
2502 btrfs_release_path(root, path); 2500 btrfs_release_path(root, path);
@@ -2643,7 +2641,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
2643 * we have to do this after the loop above to avoid changing the 2641 * we have to do this after the loop above to avoid changing the
2644 * log tree while trying to change the log tree. 2642 * log tree while trying to change the log tree.
2645 */ 2643 */
2646 while(!list_empty(&ordered_sums)) { 2644 while (!list_empty(&ordered_sums)) {
2647 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 2645 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
2648 struct btrfs_ordered_sum, 2646 struct btrfs_ordered_sum,
2649 list); 2647 list);
@@ -2736,7 +2734,7 @@ static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
2736 BUG_ON(ret); 2734 BUG_ON(ret);
2737 path->keep_locks = 1; 2735 path->keep_locks = 1;
2738 2736
2739 while(1) { 2737 while (1) {
2740 ins_nr = 0; 2738 ins_nr = 0;
2741 ret = btrfs_search_forward(root, &min_key, &max_key, 2739 ret = btrfs_search_forward(root, &min_key, &max_key,
2742 path, 0, trans->transid); 2740 path, 0, trans->transid);
@@ -2848,7 +2846,7 @@ int btrfs_log_dentry(struct btrfs_trans_handle *trans,
2848 2846
2849 start_log_trans(trans, root); 2847 start_log_trans(trans, root);
2850 sb = dentry->d_inode->i_sb; 2848 sb = dentry->d_inode->i_sb;
2851 while(1) { 2849 while (1) {
2852 ret = __btrfs_log_inode(trans, root, dentry->d_inode, 2850 ret = __btrfs_log_inode(trans, root, dentry->d_inode,
2853 inode_only); 2851 inode_only);
2854 BUG_ON(ret); 2852 BUG_ON(ret);
@@ -2919,7 +2917,7 @@ again:
2919 key.offset = (u64)-1; 2917 key.offset = (u64)-1;
2920 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); 2918 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2921 2919
2922 while(1) { 2920 while (1) {
2923 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 2921 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
2924 if (ret < 0) 2922 if (ret < 0)
2925 break; 2923 break;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6672adcec9f8..b187b537888e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -140,7 +140,7 @@ static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
140 * the list if the block device is congested. This way, multiple devices 140 * the list if the block device is congested. This way, multiple devices
141 * can make progress from a single worker thread. 141 * can make progress from a single worker thread.
142 */ 142 */
143static int noinline run_scheduled_bios(struct btrfs_device *device) 143static noinline int run_scheduled_bios(struct btrfs_device *device)
144{ 144{
145 struct bio *pending; 145 struct bio *pending;
146 struct backing_dev_info *bdi; 146 struct backing_dev_info *bdi;
@@ -187,7 +187,7 @@ loop:
187 } 187 }
188 spin_unlock(&device->io_lock); 188 spin_unlock(&device->io_lock);
189 189
190 while(pending) { 190 while (pending) {
191 cur = pending; 191 cur = pending;
192 pending = pending->bi_next; 192 pending = pending->bi_next;
193 cur->bi_next = NULL; 193 cur->bi_next = NULL;
@@ -458,7 +458,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
458 458
459 bdev = open_bdev_exclusive(device->name, flags, holder); 459 bdev = open_bdev_exclusive(device->name, flags, holder);
460 if (IS_ERR(bdev)) { 460 if (IS_ERR(bdev)) {
461 printk("open %s failed\n", device->name); 461 printk(KERN_INFO "open %s failed\n", device->name);
462 goto error; 462 goto error;
463 } 463 }
464 set_blocksize(bdev, 4096); 464 set_blocksize(bdev, 4096);
@@ -570,14 +570,15 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
570 devid = le64_to_cpu(disk_super->dev_item.devid); 570 devid = le64_to_cpu(disk_super->dev_item.devid);
571 transid = btrfs_super_generation(disk_super); 571 transid = btrfs_super_generation(disk_super);
572 if (disk_super->label[0]) 572 if (disk_super->label[0])
573 printk("device label %s ", disk_super->label); 573 printk(KERN_INFO "device label %s ", disk_super->label);
574 else { 574 else {
575 /* FIXME, make a readl uuid parser */ 575 /* FIXME, make a readl uuid parser */
576 printk("device fsid %llx-%llx ", 576 printk(KERN_INFO "device fsid %llx-%llx ",
577 *(unsigned long long *)disk_super->fsid, 577 *(unsigned long long *)disk_super->fsid,
578 *(unsigned long long *)(disk_super->fsid + 8)); 578 *(unsigned long long *)(disk_super->fsid + 8));
579 } 579 }
580 printk("devid %Lu transid %Lu %s\n", devid, transid, path); 580 printk(KERN_INFO "devid %llu transid %llu %s\n",
581 (unsigned long long)devid, (unsigned long long)transid, path);
581 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 582 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
582 583
583 brelse(bh); 584 brelse(bh);
@@ -683,9 +684,8 @@ no_more_items:
683 goto check_pending; 684 goto check_pending;
684 } 685 }
685 } 686 }
686 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) { 687 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
687 goto next; 688 goto next;
688 }
689 689
690 start_found = 1; 690 start_found = 1;
691 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); 691 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
@@ -1001,14 +1001,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1001 1001
1002 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && 1002 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1003 root->fs_info->fs_devices->rw_devices <= 4) { 1003 root->fs_info->fs_devices->rw_devices <= 4) {
1004 printk("btrfs: unable to go below four devices on raid10\n"); 1004 printk(KERN_ERR "btrfs: unable to go below four devices "
1005 "on raid10\n");
1005 ret = -EINVAL; 1006 ret = -EINVAL;
1006 goto out; 1007 goto out;
1007 } 1008 }
1008 1009
1009 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && 1010 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1010 root->fs_info->fs_devices->rw_devices <= 2) { 1011 root->fs_info->fs_devices->rw_devices <= 2) {
1011 printk("btrfs: unable to go below two devices on raid1\n"); 1012 printk(KERN_ERR "btrfs: unable to go below two "
1013 "devices on raid1\n");
1012 ret = -EINVAL; 1014 ret = -EINVAL;
1013 goto out; 1015 goto out;
1014 } 1016 }
@@ -1031,7 +1033,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1031 bh = NULL; 1033 bh = NULL;
1032 disk_super = NULL; 1034 disk_super = NULL;
1033 if (!device) { 1035 if (!device) {
1034 printk("btrfs: no missing devices found to remove\n"); 1036 printk(KERN_ERR "btrfs: no missing devices found to "
1037 "remove\n");
1035 goto out; 1038 goto out;
1036 } 1039 }
1037 } else { 1040 } else {
@@ -1060,7 +1063,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1060 } 1063 }
1061 1064
1062 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) { 1065 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1063 printk("btrfs: unable to remove the only writeable device\n"); 1066 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1067 "device\n");
1064 ret = -EINVAL; 1068 ret = -EINVAL;
1065 goto error_brelse; 1069 goto error_brelse;
1066 } 1070 }
@@ -1286,9 +1290,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1286 return -EINVAL; 1290 return -EINVAL;
1287 1291
1288 bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder); 1292 bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
1289 if (!bdev) { 1293 if (!bdev)
1290 return -EIO; 1294 return -EIO;
1291 }
1292 1295
1293 if (root->fs_info->fs_devices->seeding) { 1296 if (root->fs_info->fs_devices->seeding) {
1294 seeding_dev = 1; 1297 seeding_dev = 1;
@@ -1401,8 +1404,8 @@ error:
1401 goto out; 1404 goto out;
1402} 1405}
1403 1406
1404static int noinline btrfs_update_device(struct btrfs_trans_handle *trans, 1407static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1405 struct btrfs_device *device) 1408 struct btrfs_device *device)
1406{ 1409{
1407 int ret; 1410 int ret;
1408 struct btrfs_path *path; 1411 struct btrfs_path *path;
@@ -1563,7 +1566,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
1563 int ret; 1566 int ret;
1564 int i; 1567 int i;
1565 1568
1566 printk("btrfs relocating chunk %llu\n", 1569 printk(KERN_INFO "btrfs relocating chunk %llu\n",
1567 (unsigned long long)chunk_offset); 1570 (unsigned long long)chunk_offset);
1568 root = root->fs_info->chunk_root; 1571 root = root->fs_info->chunk_root;
1569 extent_root = root->fs_info->extent_root; 1572 extent_root = root->fs_info->extent_root;
@@ -1748,7 +1751,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
1748 key.offset = (u64)-1; 1751 key.offset = (u64)-1;
1749 key.type = BTRFS_CHUNK_ITEM_KEY; 1752 key.type = BTRFS_CHUNK_ITEM_KEY;
1750 1753
1751 while(1) { 1754 while (1) {
1752 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 1755 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1753 if (ret < 0) 1756 if (ret < 0)
1754 goto error; 1757 goto error;
@@ -1916,7 +1919,7 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1916 return 0; 1919 return 0;
1917} 1920}
1918 1921
1919static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size, 1922static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
1920 int num_stripes, int sub_stripes) 1923 int num_stripes, int sub_stripes)
1921{ 1924{
1922 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) 1925 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
@@ -2041,7 +2044,7 @@ again:
2041 min_free += 1024 * 1024; 2044 min_free += 1024 * 1024;
2042 2045
2043 INIT_LIST_HEAD(&private_devs); 2046 INIT_LIST_HEAD(&private_devs);
2044 while(index < num_stripes) { 2047 while (index < num_stripes) {
2045 device = list_entry(cur, struct btrfs_device, dev_alloc_list); 2048 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2046 BUG_ON(!device->writeable); 2049 BUG_ON(!device->writeable);
2047 if (device->total_bytes > device->bytes_used) 2050 if (device->total_bytes > device->bytes_used)
@@ -2242,7 +2245,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2242 return 0; 2245 return 0;
2243} 2246}
2244 2247
2245static int noinline init_first_rw_device(struct btrfs_trans_handle *trans, 2248static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2246 struct btrfs_root *root, 2249 struct btrfs_root *root,
2247 struct btrfs_device *device) 2250 struct btrfs_device *device)
2248{ 2251{
@@ -2338,7 +2341,7 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2338{ 2341{
2339 struct extent_map *em; 2342 struct extent_map *em;
2340 2343
2341 while(1) { 2344 while (1) {
2342 spin_lock(&tree->map_tree.lock); 2345 spin_lock(&tree->map_tree.lock);
2343 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); 2346 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2344 if (em) 2347 if (em)
@@ -2413,9 +2416,8 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2413 int max_errors = 0; 2416 int max_errors = 0;
2414 struct btrfs_multi_bio *multi = NULL; 2417 struct btrfs_multi_bio *multi = NULL;
2415 2418
2416 if (multi_ret && !(rw & (1 << BIO_RW))) { 2419 if (multi_ret && !(rw & (1 << BIO_RW)))
2417 stripes_allocated = 1; 2420 stripes_allocated = 1;
2418 }
2419again: 2421again:
2420 if (multi_ret) { 2422 if (multi_ret) {
2421 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated), 2423 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
@@ -2434,7 +2436,9 @@ again:
2434 return 0; 2436 return 0;
2435 2437
2436 if (!em) { 2438 if (!em) {
2437 printk("unable to find logical %Lu len %Lu\n", logical, *length); 2439 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2440 (unsigned long long)logical,
2441 (unsigned long long)*length);
2438 BUG(); 2442 BUG();
2439 } 2443 }
2440 2444
@@ -2541,9 +2545,8 @@ again:
2541 device = map->stripes[stripe_index].dev; 2545 device = map->stripes[stripe_index].dev;
2542 if (device->bdev) { 2546 if (device->bdev) {
2543 bdi = blk_get_backing_dev_info(device->bdev); 2547 bdi = blk_get_backing_dev_info(device->bdev);
2544 if (bdi->unplug_io_fn) { 2548 if (bdi->unplug_io_fn)
2545 bdi->unplug_io_fn(bdi, unplug_page); 2549 bdi->unplug_io_fn(bdi, unplug_page);
2546 }
2547 } 2550 }
2548 } else { 2551 } else {
2549 multi->stripes[i].physical = 2552 multi->stripes[i].physical =
@@ -2717,7 +2720,7 @@ struct async_sched {
2717 * This will add one bio to the pending list for a device and make sure 2720 * This will add one bio to the pending list for a device and make sure
2718 * the work struct is scheduled. 2721 * the work struct is scheduled.
2719 */ 2722 */
2720static int noinline schedule_bio(struct btrfs_root *root, 2723static noinline int schedule_bio(struct btrfs_root *root,
2721 struct btrfs_device *device, 2724 struct btrfs_device *device,
2722 int rw, struct bio *bio) 2725 int rw, struct bio *bio)
2723{ 2726{
@@ -2785,8 +2788,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2785 2788
2786 total_devs = multi->num_stripes; 2789 total_devs = multi->num_stripes;
2787 if (map_length < length) { 2790 if (map_length < length) {
2788 printk("mapping failed logical %Lu bio len %Lu " 2791 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
2789 "len %Lu\n", logical, length, map_length); 2792 "len %llu\n", (unsigned long long)logical,
2793 (unsigned long long)length,
2794 (unsigned long long)map_length);
2790 BUG(); 2795 BUG();
2791 } 2796 }
2792 multi->end_io = first_bio->bi_end_io; 2797 multi->end_io = first_bio->bi_end_io;
@@ -2794,7 +2799,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2794 multi->orig_bio = first_bio; 2799 multi->orig_bio = first_bio;
2795 atomic_set(&multi->stripes_pending, multi->num_stripes); 2800 atomic_set(&multi->stripes_pending, multi->num_stripes);
2796 2801
2797 while(dev_nr < total_devs) { 2802 while (dev_nr < total_devs) {
2798 if (total_devs > 1) { 2803 if (total_devs > 1) {
2799 if (dev_nr < total_devs - 1) { 2804 if (dev_nr < total_devs - 1) {
2800 bio = bio_clone(first_bio, GFP_NOFS); 2805 bio = bio_clone(first_bio, GFP_NOFS);
@@ -3058,7 +3063,8 @@ static int read_one_dev(struct btrfs_root *root,
3058 return -EIO; 3063 return -EIO;
3059 3064
3060 if (!device) { 3065 if (!device) {
3061 printk("warning devid %Lu missing\n", devid); 3066 printk(KERN_WARNING "warning devid %llu missing\n",
3067 (unsigned long long)devid);
3062 device = add_missing_dev(root, devid, dev_uuid); 3068 device = add_missing_dev(root, devid, dev_uuid);
3063 if (!device) 3069 if (!device)
3064 return -ENOMEM; 3070 return -ENOMEM;
@@ -3078,12 +3084,6 @@ static int read_one_dev(struct btrfs_root *root,
3078 if (device->writeable) 3084 if (device->writeable)
3079 device->fs_devices->total_rw_bytes += device->total_bytes; 3085 device->fs_devices->total_rw_bytes += device->total_bytes;
3080 ret = 0; 3086 ret = 0;
3081#if 0
3082 ret = btrfs_open_device(device);
3083 if (ret) {
3084 kfree(device);
3085 }
3086#endif
3087 return ret; 3087 return ret;
3088} 3088}
3089 3089
@@ -3174,7 +3174,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
3174 key.type = 0; 3174 key.type = 0;
3175again: 3175again:
3176 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3176 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3177 while(1) { 3177 while (1) {
3178 leaf = path->nodes[0]; 3178 leaf = path->nodes[0];
3179 slot = path->slots[0]; 3179 slot = path->slots[0];
3180 if (slot >= btrfs_header_nritems(leaf)) { 3180 if (slot >= btrfs_header_nritems(leaf)) {
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 4146f0710e6a..7f332e270894 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -264,7 +264,8 @@ struct xattr_handler *btrfs_xattr_handlers[] = {
264 */ 264 */
265static bool btrfs_is_valid_xattr(const char *name) 265static bool btrfs_is_valid_xattr(const char *name)
266{ 266{
267 return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || 267 return !strncmp(name, XATTR_SECURITY_PREFIX,
268 XATTR_SECURITY_PREFIX_LEN) ||
268 !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || 269 !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
269 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || 270 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
270 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); 271 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index c4617cde6c73..ecfbce836d32 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -152,7 +152,7 @@ static int free_workspace(struct workspace *workspace)
152static void free_workspaces(void) 152static void free_workspaces(void)
153{ 153{
154 struct workspace *workspace; 154 struct workspace *workspace;
155 while(!list_empty(&idle_workspace)) { 155 while (!list_empty(&idle_workspace)) {
156 workspace = list_entry(idle_workspace.next, struct workspace, 156 workspace = list_entry(idle_workspace.next, struct workspace,
157 list); 157 list);
158 list_del(&workspace->list); 158 list_del(&workspace->list);
@@ -397,12 +397,10 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
397 ret = -1; 397 ret = -1;
398 goto out; 398 goto out;
399 } 399 }
400 while(workspace->inf_strm.total_in < srclen) { 400 while (workspace->inf_strm.total_in < srclen) {
401 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); 401 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
402 if (ret != Z_OK && ret != Z_STREAM_END) { 402 if (ret != Z_OK && ret != Z_STREAM_END)
403 break; 403 break;
404 }
405
406 /* 404 /*
407 * buf start is the byte offset we're of the start of 405 * buf start is the byte offset we're of the start of
408 * our workspace buffer 406 * our workspace buffer
@@ -424,16 +422,14 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
424 /* we didn't make progress in this inflate 422 /* we didn't make progress in this inflate
425 * call, we're done 423 * call, we're done
426 */ 424 */
427 if (ret != Z_STREAM_END) { 425 if (ret != Z_STREAM_END)
428 ret = -1; 426 ret = -1;
429 }
430 break; 427 break;
431 } 428 }
432 429
433 /* we haven't yet hit data corresponding to this page */ 430 /* we haven't yet hit data corresponding to this page */
434 if (total_out <= start_byte) { 431 if (total_out <= start_byte)
435 goto next; 432 goto next;
436 }
437 433
438 /* 434 /*
439 * the start of the data we care about is offset into 435 * the start of the data we care about is offset into
@@ -448,7 +444,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
448 current_buf_start = buf_start; 444 current_buf_start = buf_start;
449 445
450 /* copy bytes from the working buffer into the pages */ 446 /* copy bytes from the working buffer into the pages */
451 while(working_bytes > 0) { 447 while (working_bytes > 0) {
452 bytes = min(PAGE_CACHE_SIZE - pg_offset, 448 bytes = min(PAGE_CACHE_SIZE - pg_offset,
453 PAGE_CACHE_SIZE - buf_offset); 449 PAGE_CACHE_SIZE - buf_offset);
454 bytes = min(bytes, working_bytes); 450 bytes = min(bytes, working_bytes);
@@ -471,6 +467,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
471 ret = 0; 467 ret = 0;
472 goto done; 468 goto done;
473 } 469 }
470
474 page_out = bvec[page_out_index].bv_page; 471 page_out = bvec[page_out_index].bv_page;
475 pg_offset = 0; 472 pg_offset = 0;
476 page_bytes_left = PAGE_CACHE_SIZE; 473 page_bytes_left = PAGE_CACHE_SIZE;
@@ -480,9 +477,8 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
480 * make sure our new page is covered by this 477 * make sure our new page is covered by this
481 * working buffer 478 * working buffer
482 */ 479 */
483 if (total_out <= start_byte) { 480 if (total_out <= start_byte)
484 goto next; 481 goto next;
485 }
486 482
487 /* the next page in the biovec might not 483 /* the next page in the biovec might not
488 * be adjacent to the last page, but it 484 * be adjacent to the last page, but it
@@ -517,11 +513,10 @@ next:
517 PAGE_CACHE_SIZE); 513 PAGE_CACHE_SIZE);
518 } 514 }
519 } 515 }
520 if (ret != Z_STREAM_END) { 516 if (ret != Z_STREAM_END)
521 ret = -1; 517 ret = -1;
522 } else { 518 else
523 ret = 0; 519 ret = 0;
524 }
525done: 520done:
526 zlib_inflateEnd(&workspace->inf_strm); 521 zlib_inflateEnd(&workspace->inf_strm);
527 if (data_in) 522 if (data_in)
@@ -579,16 +574,15 @@ int btrfs_zlib_decompress(unsigned char *data_in,
579 goto out; 574 goto out;
580 } 575 }
581 576
582 while(bytes_left > 0) { 577 while (bytes_left > 0) {
583 unsigned long buf_start; 578 unsigned long buf_start;
584 unsigned long buf_offset; 579 unsigned long buf_offset;
585 unsigned long bytes; 580 unsigned long bytes;
586 unsigned long pg_offset = 0; 581 unsigned long pg_offset = 0;
587 582
588 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); 583 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
589 if (ret != Z_OK && ret != Z_STREAM_END) { 584 if (ret != Z_OK && ret != Z_STREAM_END)
590 break; 585 break;
591 }
592 586
593 buf_start = total_out; 587 buf_start = total_out;
594 total_out = workspace->inf_strm.total_out; 588 total_out = workspace->inf_strm.total_out;
@@ -598,15 +592,13 @@ int btrfs_zlib_decompress(unsigned char *data_in,
598 break; 592 break;
599 } 593 }
600 594
601 if (total_out <= start_byte) { 595 if (total_out <= start_byte)
602 goto next; 596 goto next;
603 }
604 597
605 if (total_out > start_byte && buf_start < start_byte) { 598 if (total_out > start_byte && buf_start < start_byte)
606 buf_offset = start_byte - buf_start; 599 buf_offset = start_byte - buf_start;
607 } else { 600 else
608 buf_offset = 0; 601 buf_offset = 0;
609 }
610 602
611 bytes = min(PAGE_CACHE_SIZE - pg_offset, 603 bytes = min(PAGE_CACHE_SIZE - pg_offset,
612 PAGE_CACHE_SIZE - buf_offset); 604 PAGE_CACHE_SIZE - buf_offset);
@@ -622,11 +614,12 @@ next:
622 workspace->inf_strm.next_out = workspace->buf; 614 workspace->inf_strm.next_out = workspace->buf;
623 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; 615 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
624 } 616 }
625 if (ret != Z_STREAM_END && bytes_left != 0) { 617
618 if (ret != Z_STREAM_END && bytes_left != 0)
626 ret = -1; 619 ret = -1;
627 } else { 620 else
628 ret = 0; 621 ret = 0;
629 } 622
630 zlib_inflateEnd(&workspace->inf_strm); 623 zlib_inflateEnd(&workspace->inf_strm);
631out: 624out:
632 free_workspace(workspace); 625 free_workspace(workspace);