author     Chris Mason <chris.mason@oracle.com>  2008-09-05 16:09:51 -0400
committer  Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:07 -0400
commit     a1b32a5932cfac7c38b442582285f3da2a09dfd8 (patch)
tree       d468c51c7391ee1a0cfcc6ff1e76e11c01646efe /fs
parent     95819c05732c511338b43c115ffbcee978c02888 (diff)
Btrfs: Add debugging checks to track down corrupted metadata
Signed-off-by: Chris Mason <chris.mason@oracle.com>
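
Most of this patch is debugging instrumentation: unconditional printk()s and a WARN_ON() at the points where a metadata read comes back wrong (bad header start, read errors, pages not uptodate after waiting), plus noinline markings on a number of helpers, presumably so they stay visible in stack traces while the corruption is chased down. It also reworks set_extent_buffer_dirty() to lock and dirty each page individually rather than dirtying the whole buffer range at the end. The central check is the one added to btree_readpage_end_io_hook(), which compares the block start recorded in the tree block header with the logical address the read was issued for. Below is a minimal userspace sketch of that pattern; the tree_block_header struct and check_tree_block_start() helper are hypothetical stand-ins for btrfs_header_bytenr() and eb->start, not btrfs code.

#include <stdio.h>

/* Hypothetical stand-in for the on-disk tree block header. */
struct tree_block_header {
	unsigned long long bytenr;	/* logical address recorded in the block */
};

/*
 * Mirrors the check added to btree_readpage_end_io_hook(): if the address
 * stored in the header does not match the address we asked to read, the
 * metadata is corrupted (or came from the wrong place), so complain loudly
 * and hand -EIO back to the caller.
 */
static int check_tree_block_start(const struct tree_block_header *hdr,
				  unsigned long long start)
{
	if (hdr->bytenr != start) {
		fprintf(stderr, "bad tree block start %llu %llu\n",
			hdr->bytenr, start);
		return -5;	/* -EIO */
	}
	return 0;
}

int main(void)
{
	struct tree_block_header good = { .bytenr = 4096 };
	struct tree_block_header bad  = { .bytenr = 8192 };

	printf("good: %d\n", check_tree_block_start(&good, 4096));
	printf("bad:  %d\n", check_tree_block_start(&bad, 4096));
	return 0;
}
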
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/disk-io.c     7
-rw-r--r--  fs/btrfs/extent_io.c  21
-rw-r--r--  fs/btrfs/file.c       15
-rw-r--r--  fs/btrfs/volumes.c    40
4 files changed, 49 insertions, 34 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6a218f792e59..8e7a938bfbc7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -250,7 +250,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
 			return ret;
-
+printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num);
 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 					      eb->start, eb->len);
 		if (num_copies == 1)
@@ -348,6 +348,9 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
+		printk("bad tree block start %llu %llu\n",
+		       (unsigned long long)found_start,
+		       (unsigned long long)eb->start);
 		ret = -EIO;
 		goto err;
 	}
@@ -709,6 +712,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 
 	if (ret == 0) {
 		buf->flags |= EXTENT_UPTODATE;
+	} else {
+		WARN_ON(1);
 	}
 	return buf;
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 83ba0c328722..7ca89c45d401 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1811,6 +1811,7 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur);
 		}
 		/* the get_extent function already copied into the page */
 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
 			page_offset += iosize;
@@ -2785,21 +2786,20 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * properly set.  releasepage may drop page->private
 		 * on us if the page isn't already dirty.
 		 */
+		lock_page(page);
 		if (i == 0) {
-			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
-			lock_page(page);
 			set_page_extent_mapped(page);
-			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
-		if (i == 0)
-			unlock_page(page);
+		set_extent_dirty(tree, page_offset(page),
+				 page_offset(page) + PAGE_CACHE_SIZE -1,
+				 GFP_NOFS);
+		unlock_page(page);
 	}
-	return set_extent_dirty(tree, eb->start,
-				eb->start + eb->len - 1, GFP_NOFS);
+	return 0;
 }
 EXPORT_SYMBOL(set_extent_buffer_dirty);
 
@@ -2952,6 +2952,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (all_uptodate) {
 		if (start_i == 0)
 			eb->flags |= EXTENT_UPTODATE;
+		if (ret) {
+			printk("all up to date but ret is %d\n", ret);
+		}
 		goto unlock_exit;
 	}
 
@@ -2968,6 +2971,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 						 mirror_num);
 			if (err) {
 				ret = err;
+				printk("err %d from __extent_read_full_page\n", ret);
 			}
 		} else {
 			unlock_page(page);
@@ -2978,12 +2982,15 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 		submit_one_bio(READ, bio, mirror_num);
 
 	if (ret || !wait) {
+		if (ret)
+			printk("ret %d wait %d returning\n", ret, wait);
 		return ret;
 	}
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		wait_on_page_locked(page);
 		if (!PageUptodate(page)) {
+			printk("page not uptodate after wait_on_page_locked\n");
 			ret = -EIO;
 		}
 	}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index eb8e4556fa71..e9e86fbaa243 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -39,9 +39,10 @@
 #include "compat.h"
 
 
-static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
-				struct page **prepared_pages,
-				const char __user * buf)
+static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
+					 int write_bytes,
+					 struct page **prepared_pages,
+					 const char __user * buf)
 {
 	long page_fault = 0;
 	int i;
@@ -69,7 +70,7 @@ static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
 	return page_fault ? -EFAULT : 0;
 }
 
-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
 	size_t i;
 	for (i = 0; i < num_pages; i++) {
@@ -359,7 +360,7 @@ out_unlock:
 	return err;
 }
 
-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
+int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 {
 	struct extent_map *em;
 	struct extent_map *split = NULL;
@@ -515,7 +516,7 @@ out:
  * it is either truncated or split.  Anything entirely inside the range
  * is deleted from the tree.
  */
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
 		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
 {
@@ -785,7 +786,7 @@ out:
 /*
  * this gets pages into the page cache and locks them down
  */
-static int prepare_pages(struct btrfs_root *root, struct file *file,
+static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
 			 struct page **pages, size_t num_pages,
 			 loff_t pos, unsigned long first_index,
 			 unsigned long last_index, size_t write_bytes)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5b1b60839d21..37a8ea23e81d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -94,8 +94,8 @@ int btrfs_cleanup_fs_uuids(void)
 	return 0;
 }
 
-static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
-					  u8 *uuid)
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+						   u64 devid, u8 *uuid)
 {
 	struct btrfs_device *dev;
 	struct list_head *cur;
@@ -110,7 +110,7 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
 	return NULL;
 }
 
-static struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 {
 	struct list_head *cur;
 	struct btrfs_fs_devices *fs_devices;
@@ -134,7 +134,7 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid)
  * the list if the block device is congested.  This way, multiple devices
  * can make progress from a single worker thread.
  */
-int run_scheduled_bios(struct btrfs_device *device)
+static int noinline run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
@@ -233,7 +233,7 @@ void pending_bios_fn(struct btrfs_work *work)
 	run_scheduled_bios(device);
 }
 
-static int device_list_add(const char *path,
+static noinline int device_list_add(const char *path,
 			   struct btrfs_super_block *disk_super,
 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 {
@@ -480,10 +480,10 @@ error:
  * called very infrequently and that a given device has a small number
  * of extents
  */
-static int find_free_dev_extent(struct btrfs_trans_handle *trans,
-				struct btrfs_device *device,
-				struct btrfs_path *path,
-				u64 num_bytes, u64 *start)
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
+					 struct btrfs_device *device,
+					 struct btrfs_path *path,
+					 u64 num_bytes, u64 *start)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = device->dev_root;
@@ -645,7 +645,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+int noinline btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 			   struct btrfs_device *device,
 			   u64 chunk_tree, u64 chunk_objectid,
 			   u64 chunk_offset,
@@ -693,7 +693,8 @@ err:
 	return ret;
 }
 
-static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
+static noinline int find_next_chunk(struct btrfs_root *root,
+				    u64 objectid, u64 *offset)
 {
 	struct btrfs_path *path;
 	int ret;
@@ -735,8 +736,8 @@ error:
 	return ret;
 }
 
-static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
-			   u64 *objectid)
+static noinline int find_next_devid(struct btrfs_root *root,
+				    struct btrfs_path *path, u64 *objectid)
 {
 	int ret;
 	struct btrfs_key key;
@@ -1103,8 +1104,8 @@ out_close_bdev:
 	goto out;
 }
 
-int btrfs_update_device(struct btrfs_trans_handle *trans,
-			struct btrfs_device *device)
+int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
+				 struct btrfs_device *device)
 {
 	int ret;
 	struct btrfs_path *path;
@@ -1544,8 +1545,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
-			       int sub_stripes)
+static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
+					int num_stripes, int sub_stripes)
 {
 	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
 		return calc_size;
@@ -2141,8 +2142,9 @@ struct async_sched {
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
-		 int rw, struct bio *bio)
+static int noinline schedule_bio(struct btrfs_root *root,
+				 struct btrfs_device *device,
+				 int rw, struct bio *bio)
 {
 	int should_queue = 1;
 