From c3b9a62c8f932f32a733d6b628f61f3f28345727 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 18 Aug 2010 05:29:12 -0400 Subject: btrfs: replace barriers with explicit flush / FUA usage Switch to the WRITE_FLUSH_FUA flag for log writes, remove the EOPNOTSUPP detection for barriers and stop setting the barrier flag for discards. Signed-off-by: Christoph Hellwig Acked-by: Chris Mason Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- fs/btrfs/disk-io.c | 19 ++++--------------- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/volumes.c | 4 ---- fs/btrfs/volumes.h | 1 - 4 files changed, 5 insertions(+), 21 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 64f10082f048..5e789f4a3ed0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2063,7 +2063,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) if (uptodate) { set_buffer_uptodate(bh); } else { - if (!buffer_eopnotsupp(bh) && printk_ratelimit()) { + if (printk_ratelimit()) { printk(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); @@ -2200,21 +2200,10 @@ static int write_dev_supers(struct btrfs_device *device, bh->b_end_io = btrfs_end_buffer_write_sync; } - if (i == last_barrier && do_barriers && device->barriers) { - ret = submit_bh(WRITE_BARRIER, bh); - if (ret == -EOPNOTSUPP) { - printk("btrfs: disabling barriers on dev %s\n", - device->name); - set_buffer_uptodate(bh); - device->barriers = 0; - /* one reference for submit_bh */ - get_bh(bh); - lock_buffer(bh); - ret = submit_bh(WRITE_SYNC, bh); - } - } else { + if (i == last_barrier && do_barriers) + ret = submit_bh(WRITE_FLUSH_FUA, bh); + else ret = submit_bh(WRITE_SYNC, bh); - } if (ret) errors++; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 32d094002a57..43dc9ea9aef6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1696,7 +1696,7 @@ static void btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len) { blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, - BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); + BLKDEV_IFL_WAIT); } static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index dd318ff280b2..e25e46a8b4e2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -398,7 +398,6 @@ static noinline int device_list_add(const char *path, device->work.func = pending_bios_fn; memcpy(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE); - device->barriers = 1; spin_lock_init(&device->io_lock); device->name = kstrdup(path, GFP_NOFS); if (!device->name) { @@ -462,7 +461,6 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) device->devid = orig_dev->devid; device->work.func = pending_bios_fn; memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid)); - device->barriers = 1; spin_lock_init(&device->io_lock); INIT_LIST_HEAD(&device->dev_list); INIT_LIST_HEAD(&device->dev_alloc_list); @@ -1489,7 +1487,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) trans = btrfs_start_transaction(root, 0); lock_chunks(root); - device->barriers = 1; device->writeable = 1; device->work.func = pending_bios_fn; generate_random_uuid(device->uuid); @@ -3084,7 +3081,6 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root, return NULL; list_add(&device->dev_list, &fs_devices->devices); - device->barriers = 1; device->dev_root = root->fs_info->dev_root; device->devid = devid; device->work.func = 
pending_bios_fn; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 31b0fabdd2ea..2b638b6e4eea 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -42,7 +42,6 @@ struct btrfs_device { int running_pending; u64 generation; - int barriers; int writeable; int in_fs_metadata; -- cgit v1.2.2 From dd3932eddf428571762596e17b65f5dc92ca361b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 16 Sep 2010 20:51:46 +0200 Subject: block: remove BLKDEV_IFL_WAIT All the blkdev_issue_* helpers can only sanely be used for synchronous callers. To issue cache flushes or barriers asynchronously the caller needs to set up a bio by itself with a completion callback to move the asynchronous state machine ahead. So drop the BLKDEV_IFL_WAIT flag that is always specified when calling blkdev_issue_* and also remove the now unused flags argument to blkdev_issue_flush and blkdev_issue_zeroout. For blkdev_issue_discard we need to keep it for the secure discard flag, which gains a more descriptive name and loses the bitops vs flag confusion. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/btrfs/extent-tree.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 43dc9ea9aef6..0b81ecdb101c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1695,8 +1695,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans, static void btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len) { - blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, - BLKDEV_IFL_WAIT); + blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0); } static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, -- cgit v1.2.2 From 6038f373a3dc1f1c26496e60b6c40b164716f07e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sun, 15 Aug 2010 18:52:59 +0200 Subject: llseek: automatically add .llseek fop All file_operations should get a .llseek operation so we can make nonseekable_open the default for future file operations without a .llseek pointer. The three cases that we can automatically detect are no_llseek, seq_lseek and default_llseek. For cases where we can automatically prove that the file offset is always ignored, we use noop_llseek, which maintains the current behavior of not returning an error from a seek. New drivers should normally not use noop_llseek but instead use no_llseek and call nonseekable_open at open time. Existing drivers can be converted to do the same when the maintainer knows for certain that no user code relies on calling seek on the device file. The generated code is often incorrectly indented and right now contains comments that clarify for each added line why a specific variant was chosen. In the version that gets submitted upstream, the comments will be gone and I will manually fix the indentation, because there does not seem to be a way to do that using coccinelle. Some amount of new code is currently sitting in linux-next that should get the same modifications, which I will do at the end of the merge window. Many thanks to Julia Lawall for helping me learn to write a semantic patch that does all this. ===== begin semantic patch ===== // This adds an llseek= method to all file operations, // as a preparation for making no_llseek the default.
// // The rules are // - use no_llseek explicitly if we do nonseekable_open // - use seq_lseek for sequential files // - use default_llseek if we know we access f_pos // - use noop_llseek if we know we don't access f_pos, // but we still want to allow users to call lseek // @ open1 exists @ identifier nested_open; @@ nested_open(...) { <+... nonseekable_open(...) ...+> } @ open exists@ identifier open_f; identifier i, f; identifier open1.nested_open; @@ int open_f(struct inode *i, struct file *f) { <+... ( nonseekable_open(...) | nested_open(...) ) ...+> } @ read disable optional_qualifier exists @ identifier read_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; expression E; identifier func; @@ ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off) { <+... ( *off = E | *off += E | func(..., off, ...) | E = *off ) ...+> } @ read_no_fpos disable optional_qualifier exists @ identifier read_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; @@ ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off) { ... when != off } @ write @ identifier write_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; expression E; identifier func; @@ ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off) { <+... ( *off = E | *off += E | func(..., off, ...) | E = *off ) ...+> } @ write_no_fpos @ identifier write_f; identifier f, p, s, off; type ssize_t, size_t, loff_t; @@ ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off) { ... when != off } @ fops0 @ identifier fops; @@ struct file_operations fops = { ... }; @ has_llseek depends on fops0 @ identifier fops0.fops; identifier llseek_f; @@ struct file_operations fops = { ... .llseek = llseek_f, ... }; @ has_read depends on fops0 @ identifier fops0.fops; identifier read_f; @@ struct file_operations fops = { ... .read = read_f, ... }; @ has_write depends on fops0 @ identifier fops0.fops; identifier write_f; @@ struct file_operations fops = { ... .write = write_f, ... }; @ has_open depends on fops0 @ identifier fops0.fops; identifier open_f; @@ struct file_operations fops = { ... .open = open_f, ... }; // use no_llseek if we call nonseekable_open //////////////////////////////////////////// @ nonseekable1 depends on !has_llseek && has_open @ identifier fops0.fops; identifier nso ~= "nonseekable_open"; @@ struct file_operations fops = { ... .open = nso, ... +.llseek = no_llseek, /* nonseekable */ }; @ nonseekable2 depends on !has_llseek @ identifier fops0.fops; identifier open.open_f; @@ struct file_operations fops = { ... .open = open_f, ... +.llseek = no_llseek, /* open uses nonseekable */ }; // use seq_lseek for sequential files ///////////////////////////////////// @ seq depends on !has_llseek @ identifier fops0.fops; identifier sr ~= "seq_read"; @@ struct file_operations fops = { ... .read = sr, ... +.llseek = seq_lseek, /* we have seq_read */ }; // use default_llseek if there is a readdir /////////////////////////////////////////// @ fops1 depends on !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier readdir_e; @@ // any other fop is used that changes pos struct file_operations fops = { ... .readdir = readdir_e, ... 
+.llseek = default_llseek, /* readdir is present */ }; // use default_llseek if at least one of read/write touches f_pos ///////////////////////////////////////////////////////////////// @ fops2 depends on !fops1 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier read.read_f; @@ // read fops use offset struct file_operations fops = { ... .read = read_f, ... +.llseek = default_llseek, /* read accesses f_pos */ }; @ fops3 depends on !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier write.write_f; @@ // write fops use offset struct file_operations fops = { ... .write = write_f, ... + .llseek = default_llseek, /* write accesses f_pos */ }; // Use noop_llseek if neither read nor write accesses f_pos /////////////////////////////////////////////////////////// @ fops4 depends on !fops1 && !fops2 && !fops3 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier read_no_fpos.read_f; identifier write_no_fpos.write_f; @@ // write fops use offset struct file_operations fops = { ... .write = write_f, .read = read_f, ... +.llseek = noop_llseek, /* read and write both use no f_pos */ }; @ depends on has_write && !has_read && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier write_no_fpos.write_f; @@ struct file_operations fops = { ... .write = write_f, ... +.llseek = noop_llseek, /* write uses no f_pos */ }; @ depends on has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; identifier read_no_fpos.read_f; @@ struct file_operations fops = { ... .read = read_f, ... +.llseek = noop_llseek, /* read uses no f_pos */ }; @ depends on !has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @ identifier fops0.fops; @@ struct file_operations fops = { ... +.llseek = noop_llseek, /* no read or write fn */ }; ===== End semantic patch ===== Signed-off-by: Arnd Bergmann Cc: Julia Lawall Cc: Christoph Hellwig --- fs/btrfs/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 1776dbd8dc98..144f8a5730f5 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -815,6 +815,7 @@ static const struct file_operations btrfs_ctl_fops = { .unlocked_ioctl = btrfs_control_ioctl, .compat_ioctl = btrfs_control_ioctl, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice btrfs_misc = { -- cgit v1.2.2 From a1f765061e1491d5ec467429d0d6adfd9df2f6d9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 16 Sep 2010 14:29:55 -0400 Subject: Btrfs: stop trying to shrink delalloc if there are no inodes to reclaim In very severe ENOSPC cases we can run out of inodes to do delalloc on, which means we'll just keep looping trying to shrink delalloc. Instead, if we fail to shrink delalloc 3 times in a row break out since we're not likely to make any progress. Tested this with a 100mb fs and xfstests test 13. Before the patch it would hang the box, with the patch we get -ENOSPC like we should.
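In outline the loop now backs off exponentially and gives up after three consecutive failed passes (a minimal sketch of the pattern with the btrfs specifics stripped out; try_reclaim() is a stand-in for btrfs_start_one_delalloc_inode(), which returns nonzero when it found an inode to flush):

	int no_reclaim = 0;
	int pause = 1;

	while (1) {
		if (!try_reclaim()) {
			if (no_reclaim > 2)
				break;		/* three misses in a row, give up */
			no_reclaim++;
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(pause);
			pause <<= 1;		/* exponential backoff... */
			if (pause > HZ / 10)
				pause = HZ / 10;	/* ...capped at 100ms */
		} else {
			no_reclaim = 0;		/* made progress, reset */
			pause = 1;
		}
	}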
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 32d094002a57..c6a5d9095d5f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3115,6 +3115,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 reserved; u64 max_reclaim; u64 reclaimed = 0; + int no_reclaim = 0; int pause = 1; int ret; @@ -3131,12 +3132,16 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, while (1) { ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0); if (!ret) { + if (no_reclaim > 2) + break; + no_reclaim++; __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(pause); pause <<= 1; if (pause > HZ / 10) pause = HZ / 10; } else { + no_reclaim = 0; pause = 1; } -- cgit v1.2.2 From bf5fc093c5b625e4259203f1cee7ca73488a5620 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 29 Sep 2010 11:22:36 -0400 Subject: Btrfs: fix the df ioctl to report raid types The new ENOSPC stuff broke the df ioctl since we no longer create separate space infos for each RAID type. So instead, loop through each space info's raid lists so we can get the right RAID information which will allow the df ioctl to tell us RAID types again. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ioctl.c | 100 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 76 insertions(+), 24 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9254b3d58dbe..db0b8fc59235 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1879,6 +1879,22 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) return 0; } +static void get_block_group_info(struct list_head *groups_list, + struct btrfs_ioctl_space_info *space) +{ + struct btrfs_block_group_cache *block_group; + + space->total_bytes = 0; + space->used_bytes = 0; + space->flags = 0; + list_for_each_entry(block_group, groups_list, list) { + space->flags = block_group->flags; + space->total_bytes += block_group->key.offset; + space->used_bytes += + btrfs_block_group_used(&block_group->item); + } +} + long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) { struct btrfs_ioctl_space_args space_args; @@ -1887,27 +1903,56 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) struct btrfs_ioctl_space_info *dest_orig; struct btrfs_ioctl_space_info *user_dest; struct btrfs_space_info *info; + u64 types[] = {BTRFS_BLOCK_GROUP_DATA, + BTRFS_BLOCK_GROUP_SYSTEM, + BTRFS_BLOCK_GROUP_METADATA, + BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA}; + int num_types = 4; int alloc_size; int ret = 0; int slot_count = 0; + int i, c; if (copy_from_user(&space_args, (struct btrfs_ioctl_space_args __user *)arg, sizeof(space_args))) return -EFAULT; - /* first we count slots */ - rcu_read_lock(); - list_for_each_entry_rcu(info, &root->fs_info->space_info, list) - slot_count++; - rcu_read_unlock(); + for (i = 0; i < num_types; i++) { + struct btrfs_space_info *tmp; + + info = NULL; + rcu_read_lock(); + list_for_each_entry_rcu(tmp, &root->fs_info->space_info, + list) { + if (tmp->flags == types[i]) { + info = tmp; + break; + } + } + rcu_read_unlock(); + + if (!info) + continue; + + down_read(&info->groups_sem); + for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { + if (!list_empty(&info->block_groups[c])) + slot_count++; + } + up_read(&info->groups_sem); + } /* space_slots == 0 means they are asking for a count */ if (space_args.space_slots
== 0) { space_args.total_spaces = slot_count; goto out; } + + slot_count = min_t(int, space_args.space_slots, slot_count); + alloc_size = sizeof(*dest) * slot_count; + /* we generally have at most 6 or so space infos, one for each raid * level. So, a whole page should be more than enough for everyone */ @@ -1921,27 +1966,34 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) dest_orig = dest; /* now we have a buffer to copy into */ - rcu_read_lock(); - list_for_each_entry_rcu(info, &root->fs_info->space_info, list) { - /* make sure we don't copy more than we allocated - * in our buffer - */ - if (slot_count == 0) - break; - slot_count--; - - /* make sure userland has enough room in their buffer */ - if (space_args.total_spaces >= space_args.space_slots) - break; + for (i = 0; i < num_types; i++) { + struct btrfs_space_info *tmp; + + info = NULL; + rcu_read_lock(); + list_for_each_entry_rcu(tmp, &root->fs_info->space_info, + list) { + if (tmp->flags == types[i]) { + info = tmp; + break; + } + } + rcu_read_unlock(); - space.flags = info->flags; - space.total_bytes = info->total_bytes; - space.used_bytes = info->bytes_used; - memcpy(dest, &space, sizeof(space)); - dest++; - space_args.total_spaces++; + if (!info) + continue; + down_read(&info->groups_sem); + for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { + if (!list_empty(&info->block_groups[c])) { + get_block_group_info(&info->block_groups[c], + &space); + memcpy(dest, &space, sizeof(space)); + dest++; + space_args.total_spaces++; + } + } + up_read(&info->groups_sem); } - rcu_read_unlock(); user_dest = (struct btrfs_ioctl_space_info *) (arg + sizeof(struct btrfs_ioctl_space_args)); -- cgit v1.2.2 From 89a55897a2fbbceb94480952784004bf23911d38 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 14 Oct 2010 14:52:27 -0400 Subject: Btrfs: fix df regression The new ENOSPC stuff breaks out the raid types which breaks the way we were reporting df to the system. This fixes it back so that Available is the total space available to data and used is the actual bytes used by the filesystem. This means that Available is Total - data used - all of the metadata space. 
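In statfs terms the new accounting works out to roughly this (sketch; bits is s_blocksize_bits, total_used sums disk_used over all space infos, and disk_total/disk_used are the per-space_info counters this patch introduces):

	buf->f_blocks = total_bytes >> bits;
	buf->f_bfree  = buf->f_blocks - (total_used >> bits);
	/* metadata and system space is charged in full (disk_total),
	 * data space only by what is actually used (disk_used) */
	total_used_data = metadata_disk_total + system_disk_total +
			  data_disk_used;
	buf->f_bavail = buf->f_blocks - (total_used_data >> bits);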
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 5 ++++- fs/btrfs/extent-tree.c | 10 ++++++++++ fs/btrfs/super.c | 11 +++++++++-- 3 files changed, 23 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 29c20092847e..014fd52c01bf 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -675,7 +675,8 @@ struct btrfs_block_group_item { struct btrfs_space_info { u64 flags; - u64 total_bytes; /* total bytes in the space */ + u64 total_bytes; /* total bytes in the space, + this doesn't take mirrors into account */ u64 bytes_used; /* total bytes used, this does't take mirrors into account */ u64 bytes_pinned; /* total bytes pinned, will be freed when the @@ -687,6 +688,8 @@ struct btrfs_space_info { u64 bytes_may_use; /* number of bytes that may be used for delalloc/allocations */ u64 disk_used; /* total bytes used on disk */ + u64 disk_total; /* total bytes on disk, takes mirrors into + account */ int full; /* indicates that we cannot allocate any more chunks for this space */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c6a5d9095d5f..4669c6f8a44d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2763,6 +2763,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, if (found) { spin_lock(&found->lock); found->total_bytes += total_bytes; + found->disk_total += total_bytes * factor; found->bytes_used += bytes_used; found->disk_used += bytes_used * factor; found->full = 0; @@ -2782,6 +2783,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA); found->total_bytes = total_bytes; + found->disk_total = total_bytes * factor; found->bytes_used = bytes_used; found->disk_used = bytes_used * factor; found->bytes_pinned = 0; @@ -8095,6 +8097,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_free_cluster *cluster; struct btrfs_key key; int ret; + int factor; root = root->fs_info->extent_root; @@ -8103,6 +8106,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, BUG_ON(!block_group->ro); memcpy(&key, &block_group->key, sizeof(key)); + if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | + BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10)) + factor = 2; + else + factor = 1; /* make sure this block group isn't part of an allocation cluster */ cluster = &root->fs_info->data_alloc_cluster; @@ -8143,6 +8152,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, spin_lock(&block_group->space_info->lock); block_group->space_info->total_bytes -= block_group->key.offset; block_group->space_info->bytes_readonly -= block_group->key.offset; + block_group->space_info->disk_total -= block_group->key.offset * factor; spin_unlock(&block_group->space_info->lock); btrfs_clear_space_info_full(root->fs_info); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f2393b390318..afab6ca14d03 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -716,18 +716,25 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) struct list_head *head = &root->fs_info->space_info; struct btrfs_space_info *found; u64 total_used = 0; + u64 total_used_data = 0; int bits = dentry->d_sb->s_blocksize_bits; __be32 *fsid = (__be32 *)root->fs_info->fsid; rcu_read_lock(); - list_for_each_entry_rcu(found, head, list) + list_for_each_entry_rcu(found, head, list) { + if (found->flags & (BTRFS_BLOCK_GROUP_METADATA | + BTRFS_BLOCK_GROUP_SYSTEM)) + total_used_data += found->disk_total; + else 
+ total_used_data += found->disk_used; total_used += found->disk_used; + } rcu_read_unlock(); buf->f_namelen = BTRFS_NAME_LEN; buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; buf->f_bfree = buf->f_blocks - (total_used >> bits); - buf->f_bavail = buf->f_bfree; + buf->f_bavail = buf->f_blocks - (total_used_data >> bits); buf->f_bsize = dentry->d_sb->s_blocksize; buf->f_type = BTRFS_SUPER_MAGIC; -- cgit v1.2.2 From 6d48755d02b150de7f47e7b4753202f2fc9f990f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 15 Oct 2010 15:13:32 -0400 Subject: Btrfs: fix reservation code for mixed block groups The global reservation stuff tries to add together DATA and METADATA used in order to figure out how much to reserve for everything, but this doesn't work right for mixed block groups. Instead if we have mixed block groups just set data used to 0. Also with mixed block groups we will use bytes_may_use for keeping track of delalloc bytes, so we need to take that into account in our reservation calculations. Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4669c6f8a44d..0f27f7b48804 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3213,7 +3213,8 @@ static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv, spin_lock(&space_info->lock); unused = space_info->bytes_used + space_info->bytes_reserved + - space_info->bytes_pinned + space_info->bytes_readonly; + space_info->bytes_pinned + space_info->bytes_readonly + + space_info->bytes_may_use; if (unused < space_info->total_bytes) unused = space_info->total_bytes - unused; @@ -3507,6 +3508,8 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); spin_lock(&sinfo->lock); + if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) + data_used = 0; meta_used = sinfo->bytes_used; spin_unlock(&sinfo->lock); @@ -3534,7 +3537,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->size = num_bytes; num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + - sinfo->bytes_reserved + sinfo->bytes_readonly; + sinfo->bytes_reserved + sinfo->bytes_readonly + + sinfo->bytes_may_use; if (sinfo->total_bytes > num_bytes) { num_bytes = sinfo->total_bytes - num_bytes; -- cgit v1.2.2 From 0019f10db6f596f3e14a19f9bd7059a1b85b0853 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 15 Oct 2010 15:18:40 -0400 Subject: Btrfs: re-work delalloc flushing Currently we try and flush delalloc, but we only do that in a sort of weak way, which works fine in most cases but if we're under heavy pressure we need to be able to wait for flushing to happen. Also instead of checking the bytes reserved in the block_rsv, check the space info since it is more accurate. The sync option will be used in a future patch. 
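The sync flag picks between merely kicking writeback and actually waiting for it (sketch of the two paths added to btrfs_start_one_delalloc_inode()):

	if (sync) {
		filemap_write_and_wait(inode->i_mapping);
		/* compression submits pages from an async thread, so also
		 * wait on ordered extents to be sure the IO really happened */
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
	} else {
		/* just start the IO, don't wait for it */
		filemap_flush(inode->i_mapping);
	}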
Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 3 ++- fs/btrfs/extent-tree.c | 26 ++++++++++++++------------ fs/btrfs/inode.c | 24 ++++++++++++++++++++++-- 3 files changed, 38 insertions(+), 15 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 014fd52c01bf..f32404db2c5d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2376,7 +2376,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u32 min_type); int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput); +int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, + int sync); int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, struct extent_state **cached_state); int btrfs_writepages(struct address_space *mapping, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0f27f7b48804..2846cebc9427 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3111,9 +3111,10 @@ static int maybe_allocate_chunk(struct btrfs_trans_handle *trans, * shrink metadata reservation for delalloc */ static int shrink_delalloc(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 to_reclaim) + struct btrfs_root *root, u64 to_reclaim, int sync) { struct btrfs_block_rsv *block_rsv; + struct btrfs_space_info *space_info; u64 reserved; u64 max_reclaim; u64 reclaimed = 0; @@ -3122,9 +3123,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, int ret; block_rsv = &root->fs_info->delalloc_block_rsv; - spin_lock(&block_rsv->lock); - reserved = block_rsv->reserved; - spin_unlock(&block_rsv->lock); + space_info = block_rsv->space_info; + spin_lock(&space_info->lock); + reserved = space_info->bytes_reserved; + spin_unlock(&space_info->lock); if (reserved == 0) return 0; @@ -3132,7 +3134,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, max_reclaim = min(reserved, to_reclaim); while (1) { - ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0); + ret = btrfs_start_one_delalloc_inode(root, trans ? 
1 : 0); + ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0, sync); if (!ret) { if (no_reclaim > 2) break; @@ -3147,11 +3149,11 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, pause = 1; } - spin_lock(&block_rsv->lock); - if (reserved > block_rsv->reserved) - reclaimed = reserved - block_rsv->reserved; - reserved = block_rsv->reserved; - spin_unlock(&block_rsv->lock); + spin_lock(&space_info->lock); + if (reserved > space_info->bytes_reserved) + reclaimed += reserved - space_info->bytes_reserved; + reserved = space_info->bytes_reserved; + spin_unlock(&space_info->lock); if (reserved == 0 || reclaimed >= max_reclaim) break; @@ -3180,7 +3182,7 @@ static int should_retry_reserve(struct btrfs_trans_handle *trans, if (trans && trans->transaction->in_commit) return -ENOSPC; - ret = shrink_delalloc(trans, root, num_bytes); + ret = shrink_delalloc(trans, root, num_bytes, 0); if (ret) return ret; @@ -3729,7 +3731,7 @@ again: block_rsv_add_bytes(block_rsv, to_reserve, 1); if (block_rsv->size > 512 * 1024 * 1024) - shrink_delalloc(NULL, root, to_reserve); + shrink_delalloc(NULL, root, to_reserve, 0); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1bff92ad4744..5f9e4fc20a73 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6603,7 +6603,8 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) return 0; } -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput) +int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, + int sync) { struct btrfs_inode *binode; struct inode *inode = NULL; @@ -6625,7 +6626,26 @@ int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput) spin_unlock(&root->fs_info->delalloc_lock); if (inode) { - write_inode_now(inode, 0); + if (sync) { + filemap_write_and_wait(inode->i_mapping); + /* + * We have to do this because compression doesn't + * actually set PG_writeback until it submits the pages + * for IO, which happens in an async thread, so we could + * race and not actually wait for any writeback pages + * because they've not been submitted yet. Technically + * this could still be the case for the ordered stuff + * since the async thread may not have started to do its + * work yet. If this becomes the case then we need to + * figure out a way to make sure that in writepage we + * wait for any async pages to be submitted before + * returning so that fdatawait does what its supposed to + * do. + */ + btrfs_wait_ordered_range(inode, 0, (u64)-1); + } else { + filemap_flush(inode->i_mapping); + } if (delay_iput) btrfs_add_delayed_iput(inode); else -- cgit v1.2.2 From 14ed0ca6e8236f2d264c4a8faec9e3a2b3d04377 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 15 Oct 2010 15:23:48 -0400 Subject: Btrfs: don't allocate chunks as aggressively Because the ENOSPC code over reserves super aggressively we end up allocating chunks way more often than we should. For example with my fs_mark tests on a 2gb fs I can end up with 1gb reserved just for metadata, when only 34mb of that is being used. So instead check to see if the amount of space actually used is less than 30% of the total space, and if so don't allocate a chunk, but only if we have at least 256mb of free space to make sure we don't put too much pressure on free space.
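The added check in should_alloc_chunk() boils down to this (sketch; num_bytes is the space info's total bytes minus its read-only bytes, and div_factor(n, 3) works out to 30% of n):

	if (num_bytes > 256 * 1024 * 1024 &&
	    sinfo->bytes_used < div_factor(num_bytes, 3))
		return 0;	/* plenty of room left, don't add a chunk */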
Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2846cebc9427..aca3314ef8b9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3000,8 +3000,7 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) rcu_read_unlock(); } -static int should_alloc_chunk(struct btrfs_space_info *sinfo, - u64 alloc_bytes) +static int should_alloc_chunk(struct btrfs_space_info *sinfo, u64 alloc_bytes) { u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; @@ -3013,6 +3012,10 @@ static int should_alloc_chunk(struct btrfs_space_info *sinfo, alloc_bytes < div_factor(num_bytes, 8)) return 0; + if (num_bytes > 256 * 1024 * 1024 && + sinfo->bytes_used < div_factor(num_bytes, 3)) + return 0; + return 1; } -- cgit v1.2.2 From 8bb8ab2e93f9c3c9453e13be0f37d344a32a3a6d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 15 Oct 2010 16:52:49 -0400 Subject: Btrfs: rework how we reserve metadata bytes With multi-threaded writes we were getting ENOSPC early because somebody would come in, start flushing delalloc because they couldn't make their reservation, and in the meantime other threads would come in and use the space that was getting freed up, so when the original thread went to check to see if they had space they didn't and they'd return ENOSPC. So instead if we have some free space but not enough for our reservation, take the reservation and then start doing the flushing. The only time we don't take reservations is when we've already overcommitted our space, that way we don't have people who come late to the party way overcommitting ourselves. This also moves all of the retrying and flushing code into reserve_metadata_bytes so it's all uniform. This keeps my fs_mark test from returning -ENOSPC as soon as it starts and actually lets me fill up the disk.
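The heart of the new reserve_metadata_bytes() is this decision (simplified sketch; unused is the sum of the used, reserved, pinned, read-only and may-use counters):

	if (unused <= space_info->total_bytes &&
	    space_info->total_bytes - unused >= orig_bytes) {
		/* not overcommitted and the bytes fit: take the
		 * reservation immediately, flush afterwards if needed */
		space_info->bytes_reserved += orig_bytes;
		ret = 0;
	} else if (!reserved) {
		/* can't satisfy it yet: still park the bytes under
		 * bytes_reserved so the space we're about to reclaim
		 * can't be stolen by another thread while we flush */
		space_info->bytes_reserved += orig_bytes;
		reserved = true;
	}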
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 4 +- fs/btrfs/extent-tree.c | 238 +++++++++++++++++++++++++++---------------------- fs/btrfs/relocation.c | 14 +-- fs/btrfs/transaction.c | 7 +- 4 files changed, 136 insertions(+), 127 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f32404db2c5d..47bc66e34da7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2082,7 +2082,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes); void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes); int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, - int num_items, int *retries); + int num_items); void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, @@ -2103,7 +2103,7 @@ void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info, int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, - u64 num_bytes, int *retries); + u64 num_bytes); int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index aca3314ef8b9..180a50146ddf 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3078,38 +3078,6 @@ out: return ret; } -static int maybe_allocate_chunk(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_space_info *sinfo, u64 num_bytes) -{ - int ret; - int end_trans = 0; - - if (sinfo->full) - return 0; - - spin_lock(&sinfo->lock); - ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024); - spin_unlock(&sinfo->lock); - if (!ret) - return 0; - - if (!trans) { - trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - end_trans = 1; - } - - ret = do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes + 2 * 1024 * 1024, - get_alloc_profile(root, sinfo->flags), 0); - - if (end_trans) - btrfs_end_transaction(trans, root); - - return ret == 1 ? 1 : 0; -} - /* * shrink metadata reservation for delalloc */ @@ -3167,79 +3135,138 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, return reclaimed >= to_reclaim; } -static int should_retry_reserve(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, - u64 num_bytes, int *retries) +/* + * Retries tells us how many times we've called reserve_metadata_bytes. The + * idea is if this is the first call (retries == 0) then we will add to our + * reserved count if we can't make the allocation in order to hold our place + * while we go and try and free up space. That way for retries > 1 we don't try + * and add space, we just check to see if the amount of unused space is >= the + * total space, meaning that our reservation is valid. + * + * However if we don't intend to retry this reservation, pass -1 as retries so + * that it short circuits this logic. 
+ */ +static int reserve_metadata_bytes(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 orig_bytes, int flush) { struct btrfs_space_info *space_info = block_rsv->space_info; - int ret; + u64 unused; + u64 num_bytes = orig_bytes; + int retries = 0; + int ret = 0; + bool reserved = false; - if ((*retries) > 2) - return -ENOSPC; +again: + ret = -ENOSPC; + if (reserved) + num_bytes = 0; - ret = maybe_allocate_chunk(trans, root, space_info, num_bytes); - if (ret) - return 1; + spin_lock(&space_info->lock); + unused = space_info->bytes_used + space_info->bytes_reserved + + space_info->bytes_pinned + space_info->bytes_readonly + + space_info->bytes_may_use; - if (trans && trans->transaction->in_commit) - return -ENOSPC; + /* + * The idea here is that we've not already over-reserved the block group + * then we can go ahead and save our reservation first and then start + * flushing if we need to. Otherwise if we've already overcommitted + * lets start flushing stuff first and then come back and try to make + * our reservation. + */ + if (unused <= space_info->total_bytes) { + unused = space_info->total_bytes - unused; + if (unused >= num_bytes) { + if (!reserved) + space_info->bytes_reserved += orig_bytes; + ret = 0; + } else { + /* + * Ok set num_bytes to orig_bytes since we aren't + * overocmmitted, this way we only try and reclaim what + * we need. + */ + num_bytes = orig_bytes; + } + } else { + /* + * Ok we're over committed, set num_bytes to the overcommitted + * amount plus the amount of bytes that we need for this + * reservation. + */ + num_bytes = unused - space_info->total_bytes + + (orig_bytes * (retries + 1)); + } - ret = shrink_delalloc(trans, root, num_bytes, 0); - if (ret) - return ret; + /* + * Couldn't make our reservation, save our place so while we're trying + * to reclaim space we can actually use it instead of somebody else + * stealing it from us. + */ + if (ret && !reserved) { + space_info->bytes_reserved += orig_bytes; + reserved = true; + } - spin_lock(&space_info->lock); - if (space_info->bytes_pinned < num_bytes) - ret = 1; spin_unlock(&space_info->lock); - if (ret) - return -ENOSPC; - - (*retries)++; + if (!ret) + return 0; - if (trans) - return -EAGAIN; + if (!flush) + goto out; - trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - ret = btrfs_commit_transaction(trans, root); - BUG_ON(ret); + /* + * We do synchronous shrinking since we don't actually unreserve + * metadata until after the IO is completed. + */ + ret = shrink_delalloc(trans, root, num_bytes, 1); + if (ret > 0) + return 0; + else if (ret < 0) + goto out; - return 1; -} + /* + * So if we were overcommitted it's possible that somebody else flushed + * out enough space and we simply didn't have enough space to reclaim, + * so go back around and try again. + */ + if (retries < 2) { + retries++; + goto again; + } -static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv, - u64 num_bytes) -{ - struct btrfs_space_info *space_info = block_rsv->space_info; - u64 unused; - int ret = -ENOSPC; + spin_lock(&space_info->lock); + /* + * Not enough space to be reclaimed, don't bother committing the + * transaction.
+ */ + if (space_info->bytes_pinned < orig_bytes) + ret = -ENOSPC; + spin_unlock(&space_info->lock); + if (ret) + goto out; - if (unused < space_info->total_bytes) - unused = space_info->total_bytes - unused; - else - unused = 0; + ret = -EAGAIN; + if (trans) + goto out; - if (unused >= num_bytes) { - if (block_rsv->priority >= 10) { - space_info->bytes_reserved += num_bytes; - ret = 0; - } else { - if ((unused + block_rsv->reserved) * - block_rsv->priority >= - (num_bytes + block_rsv->reserved) * 10) { - space_info->bytes_reserved += num_bytes; - ret = 0; - } - } + + ret = -ENOSPC; + trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + goto out; + ret = btrfs_commit_transaction(trans, root); + if (!ret) + goto again; + +out: + if (reserved) { + spin_lock(&space_info->lock); + space_info->bytes_reserved -= orig_bytes; + spin_unlock(&space_info->lock); } - spin_unlock(&space_info->lock); return ret; } @@ -3383,23 +3410,19 @@ void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info, int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, - u64 num_bytes, int *retries) + u64 num_bytes) { int ret; if (num_bytes == 0) return 0; -again: - ret = reserve_metadata_bytes(block_rsv, num_bytes); + + ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 1); return 0; } - ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries); - if (ret > 0) - goto again; - return ret; } @@ -3434,7 +3457,8 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, return 0; if (block_rsv->refill_used) { - ret = reserve_metadata_bytes(block_rsv, num_bytes); + ret = reserve_metadata_bytes(trans, root, block_rsv, + num_bytes, 0); if (!ret) { block_rsv_add_bytes(block_rsv, num_bytes, 0); return 0; @@ -3614,7 +3638,7 @@ static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, - int num_items, int *retries) + int num_items) { u64 num_bytes; int ret; @@ -3624,7 +3648,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, num_bytes = calc_trans_metadata_size(root, num_items); ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, - num_bytes, retries); + num_bytes); if (!ret) { trans->bytes_reserved += num_bytes; trans->block_rsv = &root->fs_info->trans_block_rsv; @@ -3698,14 +3722,13 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; u64 to_reserve; int nr_extents; - int retries = 0; int ret; if (btrfs_transaction_in_commit(root->fs_info)) schedule_timeout(1); num_bytes = ALIGN(num_bytes, root->sectorsize); -again: + spin_lock(&BTRFS_I(inode)->accounting_lock); nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; if (nr_extents > BTRFS_I(inode)->reserved_extents) { @@ -3715,18 +3738,14 @@ again: nr_extents = 0; to_reserve = 0; } + spin_unlock(&BTRFS_I(inode)->accounting_lock); to_reserve += calc_csum_metadata_size(inode, num_bytes); - ret = reserve_metadata_bytes(block_rsv, to_reserve); - if (ret) { - spin_unlock(&BTRFS_I(inode)->accounting_lock); - ret = should_retry_reserve(NULL, root, block_rsv, to_reserve, - &retries); - if (ret > 0) - goto again; + ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); + if (ret) return ret; - } + spin_lock(&BTRFS_I(inode)->accounting_lock); 
BTRFS_I(inode)->reserved_extents += nr_extents; atomic_inc(&BTRFS_I(inode)->outstanding_extents); spin_unlock(&BTRFS_I(inode)->accounting_lock); @@ -5325,7 +5344,8 @@ use_block_rsv(struct btrfs_trans_handle *trans, block_rsv = get_block_rsv(trans, root); if (block_rsv->size == 0) { - ret = reserve_metadata_bytes(block_rsv, blocksize); + ret = reserve_metadata_bytes(trans, root, block_rsv, + blocksize, 0); if (ret) return ERR_PTR(ret); return block_rsv; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index b37d723b9d4a..39adb68a653f 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -178,8 +178,6 @@ struct reloc_control { u64 search_start; u64 extents_found; - int block_rsv_retries; - unsigned int stage:8; unsigned int create_reloc_tree:1; unsigned int merge_reloc_tree:1; @@ -2133,7 +2131,6 @@ int prepare_to_merge(struct reloc_control *rc, int err) LIST_HEAD(reloc_roots); u64 num_bytes = 0; int ret; - int retries = 0; mutex_lock(&root->fs_info->trans_mutex); rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; @@ -2143,7 +2140,7 @@ again: if (!err) { num_bytes = rc->merging_rsv_size; ret = btrfs_block_rsv_add(NULL, root, rc->block_rsv, - num_bytes, &retries); + num_bytes); if (ret) err = ret; } @@ -2155,7 +2152,6 @@ again: btrfs_end_transaction(trans, rc->extent_root); btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes); - retries = 0; goto again; } } @@ -2405,15 +2401,13 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans, num_bytes = calcu_metadata_size(rc, node, 1) * 2; trans->block_rsv = rc->block_rsv; - ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes, - &rc->block_rsv_retries); + ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes); if (ret) { if (ret == -EAGAIN) rc->commit_transaction = 1; return ret; } - rc->block_rsv_retries = 0; return 0; } @@ -3554,8 +3548,7 @@ int prepare_to_relocate(struct reloc_control *rc) * is no reservation in transaction handle. 
*/ ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv, - rc->extent_root->nodesize * 256, - &rc->block_rsv_retries); + rc->extent_root->nodesize * 256); if (ret) return ret; @@ -3567,7 +3560,6 @@ int prepare_to_relocate(struct reloc_control *rc) rc->extents_found = 0; rc->nodes_relocated = 0; rc->merging_rsv_size = 0; - rc->block_rsv_retries = 0; rc->create_reloc_tree = 1; set_reloc_control(rc); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 66e4c66cc63b..abbec80aaa44 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -179,7 +179,6 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, { struct btrfs_trans_handle *h; struct btrfs_transaction *cur_trans; - int retries = 0; int ret; again: h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); @@ -212,8 +211,7 @@ again: } if (num_items > 0) { - ret = btrfs_trans_reserve_metadata(h, root, num_items, - &retries); + ret = btrfs_trans_reserve_metadata(h, root, num_items); if (ret == -EAGAIN) { btrfs_commit_transaction(h, root); goto again; } @@ -836,7 +834,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct extent_buffer *tmp; struct extent_buffer *old; int ret; - int retries = 0; u64 to_reserve = 0; u64 index = 0; u64 objectid; @@ -858,7 +855,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, if (to_reserve > 0) { ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv, - to_reserve, &retries); + to_reserve); if (ret) { pending->error = ret; goto fail; -- cgit v1.2.2 From 0e78340f3c1fc603e8016c8ac304766bcc65506e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 22 Oct 2010 15:26:53 -0400 Subject: Btrfs: fix error handling in btrfs_get_sb If we failed to find the root subvol id, or the subvol=, we would deactivate the locked super and close the devices. The problem is at this point we have gotten the SB all set up, which includes setting super_operations, so when we'd deactivate the super, we'd do a close_ctree() which closes the devices, so we'd end up closing the devices twice. So if you do something like this mount /dev/sda1 /mnt/test1 mount /dev/sda1 /mnt/test2 -o subvol=xxx umount /mnt/test1 it would blow up (if subvol xxx doesn't exist). This patch fixes that problem.
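The rule being enforced (sketch): once deactivate_locked_super() has run, close_ctree() has already closed the devices, so the error path must skip the error_close_devices label and only free what the VFS teardown does not know about:

	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		deactivate_locked_super(s);	/* closes the devices itself */
		goto error_free_subvol_name;	/* not error_close_devices */
	}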
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/super.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index afab6ca14d03..d1867cda92a7 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -629,7 +629,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, if (IS_ERR(root)) { error = PTR_ERR(root); deactivate_locked_super(s); - goto error; + goto error_free_subvol_name; } /* if they gave us a subvolume name bind mount into that */ if (strcmp(subvol_name, ".")) { @@ -643,14 +643,14 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, deactivate_locked_super(s); error = PTR_ERR(new_root); dput(root); - goto error_close_devices; + goto error_free_subvol_name; } if (!new_root->d_inode) { dput(root); dput(new_root); deactivate_locked_super(s); error = -ENXIO; - goto error_close_devices; + goto error_free_subvol_name; } dput(root); root = new_root; @@ -668,7 +668,6 @@ error_close_devices: btrfs_close_devices(fs_devices); error_free_subvol_name: kfree(subvol_name); -error: return error; } -- cgit v1.2.2 From 1d3382cbf02986e4833849f528d451367ea0b4cb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 23 Oct 2010 15:19:20 -0400 Subject: new helper: inode_unhashed() note: for race-free uses you need inode_lock held Signed-off-by: Al Viro --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c03864406af3..f6f2a0da2695 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3849,7 +3849,7 @@ again: p = &root->inode_tree.rb_node; parent = NULL; - if (hlist_unhashed(&inode->i_hash)) + if (inode_unhashed(inode)) return; spin_lock(&root->inode_lock); -- cgit v1.2.2 From 7de9c6ee3ecffd99e1628e81a5ea5468f7581a1f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 23 Oct 2010 11:11:40 -0400 Subject: new helper: ihold() Clones an existing reference to inode; caller must already hold one. Signed-off-by: Al Viro --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f6f2a0da2695..64f99cf69ce0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4758,7 +4758,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, } btrfs_set_trans_block_group(trans, dir); - atomic_inc(&inode->i_count); + ihold(inode); err = btrfs_add_nondir(trans, dentry, inode, 1, index); -- cgit v1.2.2 From 382279336f428c80f344edfc30d53797e3e76146 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 26 Oct 2010 12:52:53 -0400 Subject: Btrfs: set trans to null in reserve_metadata_bytes if we commit the transaction btrfs_commit_transaction will free our trans, but because we pass trans to shrink_delalloc we could possibly have a use after free situation. So instead if we commit the transaction, set trans to null and set committed to true so we don't keep trying to commit a transaction. This fixes a panic I could reproduce at will.
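The fix follows the usual pattern for calls that consume their argument (sketch of the retry tail of reserve_metadata_bytes()): clear the pointer the moment it has been handed off, and remember that a commit already happened:

	ret = btrfs_commit_transaction(trans, root);
	if (!ret) {
		trans = NULL;		/* freed by the commit, don't touch it again */
		committed = true;	/* never try to commit twice */
		goto again;
	}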
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 180a50146ddf..e2dfd4ab3b9b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3157,6 +3157,7 @@ static int reserve_metadata_bytes(struct btrfs_trans_handle *trans, int retries = 0; int ret = 0; bool reserved = false; + bool committed = false; again: ret = -ENOSPC; @@ -3249,17 +3250,19 @@ again: goto out; ret = -EAGAIN; - if (trans) + if (trans || committed) goto out; - ret = -ENOSPC; trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) goto out; ret = btrfs_commit_transaction(trans, root); - if (!ret) + if (!ret) { + trans = NULL; + committed = true; goto again; + } out: if (reserved) { -- cgit v1.2.2 From e9bb7f10d3617304ef94ff7aa8fefbce3078f08b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 26 Oct 2010 12:55:03 -0400 Subject: Btrfs: remove warn_on from use_block_rsv Because btrfs_dirty_inode does a btrfs_join_transaction, it doesn't actually reserve space. It does this so we can try and dirty the inode quickly without having to deal with the ENOSPC problems. But if it does get back ENOSPC it handles it properly. The problem is use_block_rsv does a WARN_ON whenever this case happens, even though btrfs_dirty_inode takes it into account and actually expects to get -ENOSPC if things are particularly tight. So instead just remove the warning. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e2dfd4ab3b9b..33785333f293 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5358,11 +5358,6 @@ use_block_rsv(struct btrfs_trans_handle *trans, if (!ret) return block_rsv; - WARN_ON(1); - printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n", - block_rsv->size, block_rsv->reserved, - block_rsv->freed[0], block_rsv->freed[1]); - return ERR_PTR(-ENOSPC); } -- cgit v1.2.2 From 0af3d00bad38d3bb9912a60928ad0669f17bdb76 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 21 Jun 2010 14:48:16 -0400 Subject: Btrfs: create special free space cache inode In order to save free space cache, we need an inode to hold the data, and we need a special item to point at the right inode for the right block group. So first, create a special item that will point to the right inode, and the number of extent entries we will have and the number of bitmaps we will have. We truncate and pre-allocate space every time to make sure it's uptodate. This feature will be turned on as soon as you mount with -o space_cache, however it is safe to boot into old kernels, they will just generate the cache the old-fashioned way. When you boot back into a newer kernel we will notice that we modified the filesystem but not the cache, and automatically discard the cache.
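For illustration, a consumer of the new format would read the header back through the accessors this patch defines (hypothetical sketch; the load-side staleness check is not part of this patch, and leaf, slot and disk_super are assumed to be at hand):

	struct btrfs_free_space_header *header;

	header = btrfs_item_ptr(leaf, slot, struct btrfs_free_space_header);
	if (btrfs_free_space_generation(leaf, header) !=
	    btrfs_super_cache_generation(disk_super)) {
		/* cache written by an old kernel or never committed:
		 * ignore it and rebuild the free space the slow way */
	}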
Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 74 ++++++++++++-- fs/btrfs/disk-io.c | 3 +- fs/btrfs/extent-tree.c | 231 ++++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/free-space-cache.c | 155 +++++++++++++++++++++++++++++ fs/btrfs/free-space-cache.h | 11 +++ fs/btrfs/inode.c | 95 ++++++++++++++---- fs/btrfs/relocation.c | 91 ++++++++++++++++- fs/btrfs/super.c | 7 +- fs/btrfs/transaction.c | 43 ++++++--- fs/btrfs/transaction.h | 4 + 10 files changed, 668 insertions(+), 46 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index eaf286abad17..46f52e1beade 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -99,6 +99,9 @@ struct btrfs_ordered_sum; */ #define BTRFS_EXTENT_CSUM_OBJECTID -10ULL +/* For storing free space cache */ +#define BTRFS_FREE_SPACE_OBJECTID -11ULL + /* dummy objectid represents multiple objectids */ #define BTRFS_MULTIPLE_OBJECTIDS -255ULL @@ -265,6 +268,22 @@ struct btrfs_chunk { /* additional stripes go here */ } __attribute__ ((__packed__)); +#define BTRFS_FREE_SPACE_EXTENT 1 +#define BTRFS_FREE_SPACE_BITMAP 2 + +struct btrfs_free_space_entry { + __le64 offset; + __le64 bytes; + u8 type; +} __attribute__ ((__packed__)); + +struct btrfs_free_space_header { + struct btrfs_disk_key location; + __le64 generation; + __le64 num_entries; + __le64 num_bitmaps; +} __attribute__ ((__packed__)); + static inline unsigned long btrfs_chunk_item_size(int num_stripes) { BUG_ON(num_stripes == 0); @@ -365,8 +384,10 @@ struct btrfs_super_block { char label[BTRFS_LABEL_SIZE]; + __le64 cache_generation; + /* future expansion */ - __le64 reserved[32]; + __le64 reserved[31]; u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE]; } __attribute__ ((__packed__)); @@ -375,12 +396,12 @@ struct btrfs_super_block { * ones specified below then we will fail to mount */ #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) -#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (2ULL << 0) +#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) #define BTRFS_FEATURE_COMPAT_SUPP 0ULL #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL -#define BTRFS_FEATURE_INCOMPAT_SUPP \ - (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ +#define BTRFS_FEATURE_INCOMPAT_SUPP \ + (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL) /* @@ -750,6 +771,14 @@ enum btrfs_caching_type { BTRFS_CACHE_FINISHED = 2, }; +enum btrfs_disk_cache_state { + BTRFS_DC_WRITTEN = 0, + BTRFS_DC_ERROR = 1, + BTRFS_DC_CLEAR = 2, + BTRFS_DC_SETUP = 3, + BTRFS_DC_NEED_WRITE = 4, +}; + struct btrfs_caching_control { struct list_head list; struct mutex mutex; @@ -763,6 +792,7 @@ struct btrfs_block_group_cache { struct btrfs_key key; struct btrfs_block_group_item item; struct btrfs_fs_info *fs_info; + struct inode *inode; spinlock_t lock; u64 pinned; u64 reserved; @@ -773,8 +803,11 @@ struct btrfs_block_group_cache { int extents_thresh; int free_extents; int total_bitmaps; - int ro; - int dirty; + int ro:1; + int dirty:1; + int iref:1; + + int disk_cache_state; /* cache tracking stuff */ int cached; @@ -1192,6 +1225,7 @@ struct btrfs_root { #define BTRFS_MOUNT_NOSSD (1 << 9) #define BTRFS_MOUNT_DISCARD (1 << 10) #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) +#define BTRFS_MOUNT_SPACE_CACHE (1 << 12) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -1665,6 +1699,27 @@ static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, write_eb_member(eb, item, struct btrfs_dir_item, location, key); } 
+BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, + num_entries, 64); +BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, + num_bitmaps, 64); +BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, + generation, 64); + +static inline void btrfs_free_space_key(struct extent_buffer *eb, + struct btrfs_free_space_header *h, + struct btrfs_disk_key *key) +{ + read_eb_member(eb, h, struct btrfs_free_space_header, location, key); +} + +static inline void btrfs_set_free_space_key(struct extent_buffer *eb, + struct btrfs_free_space_header *h, + struct btrfs_disk_key *key) +{ + write_eb_member(eb, h, struct btrfs_free_space_header, location, key); +} + /* struct btrfs_disk_key */ BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, objectid, 64); @@ -1876,6 +1931,8 @@ BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, incompat_flags, 64); BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, csum_type, 16); +BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, + cache_generation, 64); static inline int btrfs_super_csum_size(struct btrfs_super_block *s) { @@ -2115,6 +2172,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, struct btrfs_block_group_cache *cache); int btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache); +void btrfs_put_block_group_cache(struct btrfs_fs_info *info); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); @@ -2426,6 +2484,10 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root); int btrfs_prealloc_file_range(struct inode *inode, int mode, u64 start, u64 num_bytes, u64 min_size, loff_t actual_len, u64 *alloc_hint); +int btrfs_prealloc_file_range_trans(struct inode *inode, + struct btrfs_trans_handle *trans, int mode, + u64 start, u64 num_bytes, u64 min_size, + loff_t actual_len, u64 *alloc_hint); extern const struct dentry_operations btrfs_dentry_operations; /* ioctl.c */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 64f10082f048..45cf64fc1e3e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1685,7 +1685,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, __setup_root(4096, 4096, 4096, 4096, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); - bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) goto fail_iput; @@ -1993,6 +1992,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { down_read(&fs_info->cleanup_work_sem); btrfs_orphan_cleanup(fs_info->fs_root); + btrfs_orphan_cleanup(fs_info->tree_root); up_read(&fs_info->cleanup_work_sem); } @@ -2421,6 +2421,7 @@ int close_ctree(struct btrfs_root *root) fs_info->closing = 1; smp_mb(); + btrfs_put_block_group_cache(fs_info); if (!(fs_info->sb->s_flags & MS_RDONLY)) { ret = btrfs_commit_super(root); if (ret) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 32d094002a57..aab40fb3faed 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2688,6 +2688,109 @@ next_block_group(struct btrfs_root *root, return cache; } +static int cache_save_setup(struct btrfs_block_group_cache *block_group, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + struct btrfs_root *root = block_group->fs_info->tree_root; + struct inode *inode = NULL; + u64 alloc_hint = 0; + int num_pages = 0; + int retries = 0; + int ret = 0; + + /* + * If this block group is smaller than 100 megs don't bother caching 
the + * block group. + */ + if (block_group->key.offset < (100 * 1024 * 1024)) { + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_WRITTEN; + spin_unlock(&block_group->lock); + return 0; + } + +again: + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { + ret = PTR_ERR(inode); + btrfs_release_path(root, path); + goto out; + } + + if (IS_ERR(inode)) { + BUG_ON(retries); + retries++; + + if (block_group->ro) + goto out_free; + + ret = create_free_space_inode(root, trans, block_group, path); + if (ret) + goto out_free; + goto again; + } + + /* + * We want to set the generation to 0, that way if anything goes wrong + * from here on out we know not to trust this cache when we load up next + * time. + */ + BTRFS_I(inode)->generation = 0; + ret = btrfs_update_inode(trans, root, inode); + WARN_ON(ret); + + if (i_size_read(inode) > 0) { + ret = btrfs_truncate_free_space_cache(root, trans, path, + inode); + if (ret) + goto out_put; + } + + spin_lock(&block_group->lock); + if (block_group->cached != BTRFS_CACHE_FINISHED) { + spin_unlock(&block_group->lock); + goto out_put; + } + spin_unlock(&block_group->lock); + + num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); + if (!num_pages) + num_pages = 1; + + /* + * Just to make absolutely sure we have enough space, we're going to + * preallocate 12 pages worth of space for each block group. In + * practice we ought to use at most 8, but we need extra space so we can + * add our header and have a terminator between the extents and the + * bitmaps. + */ + num_pages *= 16; + num_pages *= PAGE_CACHE_SIZE; + + ret = btrfs_check_data_free_space(inode, num_pages); + if (ret) + goto out_put; + + ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, + num_pages, num_pages, + &alloc_hint); + btrfs_free_reserved_data_space(inode, num_pages); +out_put: + iput(inode); +out_free: + btrfs_release_path(root, path); +out: + spin_lock(&block_group->lock); + if (ret) + block_group->disk_cache_state = BTRFS_DC_ERROR; + else + block_group->disk_cache_state = BTRFS_DC_SETUP; + spin_unlock(&block_group->lock); + + return ret; +} + int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -2700,6 +2803,25 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; +again: + while (1) { + cache = btrfs_lookup_first_block_group(root->fs_info, last); + while (cache) { + if (cache->disk_cache_state == BTRFS_DC_CLEAR) + break; + cache = next_block_group(root, cache); + } + if (!cache) { + if (last == 0) + break; + last = 0; + continue; + } + err = cache_save_setup(cache, trans, path); + last = cache->key.objectid + cache->key.offset; + btrfs_put_block_group(cache); + } + while (1) { if (last == 0) { err = btrfs_run_delayed_refs(trans, root, @@ -2709,6 +2831,11 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, cache = btrfs_lookup_first_block_group(root->fs_info, last); while (cache) { + if (cache->disk_cache_state == BTRFS_DC_CLEAR) { + btrfs_put_block_group(cache); + goto again; + } + if (cache->dirty) break; cache = next_block_group(root, cache); @@ -2883,11 +3010,16 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes) struct btrfs_space_info *data_sinfo; struct btrfs_root *root = BTRFS_I(inode)->root; u64 used; - int ret = 0, committed = 0; + int ret = 0, committed = 0, alloc_chunk = 1; /* make sure bytes are sectorsize aligned */ bytes = (bytes + 
root->sectorsize - 1) & ~((u64)root->sectorsize - 1); + if (root == root->fs_info->tree_root) { + alloc_chunk = 0; + committed = 1; + } + data_sinfo = BTRFS_I(inode)->space_info; if (!data_sinfo) goto alloc; @@ -2906,7 +3038,7 @@ again: * if we don't have enough free bytes in this space then we need * to alloc a new chunk. */ - if (!data_sinfo->full) { + if (!data_sinfo->full && alloc_chunk) { u64 alloc_target; data_sinfo->force_alloc = 1; @@ -3777,12 +3909,12 @@ static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int alloc) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group_cache *cache = NULL; struct btrfs_fs_info *info = root->fs_info; - int factor; u64 total = num_bytes; u64 old_val; u64 byte_in_group; + int factor; /* block accounting for super block */ spin_lock(&info->delalloc_lock); @@ -3804,11 +3936,17 @@ static int update_block_group(struct btrfs_trans_handle *trans, factor = 2; else factor = 1; + byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); + + if (btrfs_super_cache_generation(&info->super_copy) != 0 && + cache->disk_cache_state < BTRFS_DC_CLEAR) + cache->disk_cache_state = BTRFS_DC_CLEAR; + cache->dirty = 1; old_val = btrfs_block_group_used(&cache->item); num_bytes = min(total, cache->key.offset - byte_in_group); @@ -7814,6 +7952,40 @@ out: return ret; } +void btrfs_put_block_group_cache(struct btrfs_fs_info *info) +{ + struct btrfs_block_group_cache *block_group; + u64 last = 0; + + while (1) { + struct inode *inode; + + block_group = btrfs_lookup_first_block_group(info, last); + while (block_group) { + spin_lock(&block_group->lock); + if (block_group->iref) + break; + spin_unlock(&block_group->lock); + block_group = next_block_group(info->tree_root, + block_group); + } + if (!block_group) { + if (last == 0) + break; + last = 0; + continue; + } + + inode = block_group->inode; + block_group->iref = 0; + block_group->inode = NULL; + spin_unlock(&block_group->lock); + iput(inode); + last = block_group->key.objectid + block_group->key.offset; + btrfs_put_block_group(block_group); + } +} + int btrfs_free_block_groups(struct btrfs_fs_info *info) { struct btrfs_block_group_cache *block_group; @@ -7897,6 +8069,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf; + int need_clear = 0; + u64 cache_gen; root = info->extent_root; key.objectid = 0; @@ -7906,6 +8080,11 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (!path) return -ENOMEM; + cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); + if (cache_gen != 0 && + btrfs_super_generation(&root->fs_info->super_copy) != cache_gen) + need_clear = 1; + while (1) { ret = find_first_block_group(root, path, &key); if (ret > 0) @@ -7928,6 +8107,9 @@ int btrfs_read_block_groups(struct btrfs_root *root) INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); + if (need_clear) + cache->disk_cache_state = BTRFS_DC_CLEAR; + /* * we only want to have 32k of ram per block group for keeping * track of free space, and if we pass 1/2 of that we want to @@ -8032,6 +8214,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->key.offset = size; cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; cache->sectorsize = root->sectorsize; + cache->fs_info = root->fs_info; /* * we only want to have 32k of ram per block group for keeping track @@ 
-8088,7 +8271,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_path *path; struct btrfs_block_group_cache *block_group; struct btrfs_free_cluster *cluster; + struct btrfs_root *tree_root = root->fs_info->tree_root; struct btrfs_key key; + struct inode *inode; int ret; root = root->fs_info->extent_root; @@ -8097,8 +8282,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, BUG_ON(!block_group); BUG_ON(!block_group->ro); - memcpy(&key, &block_group->key, sizeof(key)); - /* make sure this block group isn't part of an allocation cluster */ cluster = &root->fs_info->data_alloc_cluster; spin_lock(&cluster->refill_lock); @@ -8117,6 +8300,40 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); BUG_ON(!path); + inode = lookup_free_space_inode(root, block_group, path); + if (!IS_ERR(inode)) { + btrfs_orphan_add(trans, inode); + clear_nlink(inode); + /* One for the block groups ref */ + spin_lock(&block_group->lock); + if (block_group->iref) { + block_group->iref = 0; + block_group->inode = NULL; + spin_unlock(&block_group->lock); + iput(inode); + } else { + spin_unlock(&block_group->lock); + } + /* One for our lookup ref */ + iput(inode); + } + + key.objectid = BTRFS_FREE_SPACE_OBJECTID; + key.offset = block_group->key.objectid; + key.type = 0; + + ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); + if (ret < 0) + goto out; + if (ret > 0) + btrfs_release_path(tree_root, path); + if (ret == 0) { + ret = btrfs_del_item(trans, tree_root, path); + if (ret) + goto out; + btrfs_release_path(tree_root, path); + } + spin_lock(&root->fs_info->block_group_cache_lock); rb_erase(&block_group->cache_node, &root->fs_info->block_group_cache_tree); @@ -8140,6 +8357,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, block_group->space_info->bytes_readonly -= block_group->key.offset; spin_unlock(&block_group->space_info->lock); + memcpy(&key, &block_group->key, sizeof(key)); + btrfs_clear_space_info_full(root->fs_info); btrfs_put_block_group(block_group); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f488fac04d99..05efcc7061a7 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -23,10 +23,165 @@ #include "ctree.h" #include "free-space-cache.h" #include "transaction.h" +#include "disk-io.h" #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) +struct inode *lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_block_group_cache + *block_group, struct btrfs_path *path) +{ + struct btrfs_key key; + struct btrfs_key location; + struct btrfs_disk_key disk_key; + struct btrfs_free_space_header *header; + struct extent_buffer *leaf; + struct inode *inode = NULL; + int ret; + + spin_lock(&block_group->lock); + if (block_group->inode) + inode = igrab(block_group->inode); + spin_unlock(&block_group->lock); + if (inode) + return inode; + + key.objectid = BTRFS_FREE_SPACE_OBJECTID; + key.offset = block_group->key.objectid; + key.type = 0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + return ERR_PTR(ret); + if (ret > 0) { + btrfs_release_path(root, path); + return ERR_PTR(-ENOENT); + } + + leaf = path->nodes[0]; + header = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_free_space_header); + btrfs_free_space_key(leaf, header, &disk_key); + btrfs_disk_key_to_cpu(&location, &disk_key); + btrfs_release_path(root, path); + + inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); 
+ if (!inode) + return ERR_PTR(-ENOENT); + if (IS_ERR(inode)) + return inode; + if (is_bad_inode(inode)) { + iput(inode); + return ERR_PTR(-ENOENT); + } + + spin_lock(&block_group->lock); + if (!root->fs_info->closing) { + block_group->inode = igrab(inode); + block_group->iref = 1; + } + spin_unlock(&block_group->lock); + + return inode; +} + +int create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + struct btrfs_key key; + struct btrfs_disk_key disk_key; + struct btrfs_free_space_header *header; + struct btrfs_inode_item *inode_item; + struct extent_buffer *leaf; + u64 objectid; + int ret; + + ret = btrfs_find_free_objectid(trans, root, 0, &objectid); + if (ret < 0) + return ret; + + ret = btrfs_insert_empty_inode(trans, root, path, objectid); + if (ret) + return ret; + + leaf = path->nodes[0]; + inode_item = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_inode_item); + btrfs_item_key(leaf, &disk_key, path->slots[0]); + memset_extent_buffer(leaf, 0, (unsigned long)inode_item, + sizeof(*inode_item)); + btrfs_set_inode_generation(leaf, inode_item, trans->transid); + btrfs_set_inode_size(leaf, inode_item, 0); + btrfs_set_inode_nbytes(leaf, inode_item, 0); + btrfs_set_inode_uid(leaf, inode_item, 0); + btrfs_set_inode_gid(leaf, inode_item, 0); + btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); + btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS | + BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); + btrfs_set_inode_nlink(leaf, inode_item, 1); + btrfs_set_inode_transid(leaf, inode_item, trans->transid); + btrfs_set_inode_block_group(leaf, inode_item, + block_group->key.objectid); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + key.objectid = BTRFS_FREE_SPACE_OBJECTID; + key.offset = block_group->key.objectid; + key.type = 0; + + ret = btrfs_insert_empty_item(trans, root, path, &key, + sizeof(struct btrfs_free_space_header)); + if (ret < 0) { + btrfs_release_path(root, path); + return ret; + } + leaf = path->nodes[0]; + header = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_free_space_header); + memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); + btrfs_set_free_space_key(leaf, header, &disk_key); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + return 0; +} + +int btrfs_truncate_free_space_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct inode *inode) +{ + loff_t oldsize; + int ret = 0; + + trans->block_rsv = root->orphan_block_rsv; + ret = btrfs_block_rsv_check(trans, root, + root->orphan_block_rsv, + 0, 5); + if (ret) + return ret; + + oldsize = i_size_read(inode); + btrfs_i_size_write(inode, 0); + truncate_pagecache(inode, oldsize, 0); + + /* + * We don't need an orphan item because truncating the free space cache + * will never be split across transactions. 
+ */ + ret = btrfs_truncate_inode_items(trans, root, inode, + 0, BTRFS_EXTENT_DATA_KEY); + if (ret) { + WARN_ON(1); + return ret; + } + + return btrfs_update_inode(trans, root, inode); +} + static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, u64 offset) { diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 890a8e79011b..45be29e5f01e 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -27,6 +27,17 @@ struct btrfs_free_space { struct list_head list; }; +struct inode *lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_block_group_cache + *block_group, struct btrfs_path *path); +int create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path); +int btrfs_truncate_free_space_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct inode *inode); int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c03864406af3..1af1ea88e8a8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1700,6 +1700,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->len); BUG_ON(ret); } else { + BUG_ON(root == root->fs_info->tree_root); ret = insert_reserved_file_extent(trans, inode, ordered_extent->file_offset, ordered_extent->start, @@ -3196,7 +3197,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); - if (root->ref_cows) + if (root->ref_cows || root == root->fs_info->tree_root) btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); path = btrfs_alloc_path(); @@ -3344,7 +3345,8 @@ delete: } else { break; } - if (found_extent && root->ref_cows) { + if (found_extent && (root->ref_cows || + root == root->fs_info->tree_root)) { btrfs_set_path_blocking(path); ret = btrfs_free_extent(trans, root, extent_start, extent_num_bytes, 0, @@ -3675,7 +3677,8 @@ void btrfs_evict_inode(struct inode *inode) int ret; truncate_inode_pages(&inode->i_data, 0); - if (inode->i_nlink && btrfs_root_refs(&root->root_item) != 0) + if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || + root == root->fs_info->tree_root)) goto no_delete; if (is_bad_inode(inode)) { @@ -3888,7 +3891,14 @@ static void inode_tree_del(struct inode *inode) } spin_unlock(&root->inode_lock); - if (empty && btrfs_root_refs(&root->root_item) == 0) { + /* + * Free space cache has inodes in the tree root, but the tree root has a + * root_refs of 0, so this could end up dropping the tree root as a + * snapshot, so we need the extra !root->fs_info->tree_root check to + * make sure we don't drop it. 
+ */ + if (empty && btrfs_root_refs(&root->root_item) == 0 && + root != root->fs_info->tree_root) { synchronize_srcu(&root->fs_info->subvol_srcu); spin_lock(&root->inode_lock); empty = RB_EMPTY_ROOT(&root->inode_tree); @@ -4282,14 +4292,24 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; int ret = 0; + bool nolock = false; if (BTRFS_I(inode)->dummy_inode) return 0; + smp_mb(); + nolock = (root->fs_info->closing && root == root->fs_info->tree_root); + if (wbc->sync_mode == WB_SYNC_ALL) { - trans = btrfs_join_transaction(root, 1); + if (nolock) + trans = btrfs_join_transaction_nolock(root, 1); + else + trans = btrfs_join_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); - ret = btrfs_commit_transaction(trans, root); + if (nolock) + ret = btrfs_end_transaction_nolock(trans, root); + else + ret = btrfs_commit_transaction(trans, root); } return ret; } @@ -6308,6 +6328,21 @@ void btrfs_destroy_inode(struct inode *inode) spin_unlock(&root->fs_info->ordered_extent_lock); } + if (root == root->fs_info->tree_root) { + struct btrfs_block_group_cache *block_group; + + block_group = btrfs_lookup_block_group(root->fs_info, + BTRFS_I(inode)->block_group); + if (block_group && block_group->inode == inode) { + spin_lock(&block_group->lock); + block_group->inode = NULL; + spin_unlock(&block_group->lock); + btrfs_put_block_group(block_group); + } else if (block_group) { + btrfs_put_block_group(block_group); + } + } + spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", @@ -6340,7 +6375,8 @@ int btrfs_drop_inode(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - if (btrfs_root_refs(&root->root_item) == 0) + if (btrfs_root_refs(&root->root_item) == 0 && + root != root->fs_info->tree_root) return 1; else return generic_drop_inode(inode); @@ -6757,27 +6793,33 @@ out_unlock: return err; } -int btrfs_prealloc_file_range(struct inode *inode, int mode, - u64 start, u64 num_bytes, u64 min_size, - loff_t actual_len, u64 *alloc_hint) +static int __btrfs_prealloc_file_range(struct inode *inode, int mode, + u64 start, u64 num_bytes, u64 min_size, + loff_t actual_len, u64 *alloc_hint, + struct btrfs_trans_handle *trans) { - struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key ins; u64 cur_offset = start; int ret = 0; + bool own_trans = true; + if (trans) + own_trans = false; while (num_bytes > 0) { - trans = btrfs_start_transaction(root, 3); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - break; + if (own_trans) { + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + break; + } } ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, 0, *alloc_hint, (u64)-1, &ins, 1); if (ret) { - btrfs_end_transaction(trans, root); + if (own_trans) + btrfs_end_transaction(trans, root); break; } @@ -6810,11 +6852,30 @@ int btrfs_prealloc_file_range(struct inode *inode, int mode, ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); - btrfs_end_transaction(trans, root); + if (own_trans) + btrfs_end_transaction(trans, root); } return ret; } +int btrfs_prealloc_file_range(struct inode *inode, int mode, + u64 start, u64 num_bytes, u64 min_size, + loff_t actual_len, u64 *alloc_hint) +{ + return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, + min_size, actual_len, alloc_hint, + NULL); +} + +int 
btrfs_prealloc_file_range_trans(struct inode *inode, + struct btrfs_trans_handle *trans, int mode, + u64 start, u64 num_bytes, u64 min_size, + loff_t actual_len, u64 *alloc_hint) +{ + return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, + min_size, actual_len, alloc_hint, trans); +} + static long btrfs_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) { diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index b37d723b9d4a..af339eee55b8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -29,6 +29,7 @@ #include "locking.h" #include "btrfs_inode.h" #include "async-thread.h" +#include "free-space-cache.h" /* * backref_node, mapping_node and tree_block start with this @@ -3191,6 +3192,54 @@ static int block_use_full_backref(struct reloc_control *rc, return ret; } +static int delete_block_group_cache(struct btrfs_fs_info *fs_info, + struct inode *inode, u64 ino) +{ + struct btrfs_key key; + struct btrfs_path *path; + struct btrfs_root *root = fs_info->tree_root; + struct btrfs_trans_handle *trans; + unsigned long nr; + int ret = 0; + + if (inode) + goto truncate; + + key.objectid = ino; + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; + + inode = btrfs_iget(fs_info->sb, &key, root, NULL); + if (!inode || IS_ERR(inode) || is_bad_inode(inode)) { + if (inode && !IS_ERR(inode)) + iput(inode); + return -ENOENT; + } + +truncate: + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto out; + } + + trans = btrfs_join_transaction(root, 0); + if (IS_ERR(trans)) { + btrfs_free_path(path); + goto out; + } + + ret = btrfs_truncate_free_space_cache(root, trans, path, inode); + + btrfs_free_path(path); + nr = trans->blocks_used; + btrfs_end_transaction(trans, root); + btrfs_btree_balance_dirty(root, nr); +out: + iput(inode); + return ret; +} + /* * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY * this function scans fs tree to find blocks reference the data extent @@ -3217,15 +3266,27 @@ static int find_data_references(struct reloc_control *rc, int counted; int ret; - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - ref_root = btrfs_extent_data_ref_root(leaf, ref); ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref); ref_offset = btrfs_extent_data_ref_offset(leaf, ref); ref_count = btrfs_extent_data_ref_count(leaf, ref); + /* + * This is an extent belonging to the free space cache, let's just delete + * it and redo the search.
+ */ + if (ref_root == BTRFS_ROOT_TREE_OBJECTID) { + ret = delete_block_group_cache(rc->extent_root->fs_info, + NULL, ref_objectid); + if (ret != -ENOENT) + return ret; + ret = 0; + } + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + root = read_fs_root(rc->extent_root->fs_info, ref_root); if (IS_ERR(root)) { err = PTR_ERR(root); @@ -3860,6 +3921,8 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) { struct btrfs_fs_info *fs_info = extent_root->fs_info; struct reloc_control *rc; + struct inode *inode; + struct btrfs_path *path; int ret; int rw = 0; int err = 0; @@ -3882,6 +3945,26 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) rw = 1; } + path = btrfs_alloc_path(); + if (!path) { + err = -ENOMEM; + goto out; + } + + inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group, + path); + btrfs_free_path(path); + + if (!IS_ERR(inode)) + ret = delete_block_group_cache(fs_info, inode, 0); + else + ret = PTR_ERR(inode); + + if (ret && ret != -ENOENT) { + err = ret; + goto out; + } + rc->data_inode = create_reloc_inode(fs_info, rc->block_group); if (IS_ERR(rc->data_inode)) { err = PTR_ERR(rc->data_inode); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 1776dbd8dc98..5c23eb8d6ba3 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -68,7 +68,7 @@ enum { Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit, - Opt_discard, Opt_err, + Opt_discard, Opt_space_cache, Opt_err, }; static match_table_t tokens = { @@ -92,6 +92,7 @@ static match_table_t tokens = { {Opt_flushoncommit, "flushoncommit"}, {Opt_ratio, "metadata_ratio=%d"}, {Opt_discard, "discard"}, + {Opt_space_cache, "space_cache"}, {Opt_err, NULL}, }; @@ -235,6 +236,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_discard: btrfs_set_opt(info->mount_opt, DISCARD); break; + case Opt_space_cache: + printk(KERN_INFO "btrfs: enabling disk space caching\n"); + btrfs_set_opt(info->mount_opt, SPACE_CACHE); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 66e4c66cc63b..e7144c48ed79 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -163,6 +163,7 @@ enum btrfs_trans_type { TRANS_START, TRANS_JOIN, TRANS_USERSPACE, + TRANS_JOIN_NOLOCK, }; static int may_wait_transaction(struct btrfs_root *root, int type) @@ -186,7 +187,8 @@ again: if (!h) return ERR_PTR(-ENOMEM); - mutex_lock(&root->fs_info->trans_mutex); + if (type != TRANS_JOIN_NOLOCK) + mutex_lock(&root->fs_info->trans_mutex); if (may_wait_transaction(root, type)) wait_current_trans(root); @@ -195,7 +197,8 @@ again: cur_trans = root->fs_info->running_transaction; cur_trans->use_count++; - mutex_unlock(&root->fs_info->trans_mutex); + if (type != TRANS_JOIN_NOLOCK) + mutex_unlock(&root->fs_info->trans_mutex); h->transid = cur_trans->transid; h->transaction = cur_trans; @@ -224,9 +227,11 @@ again: } } - mutex_lock(&root->fs_info->trans_mutex); + if (type != TRANS_JOIN_NOLOCK) + mutex_lock(&root->fs_info->trans_mutex); record_root_in_trans(h, root); - mutex_unlock(&root->fs_info->trans_mutex); + if (type != TRANS_JOIN_NOLOCK) + mutex_unlock(&root->fs_info->trans_mutex); if (!current->journal_info && type != TRANS_USERSPACE) current->journal_info = h; @@ -244,6 +249,12 @@ struct btrfs_trans_handle 
*btrfs_join_transaction(struct btrfs_root *root, return start_transaction(root, 0, TRANS_JOIN); } +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, + int num_blocks) +{ + return start_transaction(root, 0, TRANS_JOIN_NOLOCK); +} + struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, int num_blocks) { @@ -348,7 +359,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, } static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, - struct btrfs_root *root, int throttle) + struct btrfs_root *root, int throttle, int lock) { struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_fs_info *info = root->fs_info; @@ -376,18 +387,19 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_metadata(trans, root); - if (!root->fs_info->open_ioctl_trans && + if (lock && !root->fs_info->open_ioctl_trans && should_end_transaction(trans, root)) trans->transaction->blocked = 1; - if (cur_trans->blocked && !cur_trans->in_commit) { + if (lock && cur_trans->blocked && !cur_trans->in_commit) { if (throttle) return btrfs_commit_transaction(trans, root); else wake_up_process(info->transaction_kthread); } - mutex_lock(&info->trans_mutex); + if (lock) + mutex_lock(&info->trans_mutex); WARN_ON(cur_trans != info->running_transaction); WARN_ON(cur_trans->num_writers < 1); cur_trans->num_writers--; @@ -395,7 +407,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, if (waitqueue_active(&cur_trans->writer_wait)) wake_up(&cur_trans->writer_wait); put_transaction(cur_trans); - mutex_unlock(&info->trans_mutex); + if (lock) + mutex_unlock(&info->trans_mutex); if (current->journal_info == trans) current->journal_info = NULL; @@ -411,13 +424,19 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0); + return __btrfs_end_transaction(trans, root, 0, 1); } int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 1); + return __btrfs_end_transaction(trans, root, 1, 1); +} + +int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + return __btrfs_end_transaction(trans, root, 0, 0); } /* @@ -966,6 +985,8 @@ static void update_super_roots(struct btrfs_root *root) super->root = root_item->bytenr; super->generation = root_item->generation; super->root_level = root_item->level; + if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE)) + super->cache_generation = root_item->generation; } int btrfs_transaction_in_commit(struct btrfs_fs_info *info) diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e104986d0bfd..15f83e1c1ef7 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -87,10 +87,14 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, + struct btrfs_root *root); struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, int num_items); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, int num_blocks); +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, + int num_blocks); struct btrfs_trans_handle 
*btrfs_start_ioctl_transaction(struct btrfs_root *r, int num_blocks); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, -- cgit v1.2.2 From 061dbc6b9010bc1a30ef9a1da5469aefa83abd7f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 26 Jul 2010 16:21:33 +0400 Subject: convert btrfs Signed-off-by: Al Viro --- fs/btrfs/super.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 144f8a5730f5..ebe46c628748 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -560,8 +560,8 @@ static int btrfs_test_super(struct super_block *s, void *data) * Note: This is based on get_sb_bdev from fs/super.c with a few additions * for multiple device setup. Make sure to keep it in sync. */ -static int btrfs_get_sb(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, struct vfsmount *mnt) +static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) { struct block_device *bdev = NULL; struct super_block *s; @@ -580,7 +580,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, &subvol_name, &subvol_objectid, &fs_devices); if (error) - return error; + return ERR_PTR(error); error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices); if (error) @@ -656,11 +656,8 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, root = new_root; } - mnt->mnt_sb = s; - mnt->mnt_root = root; - kfree(subvol_name); - return 0; + return root; error_s: error = PTR_ERR(s); @@ -669,7 +666,7 @@ error_close_devices: error_free_subvol_name: kfree(subvol_name); error: - return error; + return ERR_PTR(error); } static int btrfs_remount(struct super_block *sb, int *flags, char *data) @@ -746,7 +743,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) static struct file_system_type btrfs_fs_type = { .owner = THIS_MODULE, .name = "btrfs", - .get_sb = btrfs_get_sb, + .mount = btrfs_mount, .kill_sb = kill_anon_super, .fs_flags = FS_REQUIRES_DEV, }; -- cgit v1.2.2 From 0cb59c9953171e9adf6da8142a5c85ceb77bb60d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 2 Jul 2010 12:14:14 -0400 Subject: Btrfs: write out free space cache This is a simple bit, just dump the free space cache out to our preallocated inode when we're writing out dirty block groups. There are a bunch of changes in inode.c in order to account for special cases. Mostly when we're doing the writeout we're holding trans_mutex, so we need to use the nolock transaction functions. Also we can't do asynchronous completions since the async thread could be blocked on already completed IO waiting for the transaction lock. This has been tested with xfstests and btrfs filesystem balance, as well as my ENOSPC tests.
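As a minimal sketch of the locking rule described above (the helper and its caller here are illustrative only, not part of the patch; the _nolock variants are the ones this series adds):

static int cache_io_example(struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	bool nolock = (root == root->fs_info->tree_root);
	int ret;

	/*
	 * Free space cache inodes live in the tree root, which is written
	 * out while trans_mutex is held, so joining the transaction the
	 * normal way would deadlock.
	 */
	if (nolock)
		trans = btrfs_join_transaction_nolock(root, 1);
	else
		trans = btrfs_join_transaction(root, 1);
	if (!trans)
		return -ENOMEM;

	ret = btrfs_update_inode(trans, root, inode);

	if (nolock)
		btrfs_end_transaction_nolock(trans, root);
	else
		btrfs_end_transaction(trans, root);
	return ret;
}
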
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 17 ++- fs/btrfs/extent-tree.c | 48 +++++++ fs/btrfs/free-space-cache.c | 302 ++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/free-space-cache.h | 5 + fs/btrfs/inode.c | 60 +++++++-- 6 files changed, 420 insertions(+), 13 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 46f52e1beade..2c06b37cda75 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -982,6 +982,7 @@ struct btrfs_fs_info { struct btrfs_workers endio_meta_workers; struct btrfs_workers endio_meta_write_workers; struct btrfs_workers endio_write_workers; + struct btrfs_workers endio_freespace_worker; struct btrfs_workers submit_workers; /* * fixup workers take dirty pages that didn't properly go through diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 45cf64fc1e3e..77e5dabfd45a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -481,9 +481,12 @@ static void end_workqueue_bio(struct bio *bio, int err) end_io_wq->work.flags = 0; if (bio->bi_rw & REQ_WRITE) { - if (end_io_wq->metadata) + if (end_io_wq->metadata == 1) btrfs_queue_worker(&fs_info->endio_meta_write_workers, &end_io_wq->work); + else if (end_io_wq->metadata == 2) + btrfs_queue_worker(&fs_info->endio_freespace_worker, + &end_io_wq->work); else btrfs_queue_worker(&fs_info->endio_write_workers, &end_io_wq->work); @@ -497,6 +500,13 @@ static void end_workqueue_bio(struct bio *bio, int err) } } +/* + * For the metadata arg you want + * + * 0 - if data + * 1 - if normal metadata + * 2 - if writing to the free space cache area + */ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata) { @@ -1774,6 +1784,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", fs_info->thread_pool_size, &fs_info->generic_worker); + btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", + 1, &fs_info->generic_worker); /* * endios are largely parallel and should have a very @@ -1794,6 +1806,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->endio_meta_workers, 1); btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); btrfs_start_workers(&fs_info->endio_write_workers, 1); + btrfs_start_workers(&fs_info->endio_freespace_worker, 1); fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, @@ -2035,6 +2048,7 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); + btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); fail_iput: invalidate_inode_pages2(fs_info->btree_inode->i_mapping); @@ -2468,6 +2482,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); + btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); btrfs_close_devices(fs_info->fs_devices); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index aab40fb3faed..d5455a2bf60b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2847,6 +2847,8 @@ again: continue; } + if (cache->disk_cache_state == BTRFS_DC_SETUP) + cache->disk_cache_state = BTRFS_DC_NEED_WRITE; cache->dirty = 0; last = cache->key.objectid
+ cache->key.offset; @@ -2855,6 +2857,52 @@ again: btrfs_put_block_group(cache); } + while (1) { + /* + * I don't think this is needed since we're just marking our + * preallocated extent as written, but just in case it can't + * hurt. + */ + if (last == 0) { + err = btrfs_run_delayed_refs(trans, root, + (unsigned long)-1); + BUG_ON(err); + } + + cache = btrfs_lookup_first_block_group(root->fs_info, last); + while (cache) { + /* + * Really this shouldn't happen, but it could if we + * couldn't write the entire preallocated extent and + * splitting the extent resulted in a new block. + */ + if (cache->dirty) { + btrfs_put_block_group(cache); + goto again; + } + if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) + break; + cache = next_block_group(root, cache); + } + if (!cache) { + if (last == 0) + break; + last = 0; + continue; + } + + btrfs_write_out_cache(root, trans, cache, path); + + /* + * If we didn't have an error then the cache state is still + * NEED_WRITE, so we can set it to WRITTEN. + */ + if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) + cache->disk_cache_state = BTRFS_DC_WRITTEN; + last = cache->key.objectid + cache->key.offset; + btrfs_put_block_group(cache); + } + btrfs_free_path(path); return 0; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 05efcc7061a7..7f972e59cc04 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -28,6 +28,11 @@ #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) +static void recalculate_thresholds(struct btrfs_block_group_cache + *block_group); +static int link_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info); + struct inode *lookup_free_space_inode(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path) @@ -182,6 +187,303 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, return btrfs_update_inode(trans, root, inode); } +int btrfs_write_out_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + struct btrfs_free_space_header *header; + struct extent_buffer *leaf; + struct inode *inode; + struct rb_node *node; + struct list_head *pos, *n; + struct page *page; + struct extent_state *cached_state = NULL; + struct list_head bitmap_list; + struct btrfs_key key; + u64 bytes = 0; + u32 *crc, *checksums; + pgoff_t index = 0, last_index = 0; + unsigned long first_page_offset; + int num_checksums; + int entries = 0; + int bitmaps = 0; + int ret = 0; + + root = root->fs_info->tree_root; + + INIT_LIST_HEAD(&bitmap_list); + + spin_lock(&block_group->lock); + if (block_group->disk_cache_state < BTRFS_DC_SETUP) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) + return 0; + + if (!i_size_read(inode)) { + iput(inode); + return 0; + } + + last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + filemap_write_and_wait(inode->i_mapping); + btrfs_wait_ordered_range(inode, inode->i_size & + ~(root->sectorsize - 1), (u64)-1); + + /* We need a checksum per page. 
*/ + num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; + crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); + if (!crc) { + iput(inode); + return 0; + } + + /* Since the first page has all of our checksums and our generation we + * need to calculate the offset into the page that we can start writing + * our entries. + */ + first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); + + node = rb_first(&block_group->free_space_offset); + if (!node) + goto out_free; + + /* + * Lock all pages first so we can lock the extent safely. + * + * NOTE: Because we hold the ref the entire time we're going to write to + * the page find_get_page should never fail, so we don't do a check + * after find_get_page at this point. Just putting this here so people + * know and don't freak out. + */ + while (index <= last_index) { + page = grab_cache_page(inode->i_mapping, index); + if (!page) { + pgoff_t i = 0; + + while (i < index) { + page = find_get_page(inode->i_mapping, i); + unlock_page(page); + page_cache_release(page); + page_cache_release(page); + i++; + } + goto out_free; + } + index++; + } + + index = 0; + lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, + 0, &cached_state, GFP_NOFS); + + /* Write out the extent entries */ + do { + struct btrfs_free_space_entry *entry; + void *addr; + unsigned long offset = 0; + unsigned long start_offset = 0; + + if (index == 0) { + start_offset = first_page_offset; + offset = start_offset; + } + + page = find_get_page(inode->i_mapping, index); + + addr = kmap(page); + entry = addr + start_offset; + + memset(addr, 0, PAGE_CACHE_SIZE); + while (1) { + struct btrfs_free_space *e; + + e = rb_entry(node, struct btrfs_free_space, offset_index); + entries++; + + entry->offset = cpu_to_le64(e->offset); + entry->bytes = cpu_to_le64(e->bytes); + if (e->bitmap) { + entry->type = BTRFS_FREE_SPACE_BITMAP; + list_add_tail(&e->list, &bitmap_list); + bitmaps++; + } else { + entry->type = BTRFS_FREE_SPACE_EXTENT; + } + node = rb_next(node); + if (!node) + break; + offset += sizeof(struct btrfs_free_space_entry); + if (offset + sizeof(struct btrfs_free_space_entry) >= + PAGE_CACHE_SIZE) + break; + entry++; + } + *crc = ~(u32)0; + *crc = btrfs_csum_data(root, addr + start_offset, *crc, + PAGE_CACHE_SIZE - start_offset); + kunmap(page); + + btrfs_csum_final(*crc, (char *)crc); + crc++; + + bytes += PAGE_CACHE_SIZE; + + ClearPageChecked(page); + set_page_extent_mapped(page); + SetPageUptodate(page); + set_page_dirty(page); + + /* + * We need to release our reference we got for grab_cache_page, + * except for the first page which will hold our checksums, we + * do that below. 
+ */ + if (index != 0) { + unlock_page(page); + page_cache_release(page); + } + + page_cache_release(page); + + index++; + } while (node); + + /* Write out the bitmaps */ + list_for_each_safe(pos, n, &bitmap_list) { + void *addr; + struct btrfs_free_space *entry = + list_entry(pos, struct btrfs_free_space, list); + + page = find_get_page(inode->i_mapping, index); + + addr = kmap(page); + memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); + *crc = ~(u32)0; + *crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE); + kunmap(page); + btrfs_csum_final(*crc, (char *)crc); + crc++; + bytes += PAGE_CACHE_SIZE; + + ClearPageChecked(page); + set_page_extent_mapped(page); + SetPageUptodate(page); + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + page_cache_release(page); + list_del_init(&entry->list); + index++; + } + + /* Zero out the rest of the pages just to make sure */ + while (index <= last_index) { + void *addr; + + page = find_get_page(inode->i_mapping, index); + + addr = kmap(page); + memset(addr, 0, PAGE_CACHE_SIZE); + kunmap(page); + ClearPageChecked(page); + set_page_extent_mapped(page); + SetPageUptodate(page); + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + page_cache_release(page); + bytes += PAGE_CACHE_SIZE; + index++; + } + + btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state); + + /* Write the checksums and trans id to the first page */ + { + void *addr; + u64 *gen; + + page = find_get_page(inode->i_mapping, 0); + + addr = kmap(page); + memcpy(addr, checksums, sizeof(u32) * num_checksums); + gen = addr + (sizeof(u32) * num_checksums); + *gen = trans->transid; + kunmap(page); + ClearPageChecked(page); + set_page_extent_mapped(page); + SetPageUptodate(page); + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + page_cache_release(page); + } + BTRFS_I(inode)->generation = trans->transid; + + unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, &cached_state, GFP_NOFS); + + filemap_write_and_wait(inode->i_mapping); + + key.objectid = BTRFS_FREE_SPACE_OBJECTID; + key.offset = block_group->key.objectid; + key.type = 0; + + ret = btrfs_search_slot(trans, root, &key, path, 1, 1); + if (ret < 0) { + ret = 0; + clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); + goto out_free; + } + leaf = path->nodes[0]; + if (ret > 0) { + struct btrfs_key found_key; + BUG_ON(!path->slots[0]); + path->slots[0]--; + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); + if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || + found_key.offset != block_group->key.objectid) { + ret = 0; + clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, NULL, + GFP_NOFS); + btrfs_release_path(root, path); + goto out_free; + } + } + header = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_free_space_header); + btrfs_set_free_space_entries(leaf, header, entries); + btrfs_set_free_space_bitmaps(leaf, header, bitmaps); + btrfs_set_free_space_generation(leaf, header, trans->transid); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + ret = 1; + +out_free: + if (ret == 0) { + invalidate_inode_pages2_range(inode->i_mapping, 0, index); + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_ERROR; + spin_unlock(&block_group->lock); + BTRFS_I(inode)->generation = 0; + } + kfree(checksums); + btrfs_update_inode(trans, root, 
inode); + iput(inode); + return ret; +} + static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, u64 offset) { diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 45be29e5f01e..189f740bd3c0 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -34,10 +34,15 @@ int create_free_space_inode(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); + int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, struct inode *inode); +int btrfs_write_out_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path); int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1af1ea88e8a8..f2fb974ed8f0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -764,6 +764,7 @@ static noinline int cow_file_range(struct inode *inode, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; + BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root, 1); BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); @@ -1035,10 +1036,16 @@ static noinline int run_delalloc_nocow(struct inode *inode, int type; int nocow; int check_prev = 1; + bool nolock = false; path = btrfs_alloc_path(); BUG_ON(!path); - trans = btrfs_join_transaction(root, 1); + if (root == root->fs_info->tree_root) { + nolock = true; + trans = btrfs_join_transaction_nolock(root, 1); + } else { + trans = btrfs_join_transaction(root, 1); + } BUG_ON(!trans); cow_start = (u64)-1; @@ -1211,8 +1218,13 @@ out_check: BUG_ON(ret); } - ret = btrfs_end_transaction(trans, root); - BUG_ON(ret); + if (nolock) { + ret = btrfs_end_transaction_nolock(trans, root); + BUG_ON(ret); + } else { + ret = btrfs_end_transaction(trans, root); + BUG_ON(ret); + } btrfs_free_path(path); return 0; } @@ -1289,6 +1301,8 @@ static int btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; + int do_list = (root->root_key.objectid != + BTRFS_ROOT_TREE_OBJECTID); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1298,7 +1312,7 @@ static int btrfs_set_bit_hook(struct inode *inode, spin_lock(&root->fs_info->delalloc_lock); BTRFS_I(inode)->delalloc_bytes += len; root->fs_info->delalloc_bytes += len; - if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { + if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) { list_add_tail(&BTRFS_I(inode)->delalloc_inodes, &root->fs_info->delalloc_inodes); } @@ -1321,6 +1335,8 @@ static int btrfs_clear_bit_hook(struct inode *inode, if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; + int do_list = (root->root_key.objectid != + BTRFS_ROOT_TREE_OBJECTID); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1330,14 +1346,15 @@ static int btrfs_clear_bit_hook(struct inode *inode, if (*bits & EXTENT_DO_ACCOUNTING) btrfs_delalloc_release_metadata(inode, len); - if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) + if (root->root_key.objectid != 
BTRFS_DATA_RELOC_TREE_OBJECTID + && do_list) btrfs_free_reserved_data_space(inode, len); spin_lock(&root->fs_info->delalloc_lock); root->fs_info->delalloc_bytes -= len; BTRFS_I(inode)->delalloc_bytes -= len; - if (BTRFS_I(inode)->delalloc_bytes == 0 && + if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { list_del_init(&BTRFS_I(inode)->delalloc_inodes); } @@ -1426,7 +1443,10 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + if (root == root->fs_info->tree_root) + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); + else + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); if (!(rw & REQ_WRITE)) { @@ -1662,6 +1682,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_state *cached_state = NULL; int compressed = 0; int ret; + bool nolock = false; ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1); @@ -1669,11 +1690,17 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) return 0; BUG_ON(!ordered_extent); + nolock = (root == root->fs_info->tree_root); + if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); if (!ret) { - trans = btrfs_join_transaction(root, 1); + if (nolock) + trans = btrfs_join_transaction_nolock(root, 1); + else + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); @@ -1686,7 +1713,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->file_offset + ordered_extent->len - 1, 0, &cached_state, GFP_NOFS); - trans = btrfs_join_transaction(root, 1); + if (nolock) + trans = btrfs_join_transaction_nolock(root, 1); + else + trans = btrfs_join_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1725,9 +1755,15 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); out: - btrfs_delalloc_release_metadata(inode, ordered_extent->len); - if (trans) - btrfs_end_transaction(trans, root); + if (nolock) { + if (trans) + btrfs_end_transaction_nolock(trans, root); + } else { + btrfs_delalloc_release_metadata(inode, ordered_extent->len); + if (trans) + btrfs_end_transaction(trans, root); + } + /* once for us */ btrfs_put_ordered_extent(ordered_extent); /* once for the tree */ -- cgit v1.2.2 From 9d66e233c7042da27ec699453770f41e567a0442 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 25 Aug 2010 16:54:15 -0400 Subject: Btrfs: load free space cache if it exists This patch actually loads the free space cache if it exists. The only thing that really changes here is that we need to cache the block group if we're going to remove an extent from it. Previously we did not do this since the caching kthread would pick it up. With the on disk cache we don't have this luxury so we need to make sure we read the on disk cache in first, and then remove the extent, that way when the extent is unpinned the free space is added to the block group. This has been tested with all sorts of things. 
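For reference, a schematic of the on-disk cache file that this loader parses, pieced together from the writeout code above (a reader's sketch, not a structure defined in the headers):

/*
 * Free space cache file, one per block group:
 *
 *   page 0:      u32 crc[n]       one crc32c per page, n = i_size / page size
 *                u64 generation   must match the cache inode's generation
 *                packed btrfs_free_space_entry records to the end of the page
 *   pages 1..k:  more packed entries
 *   afterwards:  one full page per bitmap, in the order the
 *                BTRFS_FREE_SPACE_BITMAP entries appeared
 *   remainder:   zeroed pages up to the preallocated size
 *
 * first_page_offset = sizeof(u32) * n + sizeof(u64)
 */
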
Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 50 +++++++- fs/btrfs/free-space-cache.c | 296 ++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/free-space-cache.h | 2 + 3 files changed, 345 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d5455a2bf60b..9a325e465ad9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -421,7 +421,9 @@ err: return 0; } -static int cache_block_group(struct btrfs_block_group_cache *cache) +static int cache_block_group(struct btrfs_block_group_cache *cache, + struct btrfs_trans_handle *trans, + int load_cache_only) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_caching_control *caching_ctl; @@ -432,6 +434,36 @@ static int cache_block_group(struct btrfs_block_group_cache *cache) if (cache->cached != BTRFS_CACHE_NO) return 0; + /* + * We can't do the read from on-disk cache during a commit since we need + * to have the normal tree locking. + */ + if (!trans->transaction->in_commit) { + spin_lock(&cache->lock); + if (cache->cached != BTRFS_CACHE_NO) { + spin_unlock(&cache->lock); + return 0; + } + cache->cached = BTRFS_CACHE_STARTED; + spin_unlock(&cache->lock); + + ret = load_free_space_cache(fs_info, cache); + + spin_lock(&cache->lock); + if (ret == 1) { + cache->cached = BTRFS_CACHE_FINISHED; + cache->last_byte_to_unpin = (u64)-1; + } else { + cache->cached = BTRFS_CACHE_NO; + } + spin_unlock(&cache->lock); + if (ret == 1) + return 0; + } + + if (load_cache_only) + return 0; + caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL); BUG_ON(!caching_ctl); @@ -3984,6 +4016,14 @@ static int update_block_group(struct btrfs_trans_handle *trans, factor = 2; else factor = 1; + /* + * If this block group has free space cache written out, we + * need to make sure to load it if we are removing space. This + * is because we need the unpinning stage to actually add the + * space back to the block group, otherwise we will leak space. 
+ */ + if (!alloc && cache->cached == BTRFS_CACHE_NO) + cache_block_group(cache, trans, 1); byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); @@ -4828,6 +4868,10 @@ have_block_group: if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { u64 free_percent; + ret = cache_block_group(block_group, trans, 1); + if (block_group->cached == BTRFS_CACHE_FINISHED) + goto have_block_group; + free_percent = btrfs_block_group_used(&block_group->item); free_percent *= 100; free_percent = div64_u64(free_percent, @@ -4848,7 +4892,7 @@ have_block_group: if (loop > LOOP_CACHING_NOWAIT || (loop > LOOP_FIND_IDEAL && atomic_read(&space_info->caching_threads) < 2)) { - ret = cache_block_group(block_group); + ret = cache_block_group(block_group, trans, 0); BUG_ON(ret); } found_uncached_bg = true; @@ -5405,7 +5449,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, u64 num_bytes = ins->offset; block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); - cache_block_group(block_group); + cache_block_group(block_group, trans, 0); caching_ctl = get_caching_control(block_group); if (!caching_ctl) { diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 7f972e59cc04..baa193423fb8 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -187,6 +187,302 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, return btrfs_update_inode(trans, root, inode); } +static int readahead_cache(struct inode *inode) +{ + struct file_ra_state *ra; + unsigned long last_index; + + ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; + + file_ra_state_init(ra, inode->i_mapping); + last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + + page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); + + kfree(ra); + + return 0; +} + +int load_free_space_cache(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *block_group) +{ + struct btrfs_root *root = fs_info->tree_root; + struct inode *inode; + struct btrfs_free_space_header *header; + struct extent_buffer *leaf; + struct page *page; + struct btrfs_path *path; + u32 *checksums = NULL, *crc; + char *disk_crcs = NULL; + struct btrfs_key key; + struct list_head bitmaps; + u64 num_entries; + u64 num_bitmaps; + u64 generation; + u32 cur_crc = ~(u32)0; + pgoff_t index = 0; + unsigned long first_page_offset; + int num_checksums; + int ret = 0; + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. + */ + smp_mb(); + if (fs_info->closing) + return 0; + + /* + * If this block group has been marked to be cleared for one reason or + * another then we can't trust the on disk cache, so just return. 
+ */ + spin_lock(&block_group->lock); + if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { + printk(KERN_ERR "not reading block group %llu, dcs is %d\n", block_group->key.objectid, + block_group->disk_cache_state); + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + INIT_LIST_HEAD(&bitmaps); + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) { + btrfs_free_path(path); + return 0; + } + + /* Nothing in the space cache, goodbye */ + if (!i_size_read(inode)) { + btrfs_free_path(path); + goto out; + } + + key.objectid = BTRFS_FREE_SPACE_OBJECTID; + key.offset = block_group->key.objectid; + key.type = 0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret) { + btrfs_free_path(path); + goto out; + } + + leaf = path->nodes[0]; + header = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_free_space_header); + num_entries = btrfs_free_space_entries(leaf, header); + num_bitmaps = btrfs_free_space_bitmaps(leaf, header); + generation = btrfs_free_space_generation(leaf, header); + btrfs_free_path(path); + + if (BTRFS_I(inode)->generation != generation) { + printk(KERN_ERR "btrfs: free space inode generation (%llu) did" + " not match free space cache generation (%llu) for " + "block group %llu\n", + (unsigned long long)BTRFS_I(inode)->generation, + (unsigned long long)generation, + (unsigned long long)block_group->key.objectid); + goto out; + } + + if (!num_entries) + goto out; + + /* Setup everything for doing checksumming */ + num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; + checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); + if (!checksums) + goto out; + first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); + disk_crcs = kzalloc(first_page_offset, GFP_NOFS); + if (!disk_crcs) + goto out; + + ret = readahead_cache(inode); + if (ret) { + ret = 0; + goto out; + } + + while (1) { + struct btrfs_free_space_entry *entry; + struct btrfs_free_space *e; + void *addr; + unsigned long offset = 0; + unsigned long start_offset = 0; + int need_loop = 0; + + if (!num_entries && !num_bitmaps) + break; + + if (index == 0) { + start_offset = first_page_offset; + offset = start_offset; + } + + page = grab_cache_page(inode->i_mapping, index); + if (!page) { + ret = 0; + goto free_cache; + } + + if (!PageUptodate(page)) { + btrfs_readpage(NULL, page); + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + page_cache_release(page); + printk(KERN_ERR "btrfs: error reading free " + "space cache: %llu\n", + (unsigned long long) + block_group->key.objectid); + goto free_cache; + } + } + addr = kmap(page); + + if (index == 0) { + u64 *gen; + + memcpy(disk_crcs, addr, first_page_offset); + gen = addr + (sizeof(u32) * num_checksums); + if (*gen != BTRFS_I(inode)->generation) { + printk(KERN_ERR "btrfs: space cache generation" + " (%llu) does not match inode (%llu) " + "for block group %llu\n", + (unsigned long long)*gen, + (unsigned long long) + BTRFS_I(inode)->generation, + (unsigned long long) + block_group->key.objectid); + kunmap(page); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } + crc = (u32 *)disk_crcs; + } + entry = addr + start_offset; + + /* First lets check our crc before we do anything fun */ + cur_crc = ~(u32)0; + cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc, + PAGE_CACHE_SIZE - start_offset); + btrfs_csum_final(cur_crc, (char *)&cur_crc); + if (cur_crc != *crc) { + 
printk(KERN_ERR "btrfs: crc mismatch for page %lu in " + "block group %llu\n", index, + (unsigned long long)block_group->key.objectid); + kunmap(page); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } + crc++; + + while (1) { + if (!num_entries) + break; + + need_loop = 1; + e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + if (!e) { + kunmap(page); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } + + e->offset = le64_to_cpu(entry->offset); + e->bytes = le64_to_cpu(entry->bytes); + if (!e->bytes) { + kunmap(page); + kfree(e); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } + + if (entry->type == BTRFS_FREE_SPACE_EXTENT) { + spin_lock(&block_group->tree_lock); + ret = link_free_space(block_group, e); + spin_unlock(&block_group->tree_lock); + BUG_ON(ret); + } else { + e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + if (!e->bitmap) { + kunmap(page); + kfree(e); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } + spin_lock(&block_group->tree_lock); + ret = link_free_space(block_group, e); + block_group->total_bitmaps++; + recalculate_thresholds(block_group); + spin_unlock(&block_group->tree_lock); + list_add_tail(&e->list, &bitmaps); + } + + num_entries--; + offset += sizeof(struct btrfs_free_space_entry); + if (offset + sizeof(struct btrfs_free_space_entry) >= + PAGE_CACHE_SIZE) + break; + entry++; + } + + /* + * We read an entry out of this page, we need to move on to the + * next page. + */ + if (need_loop) { + kunmap(page); + goto next; + } + + /* + * We add the bitmaps at the end of the entries in order that + * the bitmap entries are added to the cache. + */ + e = list_entry(bitmaps.next, struct btrfs_free_space, list); + list_del_init(&e->list); + memcpy(e->bitmap, addr, PAGE_CACHE_SIZE); + kunmap(page); + num_bitmaps--; +next: + unlock_page(page); + page_cache_release(page); + index++; + } + + ret = 1; +out: + kfree(checksums); + kfree(disk_crcs); + iput(inode); + return ret; + +free_cache: + /* This cache is bogus, make sure it gets cleared */ + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_CLEAR; + spin_unlock(&block_group->lock); + btrfs_remove_free_space_cache(block_group); + goto out; +} + int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 189f740bd3c0..e49ca5c321b5 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -39,6 +39,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, struct inode *inode); +int load_free_space_cache(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *block_group); int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, -- cgit v1.2.2 From dde5abee12327d59f968bbfc8151e1b04082a2c4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 16 Sep 2010 16:17:03 -0400 Subject: Btrfs: check cache->caching_ctl before returning if caching has started With the free space disk caching we can mark the block group as started with the caching, but we don't have a caching ctl. This can race with anybody else who tries to get the caching ctl before we cache (this is very hard to do btw). So instead check to see if cache->caching_ctl is set, and if not return NULL. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9a325e465ad9..5c9ef3ac25e1 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -242,6 +242,12 @@ get_caching_control(struct btrfs_block_group_cache *cache) return NULL; } + /* We're loading it the fast way, so we don't have a caching_ctl. */ + if (!cache->caching_ctl) { + spin_unlock(&cache->lock); + return NULL; + } + ctl = cache->caching_ctl; atomic_inc(&ctl->count); spin_unlock(&cache->lock); -- cgit v1.2.2 From 67377734fd24c32cbdfeb697c2e2bd7fed519e75 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 16 Sep 2010 16:19:09 -0400 Subject: Btrfs: add support for mixed data+metadata block groups There are just a few things that need to be fixed in the kernel to support mixed data+metadata block groups. Mostly we just need to make sure that if we are using mixed block groups that we continue to allocate mixed block groups as we need them. Also we need to make sure __find_space_info will find our space info if we search for DATA or METADATA only. Tested this with xfstests and it works nicely. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 10 +++++++++- fs/btrfs/extent-tree.c | 22 +++++++++++++++++++--- 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2c06b37cda75..b155a0e49eeb 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -397,12 +397,14 @@ struct btrfs_super_block { */ #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) +#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) #define BTRFS_FEATURE_COMPAT_SUPP 0ULL #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL #define BTRFS_FEATURE_INCOMPAT_SUPP \ (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ - BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL) + BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ + BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) /* * A leaf is full of items. offset and size tell us where to find @@ -2046,6 +2048,12 @@ static inline struct dentry *fdentry(struct file *file) return file->f_path.dentry; } +static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) +{ + return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && + (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); +} + /* extent-tree.c */ void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5c9ef3ac25e1..137833e1fc26 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -547,7 +547,7 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { - if (found->flags == flags) { + if (found->flags & flags) { rcu_read_unlock(); return found; } @@ -3266,6 +3266,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, } spin_unlock(&space_info->lock); + /* + * If we have mixed data/metadata chunks we want to make sure we keep + * allocating mixed chunks instead of individual chunks. 
+ */ + if (btrfs_mixed_space_info(space_info)) + flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); + /* * if we're doing a data chunk, go ahead and make sure that * we keep a reasonable number of metadata chunks allocated in the @@ -4787,6 +4794,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, bool found_uncached_bg = false; bool failed_cluster_refill = false; bool failed_alloc = false; + bool use_cluster = true; u64 ideal_cache_percent = 0; u64 ideal_cache_offset = 0; @@ -4801,16 +4809,24 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, return -ENOSPC; } + /* + * If the space info is for both data and metadata it means we have a + * small filesystem and we can't use the clustering stuff. + */ + if (btrfs_mixed_space_info(space_info)) + use_cluster = false; + if (orig_root->ref_cows || empty_size) allowed_chunk_alloc = 1; - if (data & BTRFS_BLOCK_GROUP_METADATA) { + if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) { last_ptr = &root->fs_info->meta_alloc_cluster; if (!btrfs_test_opt(root, SSD)) empty_cluster = 64 * 1024; } - if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) { + if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster && + btrfs_test_opt(root, SSD)) { last_ptr = &root->fs_info->data_alloc_cluster; } -- cgit v1.2.2 From 88c2ba3b069f1e0f4694124d02985fa7620a19f1 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 21 Sep 2010 14:21:34 -0400 Subject: Btrfs: Add a clear_cache mount option If something goes wrong with the free space cache we need a way to make sure it's not loaded on mount and that it's cleared for everybody. When you pass the clear_cache option it will make it so all block groups are setup to be cleared, which keeps them from being loaded and then they will be truncated when the transaction is committed. 
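Putting the pieces together, the mount-time effect looks roughly like this (a simplified sketch combining the hunks below with the BTRFS_DC_* handling from the earlier free-space-cache patches; the disk_cache_state assignment is assumed from that code and is not part of this diff):

	int need_clear = 0;

	/* stale cache: the super generation moved without a cache rewrite */
	if (cache_gen != 0 &&
	    btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	/* the user explicitly asked for a clear */
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	if (need_clear)
		cache->disk_cache_state = BTRFS_DC_CLEAR;

load_free_space_cache() refuses any block group whose state is not BTRFS_DC_WRITTEN, and groups marked BTRFS_DC_CLEAR have their cache files truncated when the transaction commits.
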
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 1 + fs/btrfs/extent-tree.c | 2 ++ fs/btrfs/free-space-cache.c | 2 -- fs/btrfs/super.c | 6 +++++- 4 files changed, 8 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b155a0e49eeb..633e559e000e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1229,6 +1229,7 @@ struct btrfs_root { #define BTRFS_MOUNT_DISCARD (1 << 10) #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) +#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 137833e1fc26..1a94ee4c4fbb 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8198,6 +8198,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (cache_gen != 0 && btrfs_super_generation(&root->fs_info->super_copy) != cache_gen) need_clear = 1; + if (btrfs_test_opt(root, CLEAR_CACHE)) + need_clear = 1; while (1) { ret = find_first_block_group(root, path, &key); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index baa193423fb8..22ee0dc2e6b8 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -242,8 +242,6 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, */ spin_lock(&block_group->lock); if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { - printk(KERN_ERR "not reading block group %llu, dcs is %d\n", block_group->key.objectid, - block_group->disk_cache_state); spin_unlock(&block_group->lock); return 0; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 5c23eb8d6ba3..5f56213908e7 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -68,7 +68,7 @@ enum { Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit, - Opt_discard, Opt_space_cache, Opt_err, + Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_err, }; static match_table_t tokens = { @@ -93,6 +93,7 @@ static match_table_t tokens = { {Opt_ratio, "metadata_ratio=%d"}, {Opt_discard, "discard"}, {Opt_space_cache, "space_cache"}, + {Opt_clear_cache, "clear_cache"}, {Opt_err, NULL}, }; @@ -239,6 +240,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_space_cache: printk(KERN_INFO "btrfs: enabling disk space caching\n"); btrfs_set_opt(info->mount_opt, SPACE_CACHE); + case Opt_clear_cache: + printk(KERN_INFO "btrfs: force clearing of disk cache\n"); + btrfs_set_opt(info->mount_opt, CLEAR_CACHE); break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " -- cgit v1.2.2 From 8216ef866df1119fd5a72372b8b29bce49c18590 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 28 Oct 2010 16:55:47 -0400 Subject: Btrfs: let the user know space caching is enabled If you mount -o space_cache, the option will be persistent across mounts, but to make sure the user knows that they did this, emit a message telling them if they didn't mount with -o space_cache but the feature is still used. 
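The detection leans on the cache generation stored in the super block. A sketch of the idea; the hunk below only uses cache_gen, so reading it through a super-block accessor for the cache generation field is an assumption here:

	u64 cache_gen =
		btrfs_super_cache_generation(&root->fs_info->super_copy);

	/* a previous mount wrote the cache, even though this mount
	 * did not pass -o space_cache */
	if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
		printk(KERN_INFO "btrfs: disk space caching is enabled\n");
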
Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1a94ee4c4fbb..d2a7ff53e99a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8200,6 +8200,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) need_clear = 1; if (btrfs_test_opt(root, CLEAR_CACHE)) need_clear = 1; + if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen) + printk(KERN_INFO "btrfs: disk space caching is enabled\n"); while (1) { ret = find_first_block_group(root, path, &key); -- cgit v1.2.2 From cb44921a09221f0a90217b44044448f63190f3e5 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 24 Oct 2010 11:01:27 -0400 Subject: Btrfs: don't loop forever on bad btree blocks When btrfs discovers the generation number in a btree block is incorrect, it can loop forever without forcing the RAID code to try a valid mirror, and without returning EIO. This changes things to properly kick out the EIO. Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c3df14ce2cc2..6921231e0efb 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1577,13 +1577,33 @@ read_block_for_search(struct btrfs_trans_handle *trans, blocksize = btrfs_level_size(root, level - 1); tmp = btrfs_find_tree_block(root, blocknr, blocksize); - if (tmp && btrfs_buffer_uptodate(tmp, gen)) { - /* - * we found an up to date block without sleeping, return - * right away - */ - *eb_ret = tmp; - return 0; + if (tmp) { + if (btrfs_buffer_uptodate(tmp, 0)) { + if (btrfs_buffer_uptodate(tmp, gen)) { + /* + * we found an up to date block without + * sleeping, return + * right away + */ + *eb_ret = tmp; + return 0; + } + /* the pages were up to date, but we failed + * the generation number check. Do a full + * read for the generation number that is correct. + * We must do this without dropping locks so + * we can trust our generation number + */ + free_extent_buffer(tmp); + tmp = read_tree_block(root, blocknr, blocksize, gen); + if (tmp && btrfs_buffer_uptodate(tmp, gen)) { + *eb_ret = tmp; + return 0; + } + free_extent_buffer(tmp); + btrfs_release_path(NULL, p); + return -EIO; + } } /* @@ -1596,8 +1616,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, btrfs_unlock_up_safe(p, level + 1); btrfs_set_path_blocking(p); - if (tmp) - free_extent_buffer(tmp); + free_extent_buffer(tmp); if (p->reada) reada_for_search(root, p, level, slot, key->objectid); -- cgit v1.2.2 From e5bc2458293b2af6c0b94435965c68cc70974b56 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 26 Oct 2010 13:37:56 -0400 Subject: Btrfs: tune the chunk allocation to 5% of the FS as metadata An earlier commit tried to keep us from allocating too many empty metadata chunks. It was somewhat too restrictive and could lead to ENOSPC errors on empty filesystems. This increases the limits to about 5% of the FS size, allowing more metadata chunks to be preallocated. 
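Concretely, with the helpers added below, the threshold works out as follows (a worked example, not code from the patch): on a 1 TiB filesystem, thresh = max(256 MiB, 5% of 1 TiB) = ~51 GiB, so a metadata space_info may grow to roughly 51 GiB of mostly-empty chunks before allocations start being refused; on anything under about 5 GiB the old 256 MiB floor still applies.

	thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));

	/* refuse a new chunk only once this space_info already spans
	 * more than thresh and less than a third of it is in use */
	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
		return 0;
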
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 372fd224a11d..980d6a3c342c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -580,6 +580,15 @@ static u64 div_factor(u64 num, int factor) return num; } +static u64 div_factor_fine(u64 num, int factor) +{ + if (factor == 100) + return num; + num *= factor; + do_div(num, 100); + return num; +} + u64 btrfs_find_block_group(struct btrfs_root *root, u64 search_start, u64 search_hint, int owner) { @@ -3218,9 +3227,11 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) rcu_read_unlock(); } -static int should_alloc_chunk(struct btrfs_space_info *sinfo, u64 alloc_bytes) +static int should_alloc_chunk(struct btrfs_root *root, + struct btrfs_space_info *sinfo, u64 alloc_bytes) { u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; + u64 thresh; if (sinfo->bytes_used + sinfo->bytes_reserved + alloc_bytes + 256 * 1024 * 1024 < num_bytes) @@ -3230,8 +3241,10 @@ static int should_alloc_chunk(struct btrfs_space_info *sinfo, u64 alloc_bytes) alloc_bytes < div_factor(num_bytes, 8)) return 0; - if (num_bytes > 256 * 1024 * 1024 && - sinfo->bytes_used < div_factor(num_bytes, 3)) + thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); + thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); + + if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) return 0; return 1; @@ -3265,7 +3278,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, goto out; } - if (!force && !should_alloc_chunk(space_info, alloc_bytes)) { + if (!force && !should_alloc_chunk(extent_root, space_info, + alloc_bytes)) { spin_unlock(&space_info->lock); goto out; } -- cgit v1.2.2 From bf9022e06af553553bc8f4e21ce36147ca6eae68 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 26 Oct 2010 13:40:45 -0400 Subject: Btrfs: use the flusher threads for delalloc throttling We have a fairly complex set of loops around walking our list of delalloc inodes when we find metadata delalloc space running low. It doesn't work very well, can use large amounts of CPU and doesn't do very efficient writeback. This switches us to kick the bdi flusher threads instead. All dirty data in btrfs is accounted as delalloc data, so this is very similar in terms of what it writes, but we're able to just kick off the IO and wait for progress. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 980d6a3c342c..59c8daaacf0c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3328,15 +3328,14 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 reserved; u64 max_reclaim; u64 reclaimed = 0; - int no_reclaim = 0; int pause = 1; - int ret; + int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; block_rsv = &root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; - spin_lock(&space_info->lock); + + smp_mb(); reserved = space_info->bytes_reserved; - spin_unlock(&space_info->lock); if (reserved == 0) return 0; @@ -3344,20 +3343,11 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, max_reclaim = min(reserved, to_reclaim); while (1) { - ret = btrfs_start_one_delalloc_inode(root, trans ? 
1 : 0, sync); - if (!ret) { - if (no_reclaim > 2) - break; - no_reclaim++; - __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(pause); - pause <<= 1; - if (pause > HZ / 10) - pause = HZ / 10; - } else { - no_reclaim = 0; - pause = 1; - } + /* have the flusher threads jump in and do some IO */ + smp_mb(); + nr_pages = min_t(unsigned long, nr_pages, + root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT); + writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); if (reserved > space_info->bytes_reserved) @@ -3370,6 +3360,13 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, if (trans && trans->transaction->blocked) return -EAGAIN; + + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(pause); + pause <<= 1; + if (pause > HZ / 10) + pause = HZ / 10; + } return reclaimed >= to_reclaim; } -- cgit v1.2.2 From 897ca6e9b4fef86d5dfb6b31fd9f592ce6a47a42 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Tue, 26 Oct 2010 20:57:29 -0400 Subject: Btrfs: restructure try_release_extent_buffer() restructure try_release_extent_buffer() and write a function to release the extent buffer. It will be used later. Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 48 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 11 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d74e6af9b53a..6e3b326346a7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3104,6 +3104,39 @@ static void __free_extent_buffer(struct extent_buffer *eb) kmem_cache_free(extent_buffer_cache, eb); } +/* + * Helper for releasing extent buffer page. + */ +static void btrfs_release_extent_buffer_page(struct extent_buffer *eb, + unsigned long start_idx) +{ + unsigned long index; + struct page *page; + + if (!eb->first_page) + return; + + index = num_extent_pages(eb->start, eb->len); + if (start_idx >= index) + return; + + do { + index--; + page = extent_buffer_page(eb, index); + if (page) + page_cache_release(page); + } while (index != start_idx); +} + +/* + * Helper for releasing the extent buffer. 
+ */ +static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) +{ + btrfs_release_extent_buffer_page(eb, 0); + __free_extent_buffer(eb); +} + struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, struct page *page0, @@ -3181,10 +3214,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, free_eb: if (!atomic_dec_and_test(&eb->refs)) return exists; - for (index = 1; index < i; index++) - page_cache_release(extent_buffer_page(eb, index)); - page_cache_release(extent_buffer_page(eb, 0)); - __free_extent_buffer(eb); + btrfs_release_extent_buffer(eb); return exists; } @@ -3838,8 +3868,6 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) u64 start = page_offset(page); struct extent_buffer *eb; int ret = 1; - unsigned long i; - unsigned long num_pages; spin_lock(&tree->buffer_lock); eb = buffer_search(tree, start); @@ -3854,12 +3882,10 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) ret = 0; goto out; } - /* at this point we can safely release the extent buffer */ - num_pages = num_extent_pages(eb->start, eb->len); - for (i = 0; i < num_pages; i++) - page_cache_release(extent_buffer_page(eb, i)); + rb_erase(&eb->rb_node, &tree->buffer); - __free_extent_buffer(eb); + /* at this point we can safely release the extent buffer */ + btrfs_release_extent_buffer(eb); out: spin_unlock(&tree->buffer_lock); return ret; -- cgit v1.2.2 From 19fe0a8b787d7c7f9318975b5a8c6e7e5e54e925 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Tue, 26 Oct 2010 20:57:29 -0400 Subject: Btrfs: Switch the extent buffer rbtree into a radix tree This patch reduces the CPU time spent in the extent buffer search by using the radix tree instead of the rbtree and using the rcu lock instead of the spin lock. I did a quick test by the benchmark tool[1] and found the patch improve the file creation/deletion performance problem that I have reported[2]. 
Before applying this patch: Create files: Total files: 50000 Total time: 0.971531 Average time: 0.000019 Delete files: Total files: 50000 Total time: 1.366761 Average time: 0.000027 After applying this patch: Create files: Total files: 50000 Total time: 0.927455 Average time: 0.000019 Delete files: Total files: 50000 Total time: 1.292280 Average time: 0.000026 [1] http://marc.info/?l=linux-btrfs&m=128212635122920&q=p3 [2] http://marc.info/?l=linux-btrfs&m=128212635122920&w=2 Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 114 +++++++++++++++++++++------------------------------ fs/btrfs/extent_io.h | 4 +- 2 files changed, 49 insertions(+), 69 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6e3b326346a7..4c639e156296 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -104,7 +104,7 @@ void extent_io_tree_init(struct extent_io_tree *tree, struct address_space *mapping, gfp_t mask) { tree->state = RB_ROOT; - tree->buffer = RB_ROOT; + INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); tree->ops = NULL; tree->dirty_bytes = 0; spin_lock_init(&tree->lock); @@ -235,50 +235,6 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree, return ret; } -static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree, - u64 offset, struct rb_node *node) -{ - struct rb_root *root = &tree->buffer; - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct extent_buffer *eb; - - while (*p) { - parent = *p; - eb = rb_entry(parent, struct extent_buffer, rb_node); - - if (offset < eb->start) - p = &(*p)->rb_left; - else if (offset > eb->start) - p = &(*p)->rb_right; - else - return eb; - } - - rb_link_node(node, parent, p); - rb_insert_color(node, root); - return NULL; -} - -static struct extent_buffer *buffer_search(struct extent_io_tree *tree, - u64 offset) -{ - struct rb_root *root = &tree->buffer; - struct rb_node *n = root->rb_node; - struct extent_buffer *eb; - - while (n) { - eb = rb_entry(n, struct extent_buffer, rb_node); - if (offset < eb->start) - n = n->rb_left; - else if (offset > eb->start) - n = n->rb_right; - else - return eb; - } - return NULL; -} - static void merge_cb(struct extent_io_tree *tree, struct extent_state *new, struct extent_state *other) { @@ -3082,6 +3038,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, eb->len = len; spin_lock_init(&eb->lock); init_waitqueue_head(&eb->lock_wq); + INIT_RCU_HEAD(&eb->rcu_head); #if LEAK_DEBUG spin_lock_irqsave(&leak_lock, flags); @@ -3150,16 +3107,16 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, struct page *p; struct address_space *mapping = tree->mapping; int uptodate = 1; + int ret; - spin_lock(&tree->buffer_lock); - eb = buffer_search(tree, start); - if (eb) { - atomic_inc(&eb->refs); - spin_unlock(&tree->buffer_lock); + rcu_read_lock(); + eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); + if (eb && atomic_inc_not_zero(&eb->refs)) { + rcu_read_unlock(); mark_page_accessed(eb->first_page); return eb; } - spin_unlock(&tree->buffer_lock); + rcu_read_unlock(); eb = __alloc_extent_buffer(tree, start, len, mask); if (!eb) @@ -3198,17 +3155,25 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, if (uptodate) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); + ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + if (ret) + goto free_eb; + spin_lock(&tree->buffer_lock); - exists = buffer_tree_insert(tree, start, 
&eb->rb_node); - if (exists) { + ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb); + if (ret == -EEXIST) { + exists = radix_tree_lookup(&tree->buffer, + start >> PAGE_CACHE_SHIFT); /* add one reference for the caller */ atomic_inc(&exists->refs); spin_unlock(&tree->buffer_lock); + radix_tree_preload_end(); goto free_eb; } /* add one reference for the tree */ atomic_inc(&eb->refs); spin_unlock(&tree->buffer_lock); + radix_tree_preload_end(); return eb; free_eb: @@ -3224,16 +3189,16 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, { struct extent_buffer *eb; - spin_lock(&tree->buffer_lock); - eb = buffer_search(tree, start); - if (eb) - atomic_inc(&eb->refs); - spin_unlock(&tree->buffer_lock); - - if (eb) + rcu_read_lock(); + eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); + if (eb && atomic_inc_not_zero(&eb->refs)) { + rcu_read_unlock(); mark_page_accessed(eb->first_page); + return eb; + } + rcu_read_unlock(); - return eb; + return NULL; } void free_extent_buffer(struct extent_buffer *eb) @@ -3863,6 +3828,14 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, } } +static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head) +{ + struct extent_buffer *eb = + container_of(head, struct extent_buffer, rcu_head); + + btrfs_release_extent_buffer(eb); +} + int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) { u64 start = page_offset(page); @@ -3870,23 +3843,30 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) int ret = 1; spin_lock(&tree->buffer_lock); - eb = buffer_search(tree, start); + eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); if (!eb) goto out; - if (atomic_read(&eb->refs) > 1) { + if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { ret = 0; goto out; } - if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { + + /* + * set @eb->refs to 0 if it is already 1, and then release the @eb. + * Or go back. + */ + if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) { ret = 0; goto out; } - rb_erase(&eb->rb_node, &tree->buffer); - /* at this point we can safely release the extent buffer */ - btrfs_release_extent_buffer(eb); + radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT); out: spin_unlock(&tree->buffer_lock); + + /* at this point we can safely release the extent buffer */ + if (atomic_read(&eb->refs) == 0) + call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); return ret; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 5691c7b590da..1c6d4f342ef7 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -85,7 +85,7 @@ struct extent_io_ops { struct extent_io_tree { struct rb_root state; - struct rb_root buffer; + struct radix_tree_root buffer; struct address_space *mapping; u64 dirty_bytes; spinlock_t lock; @@ -123,7 +123,7 @@ struct extent_buffer { unsigned long bflags; atomic_t refs; struct list_head leak_list; - struct rb_node rb_node; + struct rcu_head rcu_head; /* the spinlock is used to protect most operations */ spinlock_t lock; -- cgit v1.2.2 From 18e503d695ff8ff9a43768555aa74575bf6b77f3 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 28 Oct 2010 15:30:42 -0400 Subject: Btrfs: fix raid code for removing missing drives When btrfs is mounted in degraded mode, it has some internal structures to track the missing devices. This missing device is setup as readonly, but the mapping code can get upset when we try to write to it. 
This changes the mapping code to return -EIO instead of oops when we try to write to the readonly device. Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index dd318ff280b2..28681e729b1d 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3034,8 +3034,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, } bio->bi_sector = multi->stripes[dev_nr].physical >> 9; dev = multi->stripes[dev_nr].dev; - BUG_ON(rw == WRITE && !dev->writeable); - if (dev && dev->bdev) { + if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { bio->bi_bdev = dev->bdev; if (async_submit) schedule_bio(root, dev, rw, bio); -- cgit v1.2.2 From 2354d08fe9aeec3e451b85cb5387a6b28dbca0b1 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 29 Oct 2010 15:14:18 -0400 Subject: Btrfs: use memdup_user helpers Use memdup_user when user data is immediately copied into the allocated region. The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ expression from,to,size,flag; position p; identifier l1,l2; @@ - to = \(kmalloc@p\|kzalloc@p\)(size,flag); + to = memdup_user(from,size); if ( - to==NULL + IS_ERR(to) || ...) { <+... when != goto l1; - -ENOMEM + PTR_ERR(to) ...+> } - if (copy_from_user(to, from, size) != 0) { - <+... when != goto l2; - -EFAULT - ...+> - } // Signed-off-by: Julia Lawall Cc: Chris Mason Signed-off-by: Andrew Morton Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index db0b8fc59235..8079ebfeaf50 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1073,14 +1073,10 @@ static noinline int btrfs_ioctl_tree_search(struct file *file, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - args = kmalloc(sizeof(*args), GFP_KERNEL); - if (!args) - return -ENOMEM; + args = memdup_user(argp, sizeof(*args)); + if (IS_ERR(args)) + return PTR_ERR(args); - if (copy_from_user(args, argp, sizeof(*args))) { - kfree(args); - return -EFAULT; - } inode = fdentry(file)->d_inode; ret = search_ioctl(inode, args); if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) @@ -1188,14 +1184,10 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - args = kmalloc(sizeof(*args), GFP_KERNEL); - if (!args) - return -ENOMEM; + args = memdup_user(argp, sizeof(*args)); + if (IS_ERR(args)) + return PTR_ERR(args); - if (copy_from_user(args, argp, sizeof(*args))) { - kfree(args); - return -EFAULT; - } inode = fdentry(file)->d_inode; if (args->treeid == 0) -- cgit v1.2.2 From d0b678cb0a26783ab7238784f1e7e608e5caafa3 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 29 Oct 2010 15:14:23 -0400 Subject: Btrfs: Use ERR_CAST helpers Use ERR_CAST(x) rather than ERR_PTR(PTR_ERR(x)). The former makes more clear what is the purpose of the operation, which otherwise looks like a no-op. The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ type T; T x; identifier f; @@ T f (...) { <+... 
- ERR_PTR(PTR_ERR(x)) + x ...+> } @@ expression x; @@ - ERR_PTR(PTR_ERR(x)) + ERR_CAST(x) // Signed-off-by: Julia Lawall Cc: Chris Mason Signed-off-by: Andrew Morton Signed-off-by: Chris Mason --- fs/btrfs/extent_map.c | 4 ++-- fs/btrfs/super.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 454ca52d6451..23cb8da3ff66 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -335,7 +335,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, goto out; } if (IS_ERR(rb_node)) { - em = ERR_PTR(PTR_ERR(rb_node)); + em = ERR_CAST(rb_node); goto out; } em = rb_entry(rb_node, struct extent_map, rb_node); @@ -384,7 +384,7 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree, goto out; } if (IS_ERR(rb_node)) { - em = ERR_PTR(PTR_ERR(rb_node)); + em = ERR_CAST(rb_node); goto out; } em = rb_entry(rb_node, struct extent_map, rb_node); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 65b62daa3f80..d7fb2733d028 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -389,7 +389,7 @@ static struct dentry *get_default_root(struct super_block *sb, find_root: new_root = btrfs_read_fs_root_no_name(root->fs_info, &location); if (IS_ERR(new_root)) - return ERR_PTR(PTR_ERR(new_root)); + return ERR_CAST(new_root); if (btrfs_root_refs(&new_root->root_item) == 0) return ERR_PTR(-ENOENT); -- cgit v1.2.2 From 411fc6bcef54f828a5458f4730c68abdf13c6bf0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 29 Oct 2010 15:14:31 -0400 Subject: Btrfs: Fix variables set but not read (bugs found by gcc 4.6) These are all the cases where a variable is set, but not read which are really bugs. - Couple of incorrect error handling fixed. - One incorrect use of a allocation policy - Some other things Still needs more review. Found by gcc 4.6's new warnings. [akpm@linux-foundation.org: fix build. 
Might have been bitrot] Signed-off-by: Andi Kleen Cc: Chris Mason Signed-off-by: Andrew Morton Signed-off-by: Chris Mason --- fs/btrfs/dir-item.c | 2 +- fs/btrfs/extent_io.c | 2 ++ fs/btrfs/inode.c | 6 +++--- fs/btrfs/relocation.c | 4 +++- fs/btrfs/tree-log.c | 2 +- 5 files changed, 10 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index e9103b3baa49..f0cad5ae5be7 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -427,5 +427,5 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, ret = btrfs_truncate_item(trans, root, path, item_len - sub_item_len, 1); } - return 0; + return ret; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 4c639e156296..7dc31c39ca59 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2779,6 +2779,8 @@ int extent_prepare_write(struct extent_io_tree *tree, NULL, 1, end_bio_extent_preparewrite, 0, 0, 0); + if (ret && !err) + err = ret; iocount++; block_start = block_start + iosize; } else { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9f08136b10c4..0aa24717cd58 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1389,7 +1389,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, if (map_length < length + size) return 1; - return 0; + return ret; } /* @@ -2709,8 +2709,8 @@ static int check_path_shared(struct btrfs_root *root, { struct extent_buffer *eb; int level; - int ret; u64 refs = 1; + int uninitialized_var(ret); for (level = 0; level < BTRFS_MAX_LEVEL; level++) { if (!path->nodes[level]) @@ -2723,7 +2723,7 @@ static int check_path_shared(struct btrfs_root *root, if (refs > 1) return 1; } - return 0; + return ret; /* XXX callers? */ } /* diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index fd0714475db7..045c9c2b2d7e 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3094,6 +3094,8 @@ static int add_tree_block(struct reloc_control *rc, BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0)); ret = get_ref_objectid_v0(rc, path, extent_key, &ref_owner, NULL); + if (ret < 0) + return ret; BUG_ON(ref_owner >= BTRFS_MAX_LEVEL); level = (int)ref_owner; /* FIXME: get real generation */ @@ -4218,7 +4220,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) btrfs_add_ordered_sum(inode, ordered, sums); } btrfs_put_ordered_extent(ordered); - return 0; + return ret; } void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index fb102a9aee9c..224fb5b3daad 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2273,7 +2273,7 @@ fail: } btrfs_end_log_trans(root); - return 0; + return err; } /* see comments for btrfs_del_dir_entries_in_log */ -- cgit v1.2.2 From 559af8211433b8c0b20e6c43c61409cb9c9c2996 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 29 Oct 2010 15:14:37 -0400 Subject: Btrfs: cleanup warnings from gcc 4.6 (nonbugs) These are all the cases where a variable is set, but not read which are not bugs as far as I can see, but simply leftovers. Still needs more review. 
Found by gcc 4.6's new warnings Signed-off-by: Andi Kleen Cc: Chris Mason Signed-off-by: Andrew Morton Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 2 -- fs/btrfs/ctree.c | 20 ++------------------ fs/btrfs/disk-io.c | 11 ----------- fs/btrfs/extent-tree.c | 2 -- fs/btrfs/extent_io.c | 9 --------- fs/btrfs/inode.c | 14 -------------- fs/btrfs/ioctl.c | 2 -- fs/btrfs/ordered-data.c | 2 -- fs/btrfs/root-tree.c | 2 -- fs/btrfs/super.c | 6 ++---- fs/btrfs/tree-defrag.c | 2 -- fs/btrfs/tree-log.c | 15 --------------- fs/btrfs/volumes.c | 4 ---- fs/btrfs/xattr.c | 2 -- fs/btrfs/zlib.c | 5 ----- 15 files changed, 4 insertions(+), 94 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 396039b3a8a2..7845d1f7d1d9 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -163,7 +163,6 @@ fail: */ static void end_compressed_bio_read(struct bio *bio, int err) { - struct extent_io_tree *tree; struct compressed_bio *cb = bio->bi_private; struct inode *inode; struct page *page; @@ -187,7 +186,6 @@ static void end_compressed_bio_read(struct bio *bio, int err) /* ok, we're the last bio for this extent, lets start * the decompression. */ - tree = &BTRFS_I(inode)->io_tree; ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, cb->start, cb->orig_bio->bi_io_vec, diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6921231e0efb..9ac171599258 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -200,7 +200,6 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, struct extent_buffer **cow_ret, u64 new_root_objectid) { struct extent_buffer *cow; - u32 nritems; int ret = 0; int level; struct btrfs_disk_key disk_key; @@ -210,7 +209,6 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, WARN_ON(root->ref_cows && trans->transid != root->last_trans); level = btrfs_header_level(buf); - nritems = btrfs_header_nritems(buf); if (level == 0) btrfs_item_key(buf, &disk_key, 0); else @@ -1008,7 +1006,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, int wret; int pslot; int orig_slot = path->slots[level]; - int err_on_enospc = 0; u64 orig_ptr; if (level == 0) @@ -1071,8 +1068,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, BTRFS_NODEPTRS_PER_BLOCK(root) / 4) return 0; - if (btrfs_header_nritems(mid) < 2) - err_on_enospc = 1; + btrfs_header_nritems(mid); left = read_node_slot(root, parent, pslot - 1); if (left) { @@ -1103,8 +1099,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, wret = push_node_left(trans, root, left, mid, 1); if (wret < 0) ret = wret; - if (btrfs_header_nritems(mid) < 2) - err_on_enospc = 1; + btrfs_header_nritems(mid); } /* @@ -1224,14 +1219,12 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, int wret; int pslot; int orig_slot = path->slots[level]; - u64 orig_ptr; if (level == 0) return 1; mid = path->nodes[level]; WARN_ON(btrfs_header_generation(mid) != trans->transid); - orig_ptr = btrfs_node_blockptr(mid, orig_slot); if (level < BTRFS_MAX_LEVEL - 1) parent = path->nodes[level + 1]; @@ -2567,7 +2560,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, { struct btrfs_disk_key disk_key; struct extent_buffer *right = path->nodes[0]; - int slot; int i; int push_space = 0; int push_items = 0; @@ -2579,8 +2571,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, u32 this_item_size; u32 old_left_item_size; - slot = path->slots[1]; - if (empty) nr = min(right_nritems, max_slot); else 
@@ -3349,7 +3339,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, { int ret = 0; int slot; - int slot_orig; struct extent_buffer *leaf; struct btrfs_item *item; u32 nritems; @@ -3359,7 +3348,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, unsigned int size_diff; int i; - slot_orig = path->slots[0]; leaf = path->nodes[0]; slot = path->slots[0]; @@ -3464,7 +3452,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, { int ret = 0; int slot; - int slot_orig; struct extent_buffer *leaf; struct btrfs_item *item; u32 nritems; @@ -3473,7 +3460,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, unsigned int old_size; int i; - slot_orig = path->slots[0]; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); @@ -3806,7 +3792,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_key *cpu_key, u32 *data_size, int nr) { - struct extent_buffer *leaf; int ret = 0; int slot; int i; @@ -3823,7 +3808,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, if (ret < 0) goto out; - leaf = path->nodes[0]; slot = path->slots[0]; BUG_ON(slot < 0); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 77e5dabfd45a..e163424c7fce 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -338,7 +338,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) struct extent_io_tree *tree; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; u64 found_start; - int found_level; unsigned long len; struct extent_buffer *eb; int ret; @@ -369,8 +368,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) WARN_ON(1); goto err; } - found_level = btrfs_header_level(eb); - csum_tree_block(root, eb, 0); err: free_extent_buffer(eb); @@ -543,11 +540,9 @@ int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) static void run_one_async_start(struct btrfs_work *work) { - struct btrfs_fs_info *fs_info; struct async_submit_bio *async; async = container_of(work, struct async_submit_bio, work); - fs_info = BTRFS_I(async->inode)->root->fs_info; async->submit_bio_start(async->inode, async->rw, async->bio, async->mirror_num, async->bio_flags, async->bio_offset); @@ -860,12 +855,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, u64 parent_transid) { struct extent_buffer *buf = NULL; - struct inode *btree_inode = root->fs_info->btree_inode; - struct extent_io_tree *io_tree; int ret; - io_tree = &BTRFS_I(btree_inode)->io_tree; - buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) return NULL; @@ -1387,7 +1378,6 @@ static int bio_ready_for_csum(struct bio *bio) u64 start = 0; struct page *page; struct extent_io_tree *io_tree = NULL; - struct btrfs_fs_info *info = NULL; struct bio_vec *bvec; int i; int ret; @@ -1406,7 +1396,6 @@ static int bio_ready_for_csum(struct bio *bio) buf_len = page->private >> 2; start = page_offset(page) + bvec->bv_offset; io_tree = &BTRFS_I(page->mapping->host)->io_tree; - info = BTRFS_I(page->mapping->host)->root->fs_info; } /* are we fully contained in this bio? 
*/ if (buf_len <= length) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 59c8daaacf0c..df754108b952 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5719,7 +5719,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans, u64 generation; u64 refs; u64 flags; - u64 last = 0; u32 nritems; u32 blocksize; struct btrfs_key key; @@ -5787,7 +5786,6 @@ reada: generation); if (ret) break; - last = bytenr + blocksize; nread++; } wc->reada_slot = slot; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7dc31c39ca59..3b7eaee0f912 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1857,10 +1857,8 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, struct page *page = bvec->bv_page; struct extent_io_tree *tree = bio->bi_private; u64 start; - u64 end; start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; - end = start + bvec->bv_len - 1; bio->bi_private = NULL; @@ -2160,7 +2158,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 last_byte = i_size_read(inode); u64 block_start; u64 iosize; - u64 unlock_start; sector_t sector; struct extent_state *cached_state = NULL; struct extent_map *em; @@ -2285,7 +2282,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (tree->ops && tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, start, page_end, NULL, 1); - unlock_start = page_end + 1; goto done; } @@ -2296,7 +2292,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (tree->ops && tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, cur, page_end, NULL, 1); - unlock_start = page_end + 1; break; } em = epd->get_extent(inode, page, pg_offset, cur, @@ -2343,7 +2338,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, cur += iosize; pg_offset += iosize; - unlock_start = cur; continue; } /* leave this out until we have a page_mkwrite call */ @@ -2429,7 +2423,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, pgoff_t index; pgoff_t end; /* Inclusive */ int scanned = 0; - int range_whole = 0; pagevec_init(&pvec, 0); if (wbc->range_cyclic) { @@ -2438,8 +2431,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) - range_whole = 1; scanned = 1; } retry: diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0aa24717cd58..609f3bbbd1ed 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -319,8 +319,6 @@ static noinline int compress_file_range(struct inode *inode, struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; u64 num_bytes; - u64 orig_start; - u64 disk_num_bytes; u64 blocksize = root->sectorsize; u64 actual_end; u64 isize = i_size_read(inode); @@ -335,8 +333,6 @@ static noinline int compress_file_range(struct inode *inode, int i; int will_compress; - orig_start = start; - actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; @@ -371,7 +367,6 @@ again: total_compressed = min(total_compressed, max_uncompressed); num_bytes = (end - start + blocksize) & ~(blocksize - 1); num_bytes = max(blocksize, num_bytes); - disk_num_bytes = num_bytes; total_in = 0; ret = 0; @@ -467,7 +462,6 @@ again: if (total_compressed >= total_in) { will_compress = 0; } else { - disk_num_bytes = total_compressed; num_bytes = 
total_in; } } @@ -757,8 +751,6 @@ static noinline int cow_file_range(struct inode *inode, u64 disk_num_bytes; u64 cur_alloc_size; u64 blocksize = root->sectorsize; - u64 actual_end; - u64 isize = i_size_read(inode); struct btrfs_key ins; struct extent_map *em; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; @@ -770,8 +762,6 @@ static noinline int cow_file_range(struct inode *inode, btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; - actual_end = min_t(u64, isize, end + 1); - num_bytes = (end - start + blocksize) & ~(blocksize - 1); num_bytes = max(blocksize, num_bytes); disk_num_bytes = num_bytes; @@ -2274,7 +2264,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) { struct btrfs_path *path; struct extent_buffer *leaf; - struct btrfs_item *item; struct btrfs_key key, found_key; struct btrfs_trans_handle *trans; struct inode *inode; @@ -2312,7 +2301,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) /* pull out the item */ leaf = path->nodes[0]; - item = btrfs_item_nr(leaf, path->slots[0]); btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); /* make sure the item matches what we want */ @@ -5701,7 +5689,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_dio_private *dip; struct bio_vec *bvec = bio->bi_io_vec; - u64 start; int skip_sum; int write = rw & REQ_WRITE; int ret = 0; @@ -5727,7 +5714,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, dip->inode = inode; dip->logical_offset = file_offset; - start = dip->logical_offset; dip->bytes = 0; do { dip->bytes += bvec->bv_len; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 8079ebfeaf50..60f662c4778b 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -708,7 +708,6 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, char *sizestr; char *devstr = NULL; int ret = 0; - int namelen; int mod = 0; if (root->fs_info->sb->s_flags & MS_RDONLY) @@ -722,7 +721,6 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, return PTR_ERR(vol_args); vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - namelen = strlen(vol_args->name); mutex_lock(&root->fs_info->volume_mutex); sizestr = vol_args->name; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index e56c72bc5add..f4621f6deca1 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -526,7 +526,6 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) { u64 end; u64 orig_end; - u64 wait_end; struct btrfs_ordered_extent *ordered; int found; @@ -537,7 +536,6 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) if (orig_end > INT_LIMIT(loff_t)) orig_end = INT_LIMIT(loff_t); } - wait_end = orig_end; again: /* start IO across the range first to instantiate any delalloc * extents diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 2d958be761c8..6a1086e83ffc 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -181,7 +181,6 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid) { struct btrfs_root *dead_root; - struct btrfs_item *item; struct btrfs_root_item *ri; struct btrfs_key key; struct btrfs_key found_key; @@ -214,7 +213,6 @@ again: nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; } - item = btrfs_item_nr(leaf, slot); btrfs_item_key_to_cpu(leaf, &key, slot); if (btrfs_key_type(&key) != 
BTRFS_ROOT_ITEM_KEY) goto next; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index d7fb2733d028..0002e6d1a16f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -61,6 +61,8 @@ static void btrfs_put_super(struct super_block *sb) ret = close_ctree(root); sb->s_fs_info = NULL; + + (void)ret; /* FIXME: need to fix VFS to return error? */ } enum { @@ -445,7 +447,6 @@ static int btrfs_fill_super(struct super_block *sb, { struct inode *inode; struct dentry *root_dentry; - struct btrfs_super_block *disk_super; struct btrfs_root *tree_root; struct btrfs_key key; int err; @@ -467,7 +468,6 @@ static int btrfs_fill_super(struct super_block *sb, return PTR_ERR(tree_root); } sb->s_fs_info = tree_root; - disk_super = &tree_root->fs_info->super_copy; key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_INODE_ITEM_KEY; @@ -580,7 +580,6 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, char *subvol_name = NULL; u64 subvol_objectid = 0; int error = 0; - int found = 0; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; @@ -616,7 +615,6 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, goto error_close_devices; } - found = 1; btrfs_close_devices(fs_devices); } else { char b[BDEVNAME_SIZE]; diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index f7ac8e013ed7..992ab425599d 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -36,7 +36,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, int ret = 0; int wret; int level; - int orig_level; int is_extent = 0; int next_key_ret = 0; u64 last_ret = 0; @@ -64,7 +63,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, return -ENOMEM; level = btrfs_header_level(root->node); - orig_level = level; if (level == 0) goto out; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 224fb5b3daad..a29f19384a27 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -786,7 +786,6 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, { struct inode *dir; int ret; - struct btrfs_key location; struct btrfs_inode_ref *ref; struct btrfs_dir_item *di; struct inode *inode; @@ -795,10 +794,6 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, unsigned long ref_ptr; unsigned long ref_end; - location.objectid = key->objectid; - location.type = BTRFS_INODE_ITEM_KEY; - location.offset = 0; - /* * it is possible that we didn't log all the parent directories * for a given inode. 
If we don't find the dir, just don't @@ -1583,7 +1578,6 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, struct btrfs_path *path; struct btrfs_root *root = wc->replay_dest; struct btrfs_key key; - u32 item_size; int level; int i; int ret; @@ -1601,7 +1595,6 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, nritems = btrfs_header_nritems(eb); for (i = 0; i < nritems; i++) { btrfs_item_key_to_cpu(eb, &key, i); - item_size = btrfs_item_size_nr(eb, i); /* inode keys are done during the first stage */ if (key.type == BTRFS_INODE_ITEM_KEY && @@ -1668,7 +1661,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, struct walk_control *wc) { u64 root_owner; - u64 root_gen; u64 bytenr; u64 ptr_gen; struct extent_buffer *next; @@ -1698,7 +1690,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, parent = path->nodes[*level]; root_owner = btrfs_header_owner(parent); - root_gen = btrfs_header_generation(parent); next = btrfs_find_create_tree_block(root, bytenr, blocksize); @@ -1749,7 +1740,6 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, struct walk_control *wc) { u64 root_owner; - u64 root_gen; int i; int slot; int ret; @@ -1757,8 +1747,6 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { slot = path->slots[i]; if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { - struct extent_buffer *node; - node = path->nodes[i]; path->slots[i]++; *level = i; WARN_ON(*level == 0); @@ -1771,7 +1759,6 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, parent = path->nodes[*level + 1]; root_owner = btrfs_header_owner(parent); - root_gen = btrfs_header_generation(parent); wc->process_func(root, path->nodes[*level], wc, btrfs_header_generation(path->nodes[*level])); if (wc->free) { @@ -2729,7 +2716,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_key max_key; struct btrfs_root *log = root->log_root; struct extent_buffer *src = NULL; - u32 size; int err = 0; int ret; int nritems; @@ -2793,7 +2779,6 @@ again: break; src = path->nodes[0]; - size = btrfs_item_size_nr(src, path->slots[0]); if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { ins_nr++; goto next_slot; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 28681e729b1d..91851b555e2e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1901,7 +1901,6 @@ int btrfs_balance(struct btrfs_root *dev_root) u64 size_to_free; struct btrfs_path *path; struct btrfs_key key; - struct btrfs_chunk *chunk; struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root; struct btrfs_trans_handle *trans; struct btrfs_key found_key; @@ -1965,9 +1964,6 @@ int btrfs_balance(struct btrfs_root *dev_root) if (found_key.objectid != key.objectid) break; - chunk = btrfs_item_ptr(path->nodes[0], - path->slots[0], - struct btrfs_chunk); /* chunk zero is special */ if (found_key.offset == 0) break; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 88ecbb215878..698fdd2c739c 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -178,7 +178,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) struct inode *inode = dentry->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_path *path; - struct btrfs_item *item; struct extent_buffer *leaf; struct btrfs_dir_item *di; int ret = 0, slot, advance; @@ -234,7 +233,6 @@ ssize_t btrfs_listxattr(struct 
dentry *dentry, char *buffer, size_t size) } advance = 1; - item = btrfs_item_nr(leaf, slot); btrfs_item_key_to_cpu(leaf, &found_key, slot); /* check to make sure this item is what we want */ diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 3e2b90eaa239..b9cd5445f71c 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -199,8 +199,6 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, int nr_pages = 0; struct page *in_page = NULL; struct page *out_page = NULL; - int out_written = 0; - int in_read = 0; unsigned long bytes_left; *out_pages = 0; @@ -233,9 +231,6 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, workspace->def_strm.avail_out = PAGE_CACHE_SIZE; workspace->def_strm.avail_in = min(len, PAGE_CACHE_SIZE); - out_written = 0; - in_read = 0; - while (workspace->def_strm.total_in < len) { ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH); if (ret != Z_OK) { -- cgit v1.2.2 From d8e39c457bc1ca2a7304bc086c7b0f0c10854921 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 29 Oct 2010 15:17:41 -0400 Subject: Btrfs: drop unused variable in block_alloc_rsv The alloc_target variable is not really used. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index df754108b952..a541bc87f04c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3607,18 +3607,14 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) { struct btrfs_block_rsv *block_rsv; struct btrfs_fs_info *fs_info = root->fs_info; - u64 alloc_target; block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); if (!block_rsv) return NULL; btrfs_init_block_rsv(block_rsv); - - alloc_target = btrfs_get_alloc_profile(root, 0); block_rsv->space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); - return block_rsv; } -- cgit v1.2.2 From 9a019196ecaa57780141ef5d1f0bb31050d6ed5b Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:37:33 -0400 Subject: Btrfs: fix delalloc checks in clone ioctl The lookup_first_ordered_extent() was done on the wrong inode, and the ->delalloc_bytes test was wrong, as the following btrfs_wait_ordered_range() would only invoke a range write and wouldn't write the entire file data range. Also, a bad parameter was passed to btrfs_wait_ordered_range(). 
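In outline, the corrected wait loop looks like the sketch below (condensed from the diff that follows; note that the last argument of btrfs_wait_ordered_range() is a length, not an end offset, which is the bad parameter the patch fixes):

	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent(&BTRFS_I(src)->io_tree, off, off + len, GFP_NOFS);
		/* probe the source inode, not the destination */
		ordered = btrfs_lookup_first_ordered_extent(src, off + len);
		if (!ordered &&
		    !test_range_bit(&BTRFS_I(src)->io_tree, off, off + len,
				    EXTENT_DELALLOC, 0, NULL))
			break;	/* the source range is clean; safe to clone */
		unlock_extent(&BTRFS_I(src)->io_tree, off, off + len, GFP_NOFS);
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		btrfs_wait_ordered_range(src, off, len);	/* (start, len) */
	}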
Signed-off-by: Yehuda Sadeh Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 60f662c4778b..d94bef5179fc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1520,13 +1520,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, while (1) { struct btrfs_ordered_extent *ordered; lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); - ordered = btrfs_lookup_first_ordered_extent(inode, off+len); - if (BTRFS_I(src)->delalloc_bytes == 0 && !ordered) + ordered = btrfs_lookup_first_ordered_extent(src, off+len); + if (!ordered && + !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, + EXTENT_DELALLOC, 0, NULL)) break; unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); if (ordered) btrfs_put_ordered_extent(ordered); - btrfs_wait_ordered_range(src, off, off+len); + btrfs_wait_ordered_range(src, off, len); } /* clone data */ -- cgit v1.2.2 From 050006a753bab8ba05f2113cc57ba49398cd5521 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:37:33 -0400 Subject: Btrfs: fix clone ioctl where range is adjacent to extent We had an edge case issue where the requested range was just following an existing extent. Instead of skipping to the next extent, we used the previous one, which led to having zero-sized extents. Signed-off-by: Yehuda Sadeh Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d94bef5179fc..3fe15e435b5c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1597,7 +1597,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, } btrfs_release_path(root, path); - if (key.offset + datal < off || + if (key.offset + datal <= off || key.offset >= off+len) goto next; -- cgit v1.2.2 From fccdae435c1b295cca546f23f6f43126a28ffac3 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:37:33 -0400 Subject: Btrfs: fix lockdep warning on clone ioctl I'm no lockdep expert, but this appears to make the lockdep warning go away for the i_mutex locking in the clone ioctl. Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 3fe15e435b5c..93d69b32028e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1492,11 +1492,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, path->reada = 2; if (inode < src) { - mutex_lock(&inode->i_mutex); - mutex_lock(&src->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&src->i_mutex, I_MUTEX_CHILD); } else { - mutex_lock(&src->i_mutex); - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&src->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); } /* determine range to clone */ -- cgit v1.2.2 From 99d16cbcaf694c803a1b6bf7e851694ffe1d255d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:37:34 -0400 Subject: Btrfs: fix deadlock in btrfs_commit_transaction We calculate timeout (either 1 or MAX_SCHEDULE_TIMEOUT) based on whether num_writers > 1 or should_grow at the top of the loop. Then, much much later, we wait for that timeout if either num_writers or should_grow is true.
However, it's possible for a racing process (calling btrfs_end_transaction()) to decrement num_writers such that we wait forever instead of for 1 jiffy. Fix this by deciding how long to wait when we wait. Include a smp_mb() before checking if the waitqueue is active to ensure the num_writers decrement is visible. Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 325d9a5f0128..700dc4b34ada 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -402,6 +402,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, WARN_ON(cur_trans->num_writers < 1); cur_trans->num_writers--; + smp_mb(); if (waitqueue_active(&cur_trans->writer_wait)) wake_up(&cur_trans->writer_wait); put_transaction(cur_trans); @@ -1010,7 +1011,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { unsigned long joined = 0; - unsigned long timeout = 1; struct btrfs_transaction *cur_trans; struct btrfs_transaction *prev_trans = NULL; DEFINE_WAIT(wait); @@ -1081,11 +1081,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, snap_pending = 1; WARN_ON(cur_trans != trans->transaction); - if (cur_trans->num_writers > 1) - timeout = MAX_SCHEDULE_TIMEOUT; - else if (should_grow) - timeout = 1; - mutex_unlock(&root->fs_info->trans_mutex); if (flush_on_commit || snap_pending) { @@ -1107,8 +1102,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, TASK_UNINTERRUPTIBLE); smp_mb(); - if (cur_trans->num_writers > 1 || should_grow) - schedule_timeout(timeout); + if (cur_trans->num_writers > 1) + schedule_timeout(MAX_SCHEDULE_TIMEOUT); + else if (should_grow) + schedule_timeout(1); mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); -- cgit v1.2.2 From bb9c12c945cbd1b0eaa1589546dde772ccabeeba Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:37:34 -0400 Subject: Btrfs: async transaction commit Add support for an async transaction commit that is ordered such that any subsequent operations will join the following transaction, but does not wait until the current commit is fully on disk. This avoids much of the latency associated with btrfs_commit_transaction for callers concerned with serialization and not safety. The wait_for_unblock flag controls whether we wait for the 'middle' portion of commit_transaction to complete, which is necessary if the caller expects some of the modifications contained in the commit to be available (this is the case for subvol/snapshot creation).
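From the caller's side, the intended pattern is roughly the following sketch (based on the interface added below; error handling omitted):

	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 0);
	/* ... modifications that must be ordered into this commit ... */

	/*
	 * returns once the commit has started (and, if wait_for_unblock is
	 * set, once the 'middle' portion is done), not once it is on disk
	 */
	btrfs_commit_transaction_async(trans, root, 1 /* wait_for_unblock */);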
Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 1 + fs/btrfs/transaction.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/transaction.h | 3 ++ 4 files changed, 124 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 88c0fb7e12d2..e5d66b13c175 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -901,6 +901,7 @@ struct btrfs_fs_info { struct btrfs_transaction *running_transaction; wait_queue_head_t transaction_throttle; wait_queue_head_t transaction_wait; + wait_queue_head_t transaction_blocked_wait; wait_queue_head_t async_submit_wait; struct btrfs_super_block super_copy; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e163424c7fce..b40dfe48017b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1679,6 +1679,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, init_waitqueue_head(&fs_info->transaction_throttle); init_waitqueue_head(&fs_info->transaction_wait); + init_waitqueue_head(&fs_info->transaction_blocked_wait); init_waitqueue_head(&fs_info->async_submit_wait); __setup_root(4096, 4096, 4096, 4096, tree_root, diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 700dc4b34ada..9f40bfc9c45c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1007,6 +1007,123 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info) return ret; } +/* + * wait for the current transaction commit to start and block subsequent + * transaction joins + */ +static void wait_current_trans_commit_start(struct btrfs_root *root, + struct btrfs_transaction *trans) +{ + DEFINE_WAIT(wait); + + if (trans->in_commit) + return; + + while (1) { + prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait, + TASK_UNINTERRUPTIBLE); + if (trans->in_commit) { + finish_wait(&root->fs_info->transaction_blocked_wait, + &wait); + break; + } + mutex_unlock(&root->fs_info->trans_mutex); + schedule(); + mutex_lock(&root->fs_info->trans_mutex); + finish_wait(&root->fs_info->transaction_blocked_wait, &wait); + } +} + +/* + * wait for the current transaction to start and then become unblocked. + * caller holds ref. + */ +static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root, + struct btrfs_transaction *trans) +{ + DEFINE_WAIT(wait); + + if (trans->commit_done || (trans->in_commit && !trans->blocked)) + return; + + while (1) { + prepare_to_wait(&root->fs_info->transaction_wait, &wait, + TASK_UNINTERRUPTIBLE); + if (trans->commit_done || + (trans->in_commit && !trans->blocked)) { + finish_wait(&root->fs_info->transaction_wait, + &wait); + break; + } + mutex_unlock(&root->fs_info->trans_mutex); + schedule(); + mutex_lock(&root->fs_info->trans_mutex); + finish_wait(&root->fs_info->transaction_wait, + &wait); + } +} + +/* + * commit transactions asynchronously. once btrfs_commit_transaction_async + * returns, any subsequent transaction will not be allowed to join. 
+ */ +struct btrfs_async_commit { + struct btrfs_trans_handle *newtrans; + struct btrfs_root *root; + struct delayed_work work; +}; + +static void do_async_commit(struct work_struct *work) +{ + struct btrfs_async_commit *ac = + container_of(work, struct btrfs_async_commit, work.work); + + btrfs_commit_transaction(ac->newtrans, ac->root); + kfree(ac); +} + +int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + int wait_for_unblock) +{ + struct btrfs_async_commit *ac; + struct btrfs_transaction *cur_trans; + + ac = kmalloc(sizeof(*ac), GFP_NOFS); + BUG_ON(!ac); + + INIT_DELAYED_WORK(&ac->work, do_async_commit); + ac->root = root; + ac->newtrans = btrfs_join_transaction(root, 0); + + /* take transaction reference */ + mutex_lock(&root->fs_info->trans_mutex); + cur_trans = trans->transaction; + cur_trans->use_count++; + mutex_unlock(&root->fs_info->trans_mutex); + + btrfs_end_transaction(trans, root); + schedule_delayed_work(&ac->work, 0); + + /* wait for transaction to start and unblock */ + mutex_lock(&root->fs_info->trans_mutex); + if (wait_for_unblock) + wait_current_trans_commit_start_and_unblock(root, cur_trans); + else + wait_current_trans_commit_start(root, cur_trans); + put_transaction(cur_trans); + mutex_unlock(&root->fs_info->trans_mutex); + + return 0; +} + +/* + * btrfs_transaction state sequence: + * in_commit = 0, blocked = 0 (initial) + * in_commit = 1, blocked = 1 + * blocked = 0 + * commit_done = 1 + */ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -1057,6 +1174,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, trans->transaction->in_commit = 1; trans->transaction->blocked = 1; + wake_up(&root->fs_info->transaction_blocked_wait); + if (cur_trans->list.prev != &root->fs_info->trans_list) { prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 15f83e1c1ef7..e1908e6872fe 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -108,6 +108,9 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly); int btrfs_clean_old_snapshots(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + int wait_for_unblock); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, -- cgit v1.2.2 From 462045928bda777c86919a396a42991fcf235378 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:41:32 -0400 Subject: Btrfs: add START_SYNC, WAIT_SYNC ioctls START_SYNC will start a sync/commit, but not wait for it to complete. Any modification started after the ioctl returns is guaranteed not to be included in the commit. If a non-NULL pointer is passed, the transaction id will be returned to userspace. WAIT_SYNC will wait for any in-progress commit to complete. If a transaction id is specified, the ioctl will block and then return (success) when the specified transaction has committed. If it has already committed when we call the ioctl, it returns immediately. If the specified transaction doesn't exist, it returns EINVAL. If no transaction id is specified, WAIT_SYNC will wait for the currently committing transaction to finish its commit to disk. If there is no currently committing transaction, it returns success.
These ioctls are useful for applications which want to impose an ordering on when fs modifications reach disk, but do not want to wait for the full (slow) commit process to do so. Picky callers can take the transid returned by START_SYNC and feed it to WAIT_SYNC, and be certain to wait only as long as necessary for the transaction _they_ started to reach disk. Sloppy callers can START_SYNC and WAIT_SYNC without a transid, and provided they didn't wait too long between the calls, they will get the same result. However, if a second commit starts before they call WAIT_SYNC, they may end up waiting longer for it to commit as well. Even so, a START_SYNC+WAIT_SYNC still guarantees that any operation completed before the START_SYNC reaches disk. Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 34 +++++++++++++++++++++++++++++++++ fs/btrfs/ioctl.h | 2 ++ fs/btrfs/transaction.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/transaction.h | 1 + 4 files changed, 89 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 93d69b32028e..dc5a19ed07f3 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2028,6 +2028,36 @@ long btrfs_ioctl_trans_end(struct file *file) return 0; } +static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp) +{ + struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root; + struct btrfs_trans_handle *trans; + u64 transid; + + trans = btrfs_start_transaction(root, 0); + transid = trans->transid; + btrfs_commit_transaction_async(trans, root, 0); + + if (argp) + if (copy_to_user(argp, &transid, sizeof(transid))) + return -EFAULT; + return 0; +} + +static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp) +{ + struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root; + u64 transid; + + if (argp) { + if (copy_from_user(&transid, argp, sizeof(transid))) + return -EFAULT; + } else { + transid = 0; /* current trans */ + } + return btrfs_wait_for_commit(root, transid); +} + long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2078,6 +2108,10 @@ long btrfs_ioctl(struct file *file, unsigned int case BTRFS_IOC_SYNC: btrfs_sync_fs(file->f_dentry->d_sb, 1); return 0; + case BTRFS_IOC_START_SYNC: + return btrfs_ioctl_start_sync(file, argp); + case BTRFS_IOC_WAIT_SYNC: + return btrfs_ioctl_wait_sync(file, argp); } return -ENOTTY; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 424694aa517f..16e1442523b7 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -178,4 +178,6 @@ struct btrfs_ioctl_space_args { #define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64) #define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ struct btrfs_ioctl_space_args) +#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) +#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 9f40bfc9c45c..1fffbc017bdf 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -279,6 +279,58 @@ static noinline int wait_for_commit(struct btrfs_root *root, return 0; } +int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) +{ + struct btrfs_transaction *cur_trans = NULL, *t; + int ret; + + mutex_lock(&root->fs_info->trans_mutex); + + ret = 0; + if (transid) { + if (transid <= root->fs_info->last_trans_committed) + goto out_unlock; + + /* find specified transaction */ + list_for_each_entry(t, &root->fs_info->trans_list, list) { + if 
(t->transid == transid) { + cur_trans = t; + break; + } + if (t->transid > transid) + break; + } + ret = -EINVAL; + if (!cur_trans) + goto out_unlock; /* bad transid */ + } else { + /* find newest transaction that is committing | committed */ + list_for_each_entry_reverse(t, &root->fs_info->trans_list, + list) { + if (t->in_commit) { + if (t->commit_done) + goto out_unlock; + cur_trans = t; + break; + } + } + if (!cur_trans) + goto out_unlock; /* nothing committing|committed */ + } + + cur_trans->use_count++; + mutex_unlock(&root->fs_info->trans_mutex); + + wait_for_commit(root, cur_trans); + + mutex_lock(&root->fs_info->trans_mutex); + put_transaction(cur_trans); + ret = 0; +out_unlock: + mutex_unlock(&root->fs_info->trans_mutex); + return ret; +} + #if 0 /* * rate limit against the drop_snapshot code. This helps to slow down new diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e1908e6872fe..f104b57ad4ef 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -97,6 +97,7 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root int num_blocks); struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, int num_blocks); +int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, -- cgit v1.2.2 From 72fd032e94240d001b1d22f2c1dfd2592b02e44e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:41:32 -0400 Subject: Btrfs: add SNAP_CREATE_ASYNC ioctl Create a snap without waiting for it to commit to disk. The ioctl is ordered such that subsequent operations will not be contained by the created snapshot, and the commit is initiated, but the ioctl does not wait for the snapshot to commit to disk. We return the specific transid to userspace so that an application can wait for this specific snapshot creation to commit via the WAIT_SYNC ioctl. 
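From userspace, the intended pairing with WAIT_SYNC looks roughly like the sketch below, where dest_fd is an open fd of the directory that will contain the snapshot and src_fd is the source subvolume (assumes the btrfs_ioctl_async_vol_args layout and ioctl numbers defined in the diff that follows; error handling omitted):

	#include <string.h>
	#include <sys/ioctl.h>
	#include "ioctl.h"	/* the btrfs ioctl definitions from this patch */

	static void snap_and_wait(int dest_fd, long long src_fd)
	{
		struct btrfs_ioctl_async_vol_args args;

		memset(&args, 0, sizeof(args));
		args.fd = src_fd;	/* subvolume to snapshot */
		strncpy(args.name, "snap1", BTRFS_SNAPSHOT_NAME_MAX);

		/* returns as soon as the snapshot is wired into a commit */
		ioctl(dest_fd, BTRFS_IOC_SNAP_CREATE_ASYNC, &args);

		/* block until exactly that transaction is on disk */
		ioctl(dest_fd, BTRFS_IOC_WAIT_SYNC, &args.transid);
	}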
Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 107 ++++++++++++++++++++++++++++++++++++++++++------------- fs/btrfs/ioctl.h | 11 +++++- 2 files changed, 93 insertions(+), 25 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index dc5a19ed07f3..e8a26a3aac3e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -224,7 +224,8 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg) static noinline int create_subvol(struct btrfs_root *root, struct dentry *dentry, - char *name, int namelen) + char *name, int namelen, + u64 *async_transid) { struct btrfs_trans_handle *trans; struct btrfs_key key; @@ -338,13 +339,19 @@ static noinline int create_subvol(struct btrfs_root *root, d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); fail: - err = btrfs_commit_transaction(trans, root); + if (async_transid) { + *async_transid = trans->transid; + err = btrfs_commit_transaction_async(trans, root, 1); + } else { + err = btrfs_commit_transaction(trans, root); + } if (err && !ret) ret = err; return ret; } -static int create_snapshot(struct btrfs_root *root, struct dentry *dentry) +static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, + char *name, int namelen, u64 *async_transid) { struct inode *inode; struct btrfs_pending_snapshot *pending_snapshot; @@ -373,7 +380,14 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry) list_add(&pending_snapshot->list, &trans->transaction->pending_snapshots); - ret = btrfs_commit_transaction(trans, root->fs_info->extent_root); + if (async_transid) { + *async_transid = trans->transid; + ret = btrfs_commit_transaction_async(trans, + root->fs_info->extent_root, 1); + } else { + ret = btrfs_commit_transaction(trans, + root->fs_info->extent_root); + } BUG_ON(ret); ret = pending_snapshot->error; @@ -412,7 +426,8 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) */ static noinline int btrfs_mksubvol(struct path *parent, char *name, int namelen, - struct btrfs_root *snap_src) + struct btrfs_root *snap_src, + u64 *async_transid) { struct inode *dir = parent->dentry->d_inode; struct dentry *dentry; @@ -443,10 +458,11 @@ static noinline int btrfs_mksubvol(struct path *parent, goto out_up_read; if (snap_src) { - error = create_snapshot(snap_src, dentry); + error = create_snapshot(snap_src, dentry, + name, namelen, async_transid); } else { error = create_subvol(BTRFS_I(dir)->root, dentry, - name, namelen); + name, namelen, async_transid); } if (!error) fsnotify_mkdir(dir, dentry); @@ -799,11 +815,13 @@ out_unlock: return ret; } -static noinline int btrfs_ioctl_snap_create(struct file *file, - void __user *arg, int subvol) +static noinline int btrfs_ioctl_snap_create_transid(struct file *file, + char *name, + unsigned long fd, + int subvol, + u64 *transid) { struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root; - struct btrfs_ioctl_vol_args *vol_args; struct file *src_file; int namelen; int ret = 0; @@ -811,23 +829,18 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, if (root->fs_info->sb->s_flags & MS_RDONLY) return -EROFS; - vol_args = memdup_user(arg, sizeof(*vol_args)); - if (IS_ERR(vol_args)) - return PTR_ERR(vol_args); - - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - namelen = strlen(vol_args->name); - if (strchr(vol_args->name, '/')) { + namelen = strlen(name); + if (strchr(name, '/')) { ret = -EINVAL; goto out; } if (subvol) { - ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen, - 
NULL); + ret = btrfs_mksubvol(&file->f_path, name, namelen, + NULL, transid); } else { struct inode *src_inode; - src_file = fget(vol_args->fd); + src_file = fget(fd); if (!src_file) { ret = -EINVAL; goto out; @@ -841,12 +854,56 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, fput(src_file); goto out; } - ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen, - BTRFS_I(src_inode)->root); + ret = btrfs_mksubvol(&file->f_path, name, namelen, + BTRFS_I(src_inode)->root, + transid); fput(src_file); } out: + return ret; +} + +static noinline int btrfs_ioctl_snap_create(struct file *file, + void __user *arg, int subvol, + int async) +{ + struct btrfs_ioctl_vol_args *vol_args = NULL; + struct btrfs_ioctl_async_vol_args *async_vol_args = NULL; + char *name; + u64 fd; + u64 transid = 0; + int ret; + + if (async) { + async_vol_args = memdup_user(arg, sizeof(*async_vol_args)); + if (IS_ERR(async_vol_args)) + return PTR_ERR(async_vol_args); + + name = async_vol_args->name; + fd = async_vol_args->fd; + async_vol_args->name[BTRFS_SNAPSHOT_NAME_MAX] = '\0'; + } else { + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); + name = vol_args->name; + fd = vol_args->fd; + vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; + } + + ret = btrfs_ioctl_snap_create_transid(file, name, fd, + subvol, &transid); + + if (!ret && async) { + if (copy_to_user(arg + + offsetof(struct btrfs_ioctl_async_vol_args, + transid), &transid, sizeof(transid))) + return -EFAULT; + } + kfree(vol_args); + kfree(async_vol_args); + return ret; } @@ -2072,9 +2129,11 @@ long btrfs_ioctl(struct file *file, unsigned int case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); case BTRFS_IOC_SNAP_CREATE: - return btrfs_ioctl_snap_create(file, argp, 0); + return btrfs_ioctl_snap_create(file, argp, 0, 0); + case BTRFS_IOC_SNAP_CREATE_ASYNC: + return btrfs_ioctl_snap_create(file, argp, 0, 1); case BTRFS_IOC_SUBVOL_CREATE: - return btrfs_ioctl_snap_create(file, argp, 1); + return btrfs_ioctl_snap_create(file, argp, 1, 0); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 16e1442523b7..17c99ebdf960 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -22,14 +22,21 @@ #define BTRFS_IOCTL_MAGIC 0x94 #define BTRFS_VOL_NAME_MAX 255 -#define BTRFS_PATH_NAME_MAX 4087 /* this should be 4k */ +#define BTRFS_PATH_NAME_MAX 4087 struct btrfs_ioctl_vol_args { __s64 fd; char name[BTRFS_PATH_NAME_MAX + 1]; }; +#define BTRFS_SNAPSHOT_NAME_MAX 4079 +struct btrfs_ioctl_async_vol_args { + __s64 fd; + __u64 transid; + char name[BTRFS_SNAPSHOT_NAME_MAX + 1]; +}; + #define BTRFS_INO_LOOKUP_PATH_MAX 4080 struct btrfs_ioctl_ino_lookup_args { __u64 treeid; @@ -180,4 +187,6 @@ struct btrfs_ioctl_space_args { struct btrfs_ioctl_space_args) #define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) +#define BTRFS_IOC_SNAP_CREATE_ASYNC _IOW(BTRFS_IOCTL_MAGIC, 23, \ + struct btrfs_ioctl_async_vol_args) #endif -- cgit v1.2.2 From 531cb13f1e417c060b54f979e1659ecd69bea650 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:41:32 -0400 Subject: Btrfs: make SNAP_DESTROY async There is no reason to force an immediate commit when deleting a snapshot. Users have some expectation that space from a deleted snapshot be freed immediately, but even if we do commit the reclaim is a background process. 
If users _do_ want the deletion to be durable, they can call 'sync'. Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index e8a26a3aac3e..fdd88f2f1ece 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1351,7 +1351,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, BUG_ON(ret); } - ret = btrfs_commit_transaction(trans, root); + ret = btrfs_end_transaction(trans, root); BUG_ON(ret); inode->i_flags |= S_DEAD; out_up_write: -- cgit v1.2.2 From 4260f7c7516f4c209cf0ca34fda99cc9a0847772 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 29 Oct 2010 15:46:43 -0400 Subject: Btrfs: allow subvol deletion by unprivileged user with -o user_subvol_rm_allowed Add a mount option user_subvol_rm_allowed that allows users to delete a (potentially non-empty!) subvol when they would otherwise be allowed to do an rmdir(2). We duplicate the may_delete() checks from the core VFS code to implement identical security checks (minus the directory size check). We additionally require that the user has write+exec permission on the subvol root inode. Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/ioctl.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++--- fs/btrfs/super.c | 5 +++ 3 files changed, 116 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e5d66b13c175..8db9234f6b41 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1234,6 +1234,7 @@ struct btrfs_root { #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) +#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index fdd88f2f1ece..463d91b4dd3a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -409,6 +409,76 @@ fail: return ret; } +/* copy of check_sticky in fs/namei.c() +* It's inline, so penalty for filesystems that don't use sticky bit is +* minimal. +*/ +static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode) +{ + uid_t fsuid = current_fsuid(); + + if (!(dir->i_mode & S_ISVTX)) + return 0; + if (inode->i_uid == fsuid) + return 0; + if (dir->i_uid == fsuid) + return 0; + return !capable(CAP_FOWNER); +} + +/* copy of may_delete in fs/namei.c() + * Check whether we can remove a link victim from directory dir, check + * whether the type of victim is right. + * 1. We can't do it if dir is read-only (done in permission()) + * 2. We should have write and exec permissions on dir + * 3. We can't remove anything from append-only dir + * 4. We can't do anything with immutable dir (done in permission()) + * 5. If the sticky bit on dir is set we should either + * a. be owner of dir, or + * b. be owner of victim, or + * c. have CAP_FOWNER capability + * 6. If the victim is append-only or immutable we can't do anything with + * links pointing to it. + * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. + * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. + * 9. We can't remove a root or mountpoint. + * 10. We don't allow removal of NFS sillyrenamed files; it's handled by + * nfs_async_unlink().
+ */ + +static int btrfs_may_delete(struct inode *dir,struct dentry *victim,int isdir) +{ + int error; + + if (!victim->d_inode) + return -ENOENT; + + BUG_ON(victim->d_parent->d_inode != dir); + audit_inode_child(victim, dir); + + error = inode_permission(dir, MAY_WRITE | MAY_EXEC); + if (error) + return error; + if (IS_APPEND(dir)) + return -EPERM; + if (btrfs_check_sticky(dir, victim->d_inode)|| + IS_APPEND(victim->d_inode)|| + IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) + return -EPERM; + if (isdir) { + if (!S_ISDIR(victim->d_inode->i_mode)) + return -ENOTDIR; + if (IS_ROOT(victim)) + return -EBUSY; + } else if (S_ISDIR(victim->d_inode->i_mode)) + return -EISDIR; + if (IS_DEADDIR(dir)) + return -ENOENT; + if (victim->d_flags & DCACHE_NFSFS_RENAMED) + return -EBUSY; + return 0; +} + /* copy of may_create in fs/namei.c() */ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) { @@ -1274,9 +1344,6 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, int ret; int err = 0; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); @@ -1306,13 +1373,51 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, } inode = dentry->d_inode; + dest = BTRFS_I(inode)->root; + if (!capable(CAP_SYS_ADMIN)){ + /* + * Regular user. Only allow this with a special mount + * option, when the user has write+exec access to the + * subvol root, and when rmdir(2) would have been + * allowed. + * + * Note that this does _not_ check that the subvol is + * empty or doesn't contain data that we wouldn't + * otherwise be able to delete. + * + * Users who want to delete empty subvols should try + * rmdir(2). + */ + err = -EPERM; + if (!btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) + goto out_dput; + + /* + * Do not allow deletion if the parent dir is the same + * as the dir to be deleted. That means the ioctl + * must be called on the dentry referencing the root + * of the subvol, not a random directory contained + * within it.
+ */ + err = -EINVAL; + if (root == dest) + goto out_dput; + + err = inode_permission(inode, MAY_WRITE | MAY_EXEC); + if (err) + goto out_dput; + + /* check if subvolume may be deleted by a non-root user */ + err = btrfs_may_delete(dir, dentry, 1); + if (err) + goto out_dput; + } + if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { err = -EINVAL; goto out_dput; } - dest = BTRFS_I(inode)->root; - mutex_lock(&inode->i_mutex); err = d_invalidate(dentry); if (err) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0002e6d1a16f..718b10de2049 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -71,6 +71,7 @@ enum { Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_err, + Opt_user_subvol_rm_allowed, }; static match_table_t tokens = { @@ -96,6 +97,7 @@ static match_table_t tokens = { {Opt_discard, "discard"}, {Opt_space_cache, "space_cache"}, {Opt_clear_cache, "clear_cache"}, + {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, {Opt_err, NULL}, }; @@ -246,6 +248,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) printk(KERN_INFO "btrfs: force clearing of disk cache\n"); btrfs_set_opt(info->mount_opt, CLEAR_CACHE); break; + case Opt_user_subvol_rm_allowed: + btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); -- cgit v1.2.2 From 6418c96107a2b399848bb8cfc6e29f11ca74fb94 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sat, 30 Oct 2010 07:34:24 -0400 Subject: Btrfs: deal with errors from updating the tree log During unlink we remove any references to the inode from the tree log. It can return -ENOENT and other errors, and this changes the unlink code to deal with it. Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 609f3bbbd1ed..5132c9af888a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2676,7 +2676,8 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, index); - BUG_ON(ret); + if (ret == -ENOENT) + ret = 0; err: btrfs_free_path(path); if (ret) -- cgit v1.2.2 From 37004c42f7240035bc2726c340c4efa726b4818e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:17 +0100 Subject: btrfs: close_bdev_exclusive() should use the same @flags as the matching open_bdev_exclusive() In the failure path of __btrfs_open_devices(), close_bdev_exclusive() is called with @flags which doesn't match the one used during open_bdev_exclusive(). Fix it. 
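The invariant being restored is simply that the close must mirror the open (sketch):

	bdev = open_bdev_exclusive(device->name, flags, holder);
	...
	close_bdev_exclusive(bdev, flags);	/* same @flags, not a hardcoded FMODE_READ */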
Signed-off-by: Tejun Heo Cc: Chris Mason --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index cc04dc1445d6..d39596224d21 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -638,7 +638,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, error_brelse: brelse(bh); error_close: - close_bdev_exclusive(bdev, FMODE_READ); + close_bdev_exclusive(bdev, flags); error: continue; } -- cgit v1.2.2 From e525fd89d380c4a94c0d63913a1dd1a593ed25e7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:17 +0100 Subject: block: make blkdev_get/put() handle exclusive access Over time, the block layer has accumulated a set of APIs dealing with bdev open, close, claim and release. * blkdev_get/put() are the primary open and close functions. * bd_claim/release() deal with exclusive open. * open/close_bdev_exclusive() are combinations of open and claim and the other way around, respectively. * bd_link/unlink_disk_holder() to create and remove holder/slave symlinks. * open_by_devnum() wraps bdget() + blkdev_get(). The interface is a bit confusing and the decoupling of open and claim makes it impossible to properly guarantee exclusive access, as an in-kernel open + claim sequence can disturb the existing exclusive open even before the block layer knows the current open is for another exclusive access. Reorganize the interface such that, * blkdev_get() is extended to include exclusive access management. @holder argument is added and, if @FMODE_EXCL is specified, it will gain exclusive access atomically w.r.t. other exclusive accesses. * blkdev_put() is similarly extended. It now takes @mode argument and if @FMODE_EXCL is set, it releases an exclusive access. Also, when the last exclusive claim is released, the holder/slave symlinks are removed automatically. * bd_claim/release() and close_bdev_exclusive() are no longer necessary and either made static or removed. * bd_link_disk_holder() remains the same but bd_unlink_disk_holder() is no longer necessary and removed. * open_bdev_exclusive() becomes a simple wrapper around lookup_bdev() and blkdev_get(). It also has an unexpected extra bdev_read_only() test which probably should be moved into blkdev_get(). * open_by_devnum() is modified to take @holder argument and pass it to blkdev_get(). Most of bdev open/close operations are unified into blkdev_get/put() and most exclusive accesses are tested atomically at the open time (as it should). This cleans up code and removes some, both valid and invalid, but unnecessary all the same, corner cases. open_bdev_exclusive() and open_by_devnum() can use further cleanup - rename to blkdev_get_by_path() and blkdev_get_by_devt() and drop special features. Well, let's leave them for another day. Most conversions are straight-forward. drbd conversion is a bit more involved as there was some reordering, but the logic should stay the same.
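For the btrfs conversion below, the resulting open/close pairing is roughly this sketch (@holder is whatever token identifies the exclusive claimer):

	/* the open side still goes through open_bdev_exclusive(), which claims */
	bdev = open_bdev_exclusive(device->name, flags, holder);
	...
	/* the matching put must now carry FMODE_EXCL to drop that claim */
	blkdev_put(bdev, flags | FMODE_EXCL);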
Signed-off-by: Tejun Heo Acked-by: Neil Brown Acked-by: Ryusuke Konishi Acked-by: Mike Snitzer Acked-by: Philipp Reisner Cc: Peter Osterlund Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Jan Kara Cc: Andrew Morton Cc: Andreas Dilger Cc: "Theodore Ts'o" Cc: Mark Fasheh Cc: Joel Becker Cc: Alex Elder Cc: Christoph Hellwig Cc: dm-devel@redhat.com Cc: drbd-dev@lists.linbit.com Cc: Leo Chen Cc: Scott Branden Cc: Chris Mason Cc: Steven Whitehouse Cc: Dave Kleikamp Cc: Joern Engel Cc: reiserfs-devel@vger.kernel.org Cc: Alexander Viro --- fs/btrfs/volumes.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d39596224d21..f1b729d3b883 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -489,7 +489,7 @@ again: continue; if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode | FMODE_EXCL); device->bdev = NULL; fs_devices->open_devices--; } @@ -523,7 +523,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode | FMODE_EXCL); fs_devices->open_devices--; } if (device->writeable) { @@ -638,7 +638,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, error_brelse: brelse(bh); error_close: - close_bdev_exclusive(bdev, flags); + blkdev_put(bdev, flags | FMODE_EXCL); error: continue; } @@ -716,7 +716,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, brelse(bh); error_close: - close_bdev_exclusive(bdev, flags); + blkdev_put(bdev, flags | FMODE_EXCL); error: mutex_unlock(&uuid_mutex); return ret; @@ -1244,7 +1244,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) root->fs_info->fs_devices->latest_bdev = next_device->bdev; if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode | FMODE_EXCL); device->bdev = NULL; device->fs_devices->open_devices--; } @@ -1287,7 +1287,7 @@ error_brelse: brelse(bh); error_close: if (bdev) - close_bdev_exclusive(bdev, FMODE_READ); + blkdev_put(bdev, FMODE_READ | FMODE_EXCL); out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); @@ -1565,7 +1565,7 @@ out: mutex_unlock(&root->fs_info->volume_mutex); return ret; error: - close_bdev_exclusive(bdev, 0); + blkdev_put(bdev, FMODE_EXCL); if (seeding_dev) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); -- cgit v1.2.2 From d4d77629953eabd3c14f6fa5746f6b28babfc55f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:18 +0100 Subject: block: clean up blkdev_get() wrappers and their users After recent blkdev_get() modifications, open_by_devnum() and open_bdev_exclusive() are simple wrappers around blkdev_get(). Replace them with blkdev_get_by_dev() and blkdev_get_by_path(). blkdev_get_by_dev() is identical to open_by_devnum(). blkdev_get_by_path() is slightly different in that it doesn't automatically add %FMODE_EXCL to @mode. All users are converted. Most conversions are mechanical and don't introduce any behavior difference. There are several exceptions. * btrfs now sets FMODE_EXCL in btrfs_device->mode, so there's no reason to OR it explicitly on blkdev_put(). * gfs2, nilfs2 and the generic mount_bdev() now set FMODE_EXCL in sb->s_mode. * With the above changes, sb->s_mode now always should contain FMODE_EXCL. 
WARN_ON_ONCE() added to kill_block_super() to detect errors. The new blkdev_get_*() functions are with proper docbook comments. While at it, add function description to blkdev_get() too. Signed-off-by: Tejun Heo Cc: Philipp Reisner Cc: Neil Brown Cc: Mike Snitzer Cc: Joern Engel Cc: Chris Mason Cc: Jan Kara Cc: "Theodore Ts'o" Cc: KONISHI Ryusuke Cc: reiserfs-devel@vger.kernel.org Cc: xfs-masters@oss.sgi.com Cc: Alexander Viro --- fs/btrfs/volumes.c | 24 ++++++++++++++---------- fs/btrfs/volumes.h | 2 +- 2 files changed, 15 insertions(+), 11 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f1b729d3b883..95324e9f9280 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -489,7 +489,7 @@ again: continue; if (device->bdev) { - blkdev_put(device->bdev, device->mode | FMODE_EXCL); + blkdev_put(device->bdev, device->mode); device->bdev = NULL; fs_devices->open_devices--; } @@ -523,7 +523,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { - blkdev_put(device->bdev, device->mode | FMODE_EXCL); + blkdev_put(device->bdev, device->mode); fs_devices->open_devices--; } if (device->writeable) { @@ -580,13 +580,15 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int seeding = 1; int ret = 0; + flags |= FMODE_EXCL; + list_for_each_entry(device, head, dev_list) { if (device->bdev) continue; if (!device->name) continue; - bdev = open_bdev_exclusive(device->name, flags, holder); + bdev = blkdev_get_by_path(device->name, flags, holder); if (IS_ERR(bdev)) { printk(KERN_INFO "open %s failed\n", device->name); goto error; @@ -638,7 +640,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, error_brelse: brelse(bh); error_close: - blkdev_put(bdev, flags | FMODE_EXCL); + blkdev_put(bdev, flags); error: continue; } @@ -684,7 +686,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, mutex_lock(&uuid_mutex); - bdev = open_bdev_exclusive(path, flags, holder); + flags |= FMODE_EXCL; + bdev = blkdev_get_by_path(path, flags, holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); @@ -716,7 +719,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, brelse(bh); error_close: - blkdev_put(bdev, flags | FMODE_EXCL); + blkdev_put(bdev, flags); error: mutex_unlock(&uuid_mutex); return ret; @@ -1179,8 +1182,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto out; } } else { - bdev = open_bdev_exclusive(device_path, FMODE_READ, - root->fs_info->bdev_holder); + bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL, + root->fs_info->bdev_holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; @@ -1244,7 +1247,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) root->fs_info->fs_devices->latest_bdev = next_device->bdev; if (device->bdev) { - blkdev_put(device->bdev, device->mode | FMODE_EXCL); + blkdev_put(device->bdev, device->mode); device->bdev = NULL; device->fs_devices->open_devices--; } @@ -1439,7 +1442,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) return -EINVAL; - bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder); + bdev = blkdev_get_by_path(device_path, FMODE_EXCL, + root->fs_info->bdev_holder); if (IS_ERR(bdev)) return PTR_ERR(bdev); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 
2b638b6e4eea..856e75770304 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -49,7 +49,7 @@ struct btrfs_device { struct block_device *bdev; - /* the mode sent to open_bdev_exclusive */ + /* the mode sent to blkdev_get */ fmode_t mode; char *name; -- cgit v1.2.2 From 784b4e29a26617589edd290dd2919735e190c06e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 21 Nov 2010 22:20:49 -0500 Subject: Btrfs: add migrate page for metadata inode Migrate page will directly call the btrfs btree writepage function, which isn't actually allowed. Our writepage assumes that you have locked the extent_buffer and flagged the block as written. Without doing these steps, we can corrupt metadata blocks. A later commit will remove the btree writepage function since it is really only safely used internally by btrfs. We use writepages for everything else. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b40dfe48017b..a67b98d58c2a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -28,6 +28,7 @@ #include #include #include +#include <linux/migrate.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -355,6 +356,8 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, btrfs_header_generation(eb)); BUG_ON(ret); + WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN)); + found_start = btrfs_header_bytenr(eb); if (found_start != start) { WARN_ON(1); @@ -693,6 +696,26 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, __btree_submit_bio_done); } +static int btree_migratepage(struct address_space *mapping, + struct page *newpage, struct page *page) +{ + /* + * we can't safely write a btree page from here, + * we haven't done the locking hook + */ + if (PageDirty(page)) + return -EAGAIN; + /* + * Buffers may be managed in a filesystem specific way. + * We must have no buffers or drop them. + */ + if (page_has_private(page) && + !try_to_release_page(page, GFP_KERNEL)) + return -EAGAIN; + + return migrate_page(mapping, newpage, page); +} + static int btree_writepage(struct page *page, struct writeback_control *wbc) { struct extent_io_tree *tree; @@ -707,8 +730,7 @@ static int btree_writepage(struct page *page, struct writeback_control *wbc) } redirty_page_for_writepage(wbc, page); - eb = btrfs_find_tree_block(root, page_offset(page), - PAGE_CACHE_SIZE); + eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE); WARN_ON(!eb); was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); @@ -799,6 +821,7 @@ static const struct address_space_operations btree_aops = { .releasepage = btree_releasepage, .invalidatepage = btree_invalidatepage, .sync_page = block_sync_page, + .migratepage = btree_migratepage, }; int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, -- cgit v1.2.2 From 0c56fa9662927354255f2f64617d1de61fc03db9 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Mon, 22 Nov 2010 03:01:39 +0000 Subject: btrfs: fix free dip and dip->csums twice bio_endio() will free dip and dip->csums, so dip and dip->csums will be freed twice. Fix it.
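In miniature, the rule the fix enforces (sketch): once the bio can reach bio_endio(), the endio path owns dip and dip->csums, so the submit path may only branch to the handler and must never free them itself:

	ret = btrfs_map_bio(root, rw, bio, 0, 1);
	if (ret)
		goto free_ordered;	/* endio will free dip and dip->csums */
	return;
	/* the removed out_err label also did kfree(dip->csums); kfree(dip); */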
Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5132c9af888a..8c027aa0020a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5731,7 +5731,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); if (ret) - goto out_err; + goto free_ordered; if (write && !skip_sum) { ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, @@ -5740,7 +5740,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, __btrfs_submit_bio_start_direct_io, __btrfs_submit_bio_done); if (ret) - goto out_err; + goto free_ordered; return; } else if (!skip_sum) btrfs_lookup_bio_sums_dio(root, inode, bio, @@ -5748,11 +5748,8 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, ret = btrfs_map_bio(root, rw, bio, 0, 1); if (ret) - goto out_err; + goto free_ordered; return; -out_err: - kfree(dip->csums); - kfree(dip); free_ordered: /* * If this is a write, we need to clean up the reserved space and kill -- cgit v1.2.2 From 88f794ede7fadd4b63135b94d1561c1f2d5eb5f5 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Mon, 22 Nov 2010 03:02:55 +0000 Subject: btrfs: cleanup duplicate bio allocating functions extent_bio_alloc() and compressed_bio_alloc() are similar, cleanup similar source code. Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 15 +-------------- fs/btrfs/extent_io.c | 8 ++++---- fs/btrfs/extent_io.h | 3 +++ 3 files changed, 8 insertions(+), 18 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 7845d1f7d1d9..b50bc4bd5c56 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -91,23 +91,10 @@ static inline int compressed_bio_size(struct btrfs_root *root, static struct bio *compressed_bio_alloc(struct block_device *bdev, u64 first_byte, gfp_t gfp_flags) { - struct bio *bio; int nr_vecs; nr_vecs = bio_get_nr_vecs(bdev); - bio = bio_alloc(gfp_flags, nr_vecs); - - if (bio == NULL && (current->flags & PF_MEMALLOC)) { - while (!bio && (nr_vecs /= 2)) - bio = bio_alloc(gfp_flags, nr_vecs); - } - - if (bio) { - bio->bi_size = 0; - bio->bi_bdev = bdev; - bio->bi_sector = first_byte >> 9; - } - return bio; + return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags); } static int check_compressed_csum(struct inode *inode, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3b7eaee0f912..f60aa3c35c23 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1828,9 +1828,9 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) bio_put(bio); } -static struct bio * -extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, - gfp_t gfp_flags) +struct bio * +btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, + gfp_t gfp_flags) { struct bio *bio; @@ -1919,7 +1919,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, else nr = bio_get_nr_vecs(bdev); - bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); + bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 1c6d4f342ef7..4183c8178f01 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -310,4 +310,7 @@ int 
extent_clear_unlock_delalloc(struct inode *inode, struct extent_io_tree *tree, u64 start, u64 end, struct page *locked_page, unsigned long op); +struct bio * +btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, + gfp_t gfp_flags); #endif -- cgit v1.2.2 From e65e1535542931e51189832264cd282e5899e4b9 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Mon, 22 Nov 2010 03:04:43 +0000 Subject: btrfs: fix panic caused by direct IO btrfs paniced when we write >64KB data by direct IO at one time. Reproduce steps: # mkfs.btrfs /dev/sda5 /dev/sda6 # mount /dev/sda5 /mnt # dd if=/dev/zero of=/mnt/tmpfile bs=100K count=1 oflag=direct Then btrfs paniced: mapping failed logical 1103155200 bio len 69632 len 12288 ------------[ cut here ]------------ kernel BUG at fs/btrfs/volumes.c:3010! [SNIP] Pid: 1992, comm: btrfs-worker-0 Not tainted 2.6.37-rc1 #1 D2399/PRIMERGY RIP: 0010:[] [] btrfs_map_bio+0x202/0x210 [btrfs] [SNIP] Call Trace: [] __btrfs_submit_bio_done+0x1b/0x20 [btrfs] [] run_one_async_done+0x9f/0xb0 [btrfs] [] run_ordered_completions+0x80/0xc0 [btrfs] [] worker_loop+0x154/0x5f0 [btrfs] [] ? worker_loop+0x0/0x5f0 [btrfs] [] ? worker_loop+0x0/0x5f0 [btrfs] [] kthread+0x96/0xa0 [] kernel_thread_helper+0x4/0x10 [] ? kthread+0x0/0xa0 [] ? kernel_thread_helper+0x0/0x10 We fix this problem by splitting bios when we submit bios. Reported-by: Tsutomu Itoh Signed-off-by: Miao Xie Tested-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 205 +++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 184 insertions(+), 21 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8c027aa0020a..a47e4faa8c46 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5535,13 +5535,21 @@ struct btrfs_dio_private { u64 bytes; u32 *csums; void *private; + + /* number of bios pending for this dio */ + atomic_t pending_bios; + + /* IO errors */ + int errors; + + struct bio *orig_bio; }; static void btrfs_endio_direct_read(struct bio *bio, int err) { + struct btrfs_dio_private *dip = bio->bi_private; struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; struct bio_vec *bvec = bio->bi_io_vec; - struct btrfs_dio_private *dip = bio->bi_private; struct inode *inode = dip->inode; struct btrfs_root *root = BTRFS_I(inode)->root; u64 start; @@ -5684,6 +5692,176 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, return 0; } +static void btrfs_end_dio_bio(struct bio *bio, int err) +{ + struct btrfs_dio_private *dip = bio->bi_private; + + if (err) { + printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " + "disk_bytenr %lu len %u err no %d\n", + dip->inode->i_ino, bio->bi_rw, bio->bi_sector, + bio->bi_size, err); + dip->errors = 1; + + /* + * before atomic variable goto zero, we must make sure + * dip->errors is perceived to be set. 
+ */ + smp_mb__before_atomic_dec(); + } + + /* if there are more bios still pending for this dio, just exit */ + if (!atomic_dec_and_test(&dip->pending_bios)) + goto out; + + if (dip->errors) + bio_io_error(dip->orig_bio); + else { + set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags); + bio_endio(dip->orig_bio, 0); + } +out: + bio_put(bio); +} + +static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, + u64 first_sector, gfp_t gfp_flags) +{ + int nr_vecs = bio_get_nr_vecs(bdev); + return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); +} + +static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, + int rw, u64 file_offset, int skip_sum, + u32 *csums) +{ + int write = rw & REQ_WRITE; + struct btrfs_root *root = BTRFS_I(inode)->root; + int ret; + + bio_get(bio); + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + if (ret) + goto err; + + if (write && !skip_sum) { + ret = btrfs_wq_submit_bio(root->fs_info, + inode, rw, bio, 0, 0, + file_offset, + __btrfs_submit_bio_start_direct_io, + __btrfs_submit_bio_done); + goto err; + } else if (!skip_sum) + btrfs_lookup_bio_sums_dio(root, inode, bio, + file_offset, csums); + + ret = btrfs_map_bio(root, rw, bio, 0, 1); +err: + bio_put(bio); + return ret; +} + +static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, + int skip_sum) +{ + struct inode *inode = dip->inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; + struct bio *bio; + struct bio *orig_bio = dip->orig_bio; + struct bio_vec *bvec = orig_bio->bi_io_vec; + u64 start_sector = orig_bio->bi_sector; + u64 file_offset = dip->logical_offset; + u64 submit_len = 0; + u64 map_length; + int nr_pages = 0; + u32 *csums = dip->csums; + int ret = 0; + + bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); + if (!bio) + return -ENOMEM; + bio->bi_private = dip; + bio->bi_end_io = btrfs_end_dio_bio; + atomic_inc(&dip->pending_bios); + + map_length = orig_bio->bi_size; + ret = btrfs_map_block(map_tree, READ, start_sector << 9, + &map_length, NULL, 0); + if (ret) { + bio_put(bio); + return -EIO; + } + + while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { + if (unlikely(map_length < submit_len + bvec->bv_len || + bio_add_page(bio, bvec->bv_page, bvec->bv_len, + bvec->bv_offset) < bvec->bv_len)) { + /* + * inc the count before we submit the bio so + * we know the end IO handler won't happen before + * we inc the count. 
Otherwise, the dip might get freed + * before we're done setting it up + */ + atomic_inc(&dip->pending_bios); + ret = __btrfs_submit_dio_bio(bio, inode, rw, + file_offset, skip_sum, + csums); + if (ret) { + bio_put(bio); + atomic_dec(&dip->pending_bios); + goto out_err; + } + + if (!skip_sum) + csums = csums + nr_pages; + start_sector += submit_len >> 9; + file_offset += submit_len; + + submit_len = 0; + nr_pages = 0; + + bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, + start_sector, GFP_NOFS); + if (!bio) + goto out_err; + bio->bi_private = dip; + bio->bi_end_io = btrfs_end_dio_bio; + + map_length = orig_bio->bi_size; + ret = btrfs_map_block(map_tree, READ, start_sector << 9, + &map_length, NULL, 0); + if (ret) { + bio_put(bio); + goto out_err; + } + } else { + submit_len += bvec->bv_len; + nr_pages ++; + bvec++; + } + } + + ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, + csums); + if (!ret) + return 0; + + bio_put(bio); +out_err: + dip->errors = 1; + /* + * before atomic variable goto zero, we must + * make sure dip->errors is perceived to be set. + */ + smp_mb__before_atomic_dec(); + if (atomic_dec_and_test(&dip->pending_bios)) + bio_io_error(dip->orig_bio); + + /* bio_end_io() will handle error, so we needn't return it */ + return 0; +} + static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, loff_t file_offset) { @@ -5723,33 +5901,18 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, dip->disk_bytenr = (u64)bio->bi_sector << 9; bio->bi_private = dip; + dip->errors = 0; + dip->orig_bio = bio; + atomic_set(&dip->pending_bios, 0); if (write) bio->bi_end_io = btrfs_endio_direct_write; else bio->bi_end_io = btrfs_endio_direct_read; - ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); - if (ret) - goto free_ordered; - - if (write && !skip_sum) { - ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, - inode, rw, bio, 0, 0, - dip->logical_offset, - __btrfs_submit_bio_start_direct_io, - __btrfs_submit_bio_done); - if (ret) - goto free_ordered; + ret = btrfs_submit_direct_hook(rw, dip, skip_sum); + if (!ret) return; - } else if (!skip_sum) - btrfs_lookup_bio_sums_dio(root, inode, bio, - dip->logical_offset, dip->csums); - - ret = btrfs_map_bio(root, rw, bio, 0, 1); - if (ret) - goto free_ordered; - return; free_ordered: /* * If this is a write, we need to clean up the reserved space and kill -- cgit v1.2.2 From 6f33434850ed87dc5e56b60ebbad3d3cf405f296 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Fri, 12 Nov 2010 23:17:56 +0000 Subject: btrfs: Fix early enospc because 'unused' calculated with wrong sign. 'unused' calculated with wrong sign in reserve_metadata_bytes(). This might have lead to unwanted over-reservations. Signed-off-by: Arne Jansen Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a541bc87f04c..ddaf6340fe7f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3413,7 +3413,7 @@ again: * our reservation. 
*/ if (unused <= space_info->total_bytes) { - unused -= space_info->total_bytes; + unused = space_info->total_bytes - unused; if (unused >= num_bytes) { if (!reserved) space_info->bytes_reserved += orig_bytes; -- cgit v1.2.2 From 0de90876c6cb774d4a424dafc1fc9ec50071b81b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 19 Nov 2010 13:40:41 +0000 Subject: Btrfs: handle the space_cache option properly When I added the clear_cache option I screwed up and took the break out of the space_cache case statement, so whenever you mount with space_cache you also get clear_cache. That does you no good if you set space_cache in fstab, since the option is then always present and the cache gets cleared on every mount. This patch adds the break back in properly. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 718b10de2049..66e4612a7916 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -244,6 +244,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_space_cache: printk(KERN_INFO "btrfs: enabling disk space caching\n"); btrfs_set_opt(info->mount_opt, SPACE_CACHE); + break; case Opt_clear_cache: printk(KERN_INFO "btrfs: force clearing of disk cache\n"); btrfs_set_opt(info->mount_opt, CLEAR_CACHE); -- cgit v1.2.2 From 2a6b8daedaf3682bed3fc1d4e2390491f6e19c49 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 19 Nov 2010 01:36:10 +0000 Subject: btrfs: Check if dest_offset is block-size aligned before cloning file We've done the check for src_offset and src_length, and we should also check dest_offset, otherwise we'll corrupt the destination file: (After cloning file1 to file2 with unaligned dest_offset) # cat /mnt/file2 cat: /mnt/file2: Input/output error Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 463d91b4dd3a..81b47bd8a55a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1669,12 +1669,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, olen = len = src->i_size - off; /* if we extend to eof, continue to block boundary */ if (off + len == src->i_size) - len = ((src->i_size + bs-1) & ~(bs-1)) - - off; + len = ALIGN(src->i_size, bs) - off; /* verify the end result is block aligned */ - if ((off & (bs-1)) || - ((off + len) & (bs-1))) + if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) || + !IS_ALIGNED(destoff, bs)) goto out_unlock; /* do any pending delalloc/csum calc on src, one way or -- cgit v1.2.2 From 5f3888ff6f0b9dce60705765752b788a92557644 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 19 Nov 2010 01:36:34 +0000 Subject: btrfs: Set file size correctly in file clone Set src_offset = 0, src_length = 20K, dest_offset = 20K. 
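(Aside on the alignment helpers the dest_offset patch above switches to: a minimal userspace sketch of what ALIGN() and IS_ALIGNED() compute for a power-of-two block size. The macro bodies below are illustrative reconstructions under local names, not copies of the kernel headers.)

#include <assert.h>

/* illustrative stand-ins for the kernel's ALIGN()/IS_ALIGNED(),
 * assuming a is a power of two, as a filesystem block size is */
#define ALIGN_UP(x, a)       (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED_TO(x, a)  (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long bs = 4096;

	/* rounding i_size up to the next block boundary, as the clone
	 * ioctl does when the range extends to EOF */
	assert(ALIGN_UP(30720, bs) == 32768);

	/* the checks the patch enforces: source offset, end offset and
	 * now the destination offset must sit on block boundaries */
	assert(IS_ALIGNED_TO(20480, bs));	/* 20K: accepted */
	assert(!IS_ALIGNED_TO(20481, bs));	/* unaligned: rejected */
	return 0;
}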
And the original file size of the dest file 'file2' is 30K: # ls -l /mnt/file2 -rw-r--r-- 1 root root 30720 Nov 18 16:42 /mnt/file2 Now clone file1 to file2, the dest file should be 40K, but it still shows 30K: # ls -l /mnt/file2 -rw-r--r-- 1 root root 30720 Nov 18 16:42 /mnt/file2 Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 81b47bd8a55a..6b4bfa72bf8d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1873,8 +1873,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, * but shouldn't round up the file size */ endoff = new_key.offset + datal; - if (endoff > off+olen) - endoff = off+olen; + if (endoff > destoff+olen) + endoff = destoff+olen; if (endoff > inode->i_size) btrfs_i_size_write(inode, endoff); -- cgit v1.2.2 From f209561ad83c5ffd561dc4bc3a3c90b704fe9231 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 19 Nov 2010 02:05:24 +0000 Subject: btrfs: Show device attr correctly for symlinks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Symlinks and files of other types show different device numbers, though they are on the same partition: $ touch tmp; ln -s tmp tmp2; stat tmp tmp2 File: `tmp' Size: 0 Blocks: 0 IO Block: 4096 regular empty file Device: 15h/21d Inode: 984027 Links: 1 --- snip --- File: `tmp2' -> `tmp' Size: 3 Blocks: 0 IO Block: 4096 symbolic link Device: 13h/19d Inode: 984028 Links: 1 Reported-by: Toke Høiland-Jørgensen Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a47e4faa8c46..eed357ff6c99 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7299,6 +7299,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, + .getattr = btrfs_getattr, .permission = btrfs_permission, .setxattr = btrfs_setxattr, .getxattr = btrfs_getxattr, -- cgit v1.2.2 From 0410c94aff109c02b6774a0ed00114987cda7ce5 Mon Sep 17 00:00:00 2001 From: Mariusz Kozlowski Date: Sat, 20 Nov 2010 12:03:07 +0000 Subject: btrfs: make 1-bit signed fields unsigned Fixes these sparse warnings: fs/btrfs/ctree.h:811:17: error: dubious one-bit signed bitfield fs/btrfs/ctree.h:812:20: error: dubious one-bit signed bitfield fs/btrfs/ctree.h:813:19: error: dubious one-bit signed bitfield Signed-off-by: Mariusz Kozlowski Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8db9234f6b41..af52f6d7a4d8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -808,9 +808,9 @@ struct btrfs_block_group_cache { int extents_thresh; int free_extents; int total_bitmaps; - int ro:1; - int dirty:1; - int iref:1; + unsigned int ro:1; + unsigned int dirty:1; + unsigned int iref:1; int disk_cache_state; -- cgit v1.2.2 From 2ede0daf01549cecf4bb0962c46dc47382047523 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 18:54:54 +0000 Subject: Btrfs: handle NFS lookups properly People kept reporting NFS issues, specifically getting ESTALE a lot. 
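(Aside on the one-bit bitfield fix above: a short demo of why sparse flags a signed one-bit bitfield as dubious. With int ro:1 the field can only hold 0 or -1 on common ABIs, so a test like ro == 1 can never be true. The struct names here are hypothetical, not from the patch.)

#include <stdio.h>

struct flags_signed   { int ro:1; };          /* dubious: holds 0 or -1 */
struct flags_unsigned { unsigned int ro:1; }; /* holds 0 or 1, as intended */

int main(void)
{
	struct flags_signed s = { .ro = 1 };
	struct flags_unsigned u = { .ro = 1 };

	/* on common ABIs this prints "signed: -1, unsigned: 1" */
	printf("signed: %d, unsigned: %u\n", s.ro, u.ro);
	return 0;
}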
I figured out how to reproduce the problem SERVER mkfs.btrfs /dev/sda1 mount /dev/sda1 /mnt/btrfs-test btrfs subvol create /mnt/btrfs-test/foo service nfs start CLIENT mount server:/mnt/btrfs /mnt/test cd /mnt/test/foo ls SERVER echo 3 > /proc/sys/vm/drop_caches CLIENT ls <-- get an ESTALE here This is because the standard way to lookup a name in nfsd is to use readdir, and what it does is do a readdir on the parent directory looking for the inode of the child. So in this case the parent being / and the child being foo. Well subvols all have the same inode number, so doing a readdir of / looking for inode 256 will return '.', which obviously doesn't match foo. So instead we need to have our own .get_name so that we can find the right name. Our .get_name will either lookup the inode backref or the root backref, whichever we're looking for, and return the name we find. Running the above reproducer with this patch results in everything acting the way its supposed to. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/export.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 951ef09b82f4..6f0444473594 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -232,9 +232,85 @@ fail: return ERR_PTR(ret); } +static int btrfs_get_name(struct dentry *parent, char *name, + struct dentry *child) +{ + struct inode *inode = child->d_inode; + struct inode *dir = parent->d_inode; + struct btrfs_path *path; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_inode_ref *iref; + struct btrfs_root_ref *rref; + struct extent_buffer *leaf; + unsigned long name_ptr; + struct btrfs_key key; + int name_len; + int ret; + + if (!dir || !inode) + return -EINVAL; + + if (!S_ISDIR(dir->i_mode)) + return -EINVAL; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + key.objectid = BTRFS_I(inode)->root->root_key.objectid; + key.type = BTRFS_ROOT_BACKREF_KEY; + key.offset = (u64)-1; + root = root->fs_info->tree_root; + } else { + key.objectid = inode->i_ino; + key.offset = dir->i_ino; + key.type = BTRFS_INODE_REF_KEY; + } + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) { + btrfs_free_path(path); + return ret; + } else if (ret > 0) { + if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + path->slots[0]--; + } else { + btrfs_free_path(path); + return -ENOENT; + } + } + leaf = path->nodes[0]; + + if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + rref = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_root_ref); + name_ptr = (unsigned long)(rref + 1); + name_len = btrfs_root_ref_name_len(leaf, rref); + } else { + iref = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_inode_ref); + name_ptr = (unsigned long)(iref + 1); + name_len = btrfs_inode_ref_name_len(leaf, iref); + } + + read_extent_buffer(leaf, name, name_ptr, name_len); + btrfs_free_path(path); + + /* + * have to add the null termination to make sure that reconnect_path + * gets the right len for strlen + */ + name[name_len] = '\0'; + + return 0; +} + const struct export_operations btrfs_export_ops = { .encode_fh = btrfs_encode_fh, .fh_to_dentry = btrfs_fh_to_dentry, .fh_to_parent = btrfs_fh_to_parent, .get_parent = btrfs_get_parent, + .get_name = btrfs_get_name, }; -- cgit v1.2.2 From 76195853903ca613ba722203db9b747d70478fc7 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 19 Nov 
2010 02:18:02 +0000 Subject: Btrfs: fix more ESTALE problems with NFS When creating new inodes we don't set up inode->i_generation. So if we generate an fh with a newly created inode we save a generation of 0, but if we flush the inode to disk and have to read it back when getting the inode on the server we'll have the right i_generation, so the generations won't match and we get ESTALE. This patch properly sets inode->i_generation when we create the new inode and now I'm no longer getting ESTALE. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index eed357ff6c99..fc22f556aa24 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4501,6 +4501,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, BTRFS_I(inode)->index_cnt = 2; BTRFS_I(inode)->root = root; BTRFS_I(inode)->generation = trans->transid; + inode->i_generation = BTRFS_I(inode)->generation; btrfs_set_inode_space_info(root, inode); if (mode & S_IFDIR) -- cgit v1.2.2 From 6a912213046ecb6511fdf35531a0c7de3de963c9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Sat, 20 Nov 2010 09:48:00 +0000 Subject: Btrfs: use dget_parent where we can UPDATED There are lots of places where we do dentry->d_parent->d_inode without holding the dentry->d_lock. This could cause problems with rename. So instead we need to use dget_parent() and hold the reference to the parent as long as we are going to use its inode, and then dput it at the end. Signed-off-by: Josef Bacik Cc: raven@themaw.net Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 9 ++++++--- fs/btrfs/ioctl.c | 20 ++++++++++++++++---- fs/btrfs/transaction.c | 5 ++++- fs/btrfs/tree-log.c | 21 +++++++++++++++++---- 4 files changed, 43 insertions(+), 12 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fc22f556aa24..c0faf47d0cd9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4811,10 +4811,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (err) { drop_inode = 1; } else { + struct dentry *parent = dget_parent(dentry); btrfs_update_inode_block_group(trans, dir); err = btrfs_update_inode(trans, root, inode); BUG_ON(err); - btrfs_log_new_name(trans, inode, NULL, dentry->d_parent); + btrfs_log_new_name(trans, inode, NULL, parent); + dput(parent); } nr = trans->blocks_used; @@ -6768,8 +6770,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, BUG_ON(ret); if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { - btrfs_log_new_name(trans, old_inode, old_dir, - new_dentry->d_parent); + struct dentry *parent = dget_parent(new_dentry); + btrfs_log_new_name(trans, old_inode, old_dir, parent); + dput(parent); btrfs_end_log_trans(root); } out_fail: diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 6b4bfa72bf8d..f1c9bb4079ed 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -233,7 +233,8 @@ static noinline int create_subvol(struct btrfs_root *root, struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; struct btrfs_root *new_root; - struct inode *dir = dentry->d_parent->d_inode; + struct dentry *parent = dget_parent(dentry); + struct inode *dir; int ret; int err; u64 objectid; @@ -242,8 +243,13 @@ static noinline int create_subvol(struct btrfs_root *root, ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, 0, &objectid); - if (ret) + if (ret) { + dput(parent); return ret; + } + + dir = parent->d_inode; + /* * 1 - inode item 
* 2 - refs @@ -251,8 +257,10 @@ static noinline int create_subvol(struct btrfs_root *root, * 2 - dir items */ trans = btrfs_start_transaction(root, 6); - if (IS_ERR(trans)) + if (IS_ERR(trans)) { + dput(parent); return PTR_ERR(trans); + } leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, objectid, NULL, 0, 0, 0); @@ -339,6 +347,7 @@ static noinline int create_subvol(struct btrfs_root *root, d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); fail: + dput(parent); if (async_transid) { *async_transid = trans->transid; err = btrfs_commit_transaction_async(trans, root, 1); @@ -354,6 +363,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, char *name, int namelen, u64 *async_transid) { struct inode *inode; + struct dentry *parent; struct btrfs_pending_snapshot *pending_snapshot; struct btrfs_trans_handle *trans; int ret; @@ -396,7 +406,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, btrfs_orphan_cleanup(pending_snapshot->snap); - inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); + parent = dget_parent(dentry); + inode = btrfs_lookup_dentry(parent->d_inode, dentry); + dput(parent); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto fail; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1fffbc017bdf..f50e931fc217 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -902,6 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root *root = pending->root; struct btrfs_root *parent_root; struct inode *parent_inode; + struct dentry *parent; struct dentry *dentry; struct extent_buffer *tmp; struct extent_buffer *old; @@ -941,7 +942,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, trans->block_rsv = &pending->block_rsv; dentry = pending->dentry; - parent_inode = dentry->d_parent->d_inode; + parent = dget_parent(dentry); + parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; record_root_in_trans(trans, parent_root); @@ -989,6 +991,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent_inode->i_ino, index, dentry->d_name.name, dentry->d_name.len); BUG_ON(ret); + dput(parent); key.offset = (u64)-1; pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a29f19384a27..054744ac5719 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2869,6 +2869,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, { int ret = 0; struct btrfs_root *root; + struct dentry *old_parent = NULL; /* * for regular files, if its inode is already on disk, we don't @@ -2910,10 +2911,13 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, if (IS_ROOT(parent)) break; - parent = parent->d_parent; + parent = dget_parent(parent); + dput(old_parent); + old_parent = parent; inode = parent->d_inode; } + dput(old_parent); out: return ret; } @@ -2945,6 +2949,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, { int inode_only = exists_only ? 
LOG_INODE_EXISTS : LOG_INODE_ALL; struct super_block *sb; + struct dentry *old_parent = NULL; int ret = 0; u64 last_committed = root->fs_info->last_trans_committed; @@ -3016,10 +3021,13 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (IS_ROOT(parent)) break; - parent = parent->d_parent; + parent = dget_parent(parent); + dput(old_parent); + old_parent = parent; } ret = 0; end_trans: + dput(old_parent); if (ret < 0) { BUG_ON(ret != -ENOSPC); root->fs_info->last_trans_log_full_commit = trans->transid; @@ -3039,8 +3047,13 @@ end_no_trans: int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct dentry *dentry) { - return btrfs_log_inode_parent(trans, root, dentry->d_inode, - dentry->d_parent, 0); + struct dentry *parent = dget_parent(dentry); + int ret; + + ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0); + dput(parent); + + return ret; } /* -- cgit v1.2.2 From 495e86779f4f319828bc10dfc0c9ac2161868077 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 19 Nov 2010 20:36:10 +0000 Subject: Btrfs: hold i_mutex when calling btrfs_log_dentry_safe Since we walk up the path logging all of the parts of the inode's path, we need to hold i_mutex to make sure that the inode is not renamed while we're logging everything. btrfs_log_dentry_safe does dget_parent and all of that jazz, but we may get unexpected results if the rename changes the inode's location while we're higher up the path logging those dentries, so do this for safety reasons. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/file.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e354c33df082..c1faded5fca0 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1047,8 +1047,14 @@ out: if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + num_written = PTR_ERR(trans); + goto done; + } + mutex_lock(&inode->i_mutex); ret = btrfs_log_dentry_safe(trans, root, file->f_dentry); + mutex_unlock(&inode->i_mutex); if (ret == 0) { ret = btrfs_sync_log(trans, root); if (ret == 0) @@ -1067,6 +1073,7 @@ out: (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); } } +done: current->backing_dev_info = NULL; return num_written ? num_written : err; } -- cgit v1.2.2 From a1b075d28da563c5e2325577f282c042494254ba Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 19 Nov 2010 20:36:11 +0000 Subject: Btrfs: make btrfs_add_nondir take parent inode as an argument Everybody who calls btrfs_add_nondir just passes in the dentry of the new file and then dereference dentry->d_parent->d_inode, but everybody who calls btrfs_add_nondir() are already passed the parent's inode. So instead of dereferencing dentry->d_parent, just make btrfs_add_nondir take the dir inode as an argument and pass that along so we don't have to worry about d_parent. 
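(For reference, the safe-parent pattern these conversions follow, sketched in kernel-style C. dget_parent() and dput() are the real VFS helpers; the surrounding function and do_something_with_dir() are illustrative assumptions only.)

/* sketch: pin the parent instead of dereferencing dentry->d_parent,
 * which can be changed underneath us by a concurrent rename */
static int example_use_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry); /* takes a reference */
	struct inode *dir = parent->d_inode;         /* stable while pinned */
	int ret;

	ret = do_something_with_dir(dir);            /* hypothetical work */

	dput(parent);                                /* drop the reference */
	return ret;
}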
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c0faf47d0cd9..37cc1776a5d7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4623,12 +4623,12 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, } static int btrfs_add_nondir(struct btrfs_trans_handle *trans, - struct dentry *dentry, struct inode *inode, - int backref, u64 index) + struct inode *dir, struct dentry *dentry, + struct inode *inode, int backref, u64 index) { - int err = btrfs_add_link(trans, dentry->d_parent->d_inode, - inode, dentry->d_name.name, - dentry->d_name.len, backref, index); + int err = btrfs_add_link(trans, dir, inode, + dentry->d_name.name, dentry->d_name.len, + backref, index); if (!err) { d_instantiate(dentry, inode); return 0; @@ -4669,8 +4669,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, - dentry->d_parent->d_inode->i_ino, objectid, + dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); err = PTR_ERR(inode); if (IS_ERR(inode)) @@ -4683,7 +4682,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, } btrfs_set_trans_block_group(trans, inode); - err = btrfs_add_nondir(trans, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; else { @@ -4731,10 +4730,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, - dentry->d_parent->d_inode->i_ino, - objectid, BTRFS_I(dir)->block_group, mode, - &index); + dentry->d_name.len, dir->i_ino, objectid, + BTRFS_I(dir)->block_group, mode, &index); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_unlock; @@ -4746,7 +4743,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, } btrfs_set_trans_block_group(trans, inode); - err = btrfs_add_nondir(trans, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; else { @@ -4806,7 +4803,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, btrfs_set_trans_block_group(trans, dir); atomic_inc(&inode->i_count); - err = btrfs_add_nondir(trans, dentry, inode, 1, index); + err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); if (err) { drop_inode = 1; @@ -4856,8 +4853,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) btrfs_set_trans_block_group(trans, dir); inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, - dentry->d_parent->d_inode->i_ino, objectid, + dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFDIR | mode, &index); if (IS_ERR(inode)) { @@ -4880,9 +4876,8 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (err) goto out_fail; - err = btrfs_add_link(trans, dentry->d_parent->d_inode, - inode, dentry->d_name.name, - dentry->d_name.len, 0, index); + err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, + dentry->d_name.len, 0, index); if (err) goto out_fail; @@ -6922,8 +6917,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); inode = 
btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, - dentry->d_parent->d_inode->i_ino, objectid, + dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, &index); err = PTR_ERR(inode); @@ -6937,7 +6931,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, } btrfs_set_trans_block_group(trans, inode); - err = btrfs_add_nondir(trans, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; else { -- cgit v1.2.2 From 45f49bce99d008d6864a20324548f35936ba46fb Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 21 Nov 2010 22:27:44 -0500 Subject: Btrfs: avoid NULL pointer deref in try_release_extent_buffer If we fail to find a pointer in the radix tree, don't try to deref the NULL one we do have. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f60aa3c35c23..143d3f541d64 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3837,8 +3837,10 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) spin_lock(&tree->buffer_lock); eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); - if (!eb) - goto out; + if (!eb) { + spin_unlock(&tree->buffer_lock); + return ret; + } if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { ret = 0; -- cgit v1.2.2 From 55a61d1d06a3dc443d0db8aaa613365dcb83b98a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 22 Nov 2010 18:50:32 +0000 Subject: Btrfs: fix typo in fallocate to make it honor actual size There is a typo in __btrfs_prealloc_file_range() where we set the i_size to actual_len/cur_offset, and then just set it to cur_offset again, and do the same with btrfs_ordered_update_i_size(). This fixes it back to keeping i_size in a local variable and then updating i_size properly. Tested this with xfs_io -F -f -c "falloc 0 1" -c "pwrite 0 1" foo stat'ing foo gives us a size of 1 instead of 4096 like it was. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 37cc1776a5d7..0058fb3c2561 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7002,6 +7002,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key ins; u64 cur_offset = start; + u64 i_size; int ret = 0; bool own_trans = true; @@ -7043,11 +7044,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, (actual_len > inode->i_size) && (cur_offset > inode->i_size)) { if (cur_offset > actual_len) - i_size_write(inode, actual_len); + i_size = actual_len; else - i_size_write(inode, cur_offset); - i_size_write(inode, cur_offset); - btrfs_ordered_update_i_size(inode, cur_offset, NULL); + i_size = cur_offset; + i_size_write(inode, i_size); + btrfs_ordered_update_i_size(inode, i_size, NULL); } ret = btrfs_update_inode(trans, root, inode); -- cgit v1.2.2 From 0ed42a63f3edb144b091d9528401fce95c3c4d8d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 22 Nov 2010 18:55:39 +0000 Subject: Btrfs: make sure new inode size is ok in fallocate We have been failing xfstest 228 forever, because we don't check to make sure the new inode size is acceptable as far as RLIMIT is concerned. 
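(For context, a rough paraphrase of the check the fix below calls; this is an illustrative sketch of inode_newsize_ok()'s behavior, not the actual kernel source.)

/* sketch: growing writes must respect both the per-process file size
 * rlimit and the filesystem's maximum supported file size */
static int newsize_ok_sketch(struct inode *inode, loff_t offset)
{
	if (offset > inode->i_size) {
		unsigned long limit = rlimit(RLIMIT_FSIZE);

		if (limit != RLIM_INFINITY && offset > limit)
			return -EFBIG;	/* the real code also raises SIGXFSZ */
		if (offset > inode->i_sb->s_maxbytes)
			return -EFBIG;
	}
	return 0;
}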
Just check to make sure it's ok to create an inode with this new size and error out if not. With this patch we now pass 228. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0058fb3c2561..0eeacd93e8e5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7102,6 +7102,10 @@ static long btrfs_fallocate(struct inode *inode, int mode, btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start); mutex_lock(&inode->i_mutex); + ret = inode_newsize_ok(inode, alloc_end); + if (ret) + goto out; + if (alloc_start > inode->i_size) { ret = btrfs_cont_expand(inode, alloc_start); if (ret) -- cgit v1.2.2 From bc1cbf1f86aa2501efa9ca637c736fce6bcc4b1d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 23 Nov 2010 19:50:59 +0000 Subject: Btrfs: update inode ctime when using links Currently we fail xfstest 236 because we're not updating the inode ctime on link. This is a simple fix, and makes it so we pass 236 now. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0eeacd93e8e5..6df921f218fb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4785,6 +4785,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, return -EPERM; btrfs_inc_nlink(inode); + inode->i_ctime = CURRENT_TIME; err = btrfs_set_inode_index(dir, &index); if (err) -- cgit v1.2.2 From 619c8c763928841b1112e1d417f88bc1d44daecb Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Mon, 22 Nov 2010 02:21:38 +0000 Subject: Btrfs - fix race between btrfs_get_sb() and umount When mounting a btrfs file system, btrfs_test_super() may attempt to use sb->s_fs_info, the btrfs root, of a super block that is going away and that has had the btrfs root set to NULL in its ->put_super(). But if the super block is going away it cannot be an existing super block, so we can return false in this case. Signed-off-by: Ian Kent Signed-off-by: Chris Mason --- fs/btrfs/super.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 66e4612a7916..141fb317d3bc 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -566,6 +566,12 @@ static int btrfs_test_super(struct super_block *s, void *data) struct btrfs_fs_devices *test_fs_devices = data; struct btrfs_root *root = btrfs_sb(s); + /* + * If this super block is going away, return false as it + * can't match as an existing super block. + */ + if (!atomic_read(&s->s_active)) + return 0; return root->fs_info->fs_devices == test_fs_devices; } -- cgit v1.2.2 From 975f84fee2e8a77ee5f41bfe7c5682bf29366b10 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 23 Nov 2010 19:36:57 +0000 Subject: Btrfs: fix fiemap There are two big problems currently with FIEMAP: 1) We return extents for holes. This isn't supposed to happen; we should simply not return extents for holes, and userspace then interprets the lack of an extent as a hole. 2) We sometimes don't set FIEMAP_EXTENT_LAST properly. This is because we wait to see an EXTENT_FLAG_VACANCY flag on the em, but this won't happen if, say, we ask fiemap to map up to the last extent in a file and there is nothing but holes up to the i_size. To fix this we need to look up the last extent in the file and save its logical offset, so if we happen to try and map that extent we can be sure to set FIEMAP_EXTENT_LAST. A minimal userspace check of this behavior is sketched below. 
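(The userspace FIEMAP check promised above: it maps a file's extents and shows whether the final extent carries FIEMAP_EXTENT_LAST. struct fiemap and FS_IOC_FIEMAP are the real Linux UAPI; the fixed 32-extent buffer and the trimmed error handling are simplifications for the demo.)

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	unsigned int i, n = 32;	/* assume at most 32 extents for the demo */
	struct fiemap *fm = calloc(1, sizeof(*fm) +
				   n * sizeof(struct fiemap_extent));
	int fd = open(argv[1], O_RDONLY);

	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_extent_count = n;

	if (fd < 0 || ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
		return 1;

	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("extent %u: logical %llu len %llu%s\n", i,
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       (fm->fm_extents[i].fe_flags & FIEMAP_EXTENT_LAST) ?
		       " [LAST]" : "");
	return 0;
}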
With this patch we now pass xfstest 225, which we never have before. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 63 ++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 54 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 143d3f541d64..5e7a94d7da89 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2901,21 +2901,53 @@ out: int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent) { - int ret; + int ret = 0; u64 off = start; u64 max = start + len; u32 flags = 0; + u32 found_type; + u64 last; u64 disko = 0; + struct btrfs_key found_key; struct extent_map *em = NULL; struct extent_state *cached_state = NULL; + struct btrfs_path *path; + struct btrfs_file_extent_item *item; int end = 0; u64 em_start = 0, em_len = 0; unsigned long emflags; - ret = 0; + int hole = 0; if (len == 0) return -EINVAL; + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, + path, inode->i_ino, -1, 0); + if (ret < 0) { + btrfs_free_path(path); + return ret; + } + WARN_ON(!ret); + path->slots[0]--; + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_file_extent_item); + btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); + found_type = btrfs_key_type(&found_key); + + /* No extents, just return */ + if (found_key.objectid != inode->i_ino || + found_type != BTRFS_EXTENT_DATA_KEY) { + btrfs_free_path(path); + return 0; + } + last = found_key.offset; + btrfs_free_path(path); + lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state, GFP_NOFS); em = get_extent(inode, NULL, 0, off, max - off, 0); @@ -2925,11 +2957,18 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, ret = PTR_ERR(em); goto out; } + while (!end) { + hole = 0; off = em->start + em->len; if (off >= max) end = 1; + if (em->block_start == EXTENT_MAP_HOLE) { + hole = 1; + goto next; + } + em_start = em->start; em_len = em->len; @@ -2939,8 +2978,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (em->block_start == EXTENT_MAP_LAST_BYTE) { end = 1; flags |= FIEMAP_EXTENT_LAST; - } else if (em->block_start == EXTENT_MAP_HOLE) { - flags |= FIEMAP_EXTENT_UNWRITTEN; } else if (em->block_start == EXTENT_MAP_INLINE) { flags |= (FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED); @@ -2953,10 +2990,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) flags |= FIEMAP_EXTENT_ENCODED; +next: emflags = em->flags; free_extent_map(em); em = NULL; - if (!end) { em = get_extent(inode, NULL, 0, off, max - off, 0); if (!em) @@ -2967,15 +3004,23 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } emflags = em->flags; } + if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - ret = fiemap_fill_next_extent(fieinfo, em_start, disko, - em_len, flags); - if (ret) - goto out_free; + if (em_start == last) { + flags |= FIEMAP_EXTENT_LAST; + end = 1; + } + + if (!hole) { + ret = fiemap_fill_next_extent(fieinfo, em_start, disko, + em_len, flags); + if (ret) + goto out_free; + } } out_free: free_extent_map(em); -- cgit v1.2.2 From 450ba0ea06b6ed3612d27f2b7127a9de4160f285 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 19 Nov 2010 14:59:15 
-0500 Subject: Btrfs: setup blank root and fs_info for mount time There is a problem with how we use sget, it searches through the list of supers attached to the fs_type looking for a super with the same fs_devices as what we're trying to mount. This depends on sb->s_fs_info being filled, but we don't fill that in until we get to btrfs_fill_super, so we could hit supers on the fs_type super list that have a null s_fs_info. In order to fix that we need to go ahead and setup a blank root with a blank fs_info to hold fs_devices, that way our test will work out right and then we can set s_fs_info in btrfs_set_super, and then open_ctree will simply use our pre-allocated root and fs_info when setting everything up. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 6 ++---- fs/btrfs/super.c | 34 +++++++++++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a67b98d58c2a..57c9d8eeb7dc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1561,10 +1561,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, GFP_NOFS); struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); - struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root), - GFP_NOFS); - struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info), - GFP_NOFS); + struct btrfs_root *tree_root = btrfs_sb(sb); + struct btrfs_fs_info *fs_info = tree_root->fs_info; struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 141fb317d3bc..47bf67cbe6bf 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -563,7 +563,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) static int btrfs_test_super(struct super_block *s, void *data) { - struct btrfs_fs_devices *test_fs_devices = data; + struct btrfs_root *test_root = data; struct btrfs_root *root = btrfs_sb(s); /* @@ -572,9 +572,17 @@ static int btrfs_test_super(struct super_block *s, void *data) */ if (!atomic_read(&s->s_active)) return 0; - return root->fs_info->fs_devices == test_fs_devices; + return root->fs_info->fs_devices == test_root->fs_info->fs_devices; } +static int btrfs_set_super(struct super_block *s, void *data) +{ + s->s_fs_info = data; + + return set_anon_super(s, data); +} + + /* * Find a superblock for the given device / mount point. * @@ -588,6 +596,8 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, struct super_block *s; struct dentry *root; struct btrfs_fs_devices *fs_devices = NULL; + struct btrfs_root *tree_root = NULL; + struct btrfs_fs_info *fs_info = NULL; fmode_t mode = FMODE_READ; char *subvol_name = NULL; u64 subvol_objectid = 0; @@ -615,8 +625,24 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, goto error_close_devices; } + /* + * Setup a dummy root and fs_info for test/set super. This is because + * we don't actually fill this stuff out until open_ctree, but we need + * it for searching for existing supers, so this lets us do that and + * then open_ctree will properly initialize everything later. 
+ */ + fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); + tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); + if (!fs_info || !tree_root) { + error = -ENOMEM; + goto error_close_devices; + } + fs_info->tree_root = tree_root; + fs_info->fs_devices = fs_devices; + tree_root->fs_info = fs_info; + bdev = fs_devices->latest_bdev; - s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices); + s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root); if (IS_ERR(s)) goto error_s; @@ -685,6 +711,8 @@ error_s: error = PTR_ERR(s); error_close_devices: btrfs_close_devices(fs_devices); + kfree(fs_info); + kfree(tree_root); error_free_subvol_name: kfree(subvol_name); return error; -- cgit v1.2.2 From 163cf09c2a0ee5cac6285f9347975bd1e97725da Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 28 Nov 2010 19:56:33 -0500 Subject: Btrfs: deal with DIO bios that span more than one ordered extent The new DIO bio splitting code has problems when the bio spans more than one ordered extent. This will happen as the generic DIO code merges our get_blocks calls together into a bigger single bio. This fixes things by walking forward in the ordered extent code finding all the overlapping ordered extents and completing them all at once. Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 23 ++++++++++++++--- fs/btrfs/ordered-data.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ordered-data.h | 3 +++ 3 files changed, 89 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6df921f218fb..0f34cae0a633 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5602,15 +5602,18 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered = NULL; struct extent_state *cached_state = NULL; + u64 ordered_offset = dip->logical_offset; + u64 ordered_bytes = dip->bytes; int ret; if (err) goto out_done; - - ret = btrfs_dec_test_ordered_pending(inode, &ordered, - dip->logical_offset, dip->bytes); +again: + ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, + &ordered_offset, + ordered_bytes); if (!ret) - goto out_done; + goto out_test; BUG_ON(!ordered); @@ -5670,8 +5673,20 @@ out_unlock: out: btrfs_delalloc_release_metadata(inode, ordered->len); btrfs_end_transaction(trans, root); + ordered_offset = ordered->file_offset + ordered->len; btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); + +out_test: + /* + * our bio might span multiple ordered extents. If we haven't + * completed the accounting for the whole dio, go back and try again + */ + if (ordered_offset < dip->logical_offset + dip->bytes) { + ordered_bytes = dip->logical_offset + dip->bytes - + ordered_offset; + goto again; + } out_done: bio->bi_private = dip->private; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index f4621f6deca1..ae7737e352c9 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -248,6 +248,73 @@ int btrfs_add_ordered_sum(struct inode *inode, return 0; } +/* + * this is used to account for finished IO across a given range + * of the file. The IO may span ordered extents. If + * a given ordered_extent is completely done, 1 is returned, otherwise + * 0. + * + * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used + * to make sure this function only returns 1 once for a given ordered extent. + * + * file_offset is updated to one byte past the range that is recorded as + * complete. 
This allows you to walk forward in the file. + */ +int btrfs_dec_test_first_ordered_pending(struct inode *inode, + struct btrfs_ordered_extent **cached, + u64 *file_offset, u64 io_size) +{ + struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry = NULL; + int ret; + u64 dec_end; + u64 dec_start; + u64 to_dec; + + tree = &BTRFS_I(inode)->ordered_tree; + spin_lock(&tree->lock); + node = tree_search(tree, *file_offset); + if (!node) { + ret = 1; + goto out; + } + + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (!offset_in_entry(entry, *file_offset)) { + ret = 1; + goto out; + } + + dec_start = max(*file_offset, entry->file_offset); + dec_end = min(*file_offset + io_size, entry->file_offset + + entry->len); + *file_offset = dec_end; + if (dec_start > dec_end) { + printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n", + (unsigned long long)dec_start, + (unsigned long long)dec_end); + } + to_dec = dec_end - dec_start; + if (to_dec > entry->bytes_left) { + printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n", + (unsigned long long)entry->bytes_left, + (unsigned long long)to_dec); + } + entry->bytes_left -= to_dec; + if (entry->bytes_left == 0) + ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); + else + ret = 1; +out: + if (!ret && cached && entry) { + *cached = entry; + atomic_inc(&entry->refs); + } + spin_unlock(&tree->lock); + return ret == 0; +} + /* * this is used to account for finished IO across a given range * of the file. The IO should not span ordered extents. If diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 8ac365492a3f..61dca83119dd 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -141,6 +141,9 @@ int btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, struct btrfs_ordered_extent **cached, u64 file_offset, u64 io_size); +int btrfs_dec_test_first_ordered_pending(struct inode *inode, + struct btrfs_ordered_extent **cached, + u64 *file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type); int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, -- cgit v1.2.2 From 5a92bc88cef279261d3f138e25850c122df67045 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 29 Nov 2010 09:49:11 -0500 Subject: Btrfs: don't use migrate page without CONFIG_MIGRATION Fixes compile error Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 57c9d8eeb7dc..33b6d459494c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -712,8 +712,11 @@ static int btree_migratepage(struct address_space *mapping, if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) return -EAGAIN; - +#ifdef CONFIG_MIGRATION return migrate_page(mapping, newpage, page); +#else + return -ENOSYS; +#endif } static int btree_writepage(struct page *page, struct writeback_control *wbc) @@ -821,7 +824,9 @@ static const struct address_space_operations btree_aops = { .releasepage = btree_releasepage, .invalidatepage = btree_invalidatepage, .sync_page = block_sync_page, +#ifdef CONFIG_MIGRATION .migratepage = btree_migratepage, +#endif }; int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, -- cgit v1.2.2 From 955256f2c3e25c94ad373c43fbc38d2ac8af2a71 Mon Sep 17 00:00:00 2001 From: Josef Bacik 
Date: Fri, 19 Nov 2010 09:41:10 -0500 Subject: Btrfs: fix use after free in O_DIRECT This fixes a bug where we use dip after we have freed it. Instead just use the file_offset that was passed to the function. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0f34cae0a633..ae6c0d190bc1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5934,8 +5934,7 @@ free_ordered: */ if (write) { struct btrfs_ordered_extent *ordered; - ordered = btrfs_lookup_ordered_extent(inode, - dip->logical_offset); + ordered = btrfs_lookup_ordered_extent(inode, file_offset); if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) btrfs_free_reserved_extent(root, ordered->start, -- cgit v1.2.2 From 2b20982e3154266106573beac2a4d4ba57a2789a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 3 Dec 2010 13:17:53 -0500 Subject: Btrfs: deal with space cache errors better Currently if the space cache inode generation number doesn't match the generation number in the space cache header we will just fail to load the space cache, but we won't mark the space cache as an error, so we'll keep getting that error each time somebody tries to cache that block group until we actually clear the thing. Fix this by marking the space cache as having an error so we only get the message once. This patch also makes it so that we don't try and setup space cache for a block group that isn't cached, since we won't be able to write it out anyway. None of these problems are actual problems, they are just annoying and sub-optimal. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 10 ++++++---- fs/btrfs/free-space-cache.c | 12 +++++++----- 2 files changed, 13 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ddaf6340fe7f..8c56f5b38948 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2742,6 +2742,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, struct btrfs_root *root = block_group->fs_info->tree_root; struct inode *inode = NULL; u64 alloc_hint = 0; + int dcs = BTRFS_DC_ERROR; int num_pages = 0; int retries = 0; int ret = 0; @@ -2796,6 +2797,8 @@ again: spin_lock(&block_group->lock); if (block_group->cached != BTRFS_CACHE_FINISHED) { + /* We're not cached, don't bother trying to write stuff out */ + dcs = BTRFS_DC_WRITTEN; spin_unlock(&block_group->lock); goto out_put; } @@ -2822,6 +2825,8 @@ again: ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, num_pages, num_pages, &alloc_hint); + if (!ret) + dcs = BTRFS_DC_SETUP; btrfs_free_reserved_data_space(inode, num_pages); out_put: iput(inode); @@ -2829,10 +2834,7 @@ out_free: btrfs_release_path(root, path); out: spin_lock(&block_group->lock); - if (ret) - block_group->disk_cache_state = BTRFS_DC_ERROR; - else - block_group->disk_cache_state = BTRFS_DC_SETUP; + block_group->disk_cache_state = dcs; spin_unlock(&block_group->lock); return ret; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 22ee0dc2e6b8..60d684266959 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -290,7 +290,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, (unsigned long long)BTRFS_I(inode)->generation, (unsigned long long)generation, (unsigned long long)block_group->key.objectid); - goto out; + goto free_cache; } if 
(!num_entries) @@ -524,6 +524,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, return 0; } + node = rb_first(&block_group->free_space_offset); + if (!node) { + iput(inode); + return 0; + } + last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & @@ -543,10 +549,6 @@ int btrfs_write_out_cache(struct btrfs_root *root, */ first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); - node = rb_first(&block_group->free_space_offset); - if (!node) - goto out_free; - /* * Lock all pages first so we can lock the extent safely. * -- cgit v1.2.2 From b8399dee478db7939cd0d6fda8ecacddf2facd03 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 8 Dec 2010 09:15:11 -0500 Subject: Btrfs: do not do fast caching if we are allocating blocks for tree_root Since the fast caching uses normal tree locking, we can possibly deadlock if we get to the caching via a btrfs_search_slot() on the tree_root. So just check to see if the root we are on is the tree root, and just don't do the fast caching. Reported-by: Sage Weil Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8c56f5b38948..cec05e100142 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -429,6 +429,7 @@ err: static int cache_block_group(struct btrfs_block_group_cache *cache, struct btrfs_trans_handle *trans, + struct btrfs_root *root, int load_cache_only) { struct btrfs_fs_info *fs_info = cache->fs_info; @@ -442,9 +443,12 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, /* * We can't do the read from on-disk cache during a commit since we need - * to have the normal tree locking. + * to have the normal tree locking. Also if we are currently trying to + * allocate blocks for the tree root we can't do the fast caching since + * we likely hold important locks. */ - if (!trans->transaction->in_commit) { + if (!trans->transaction->in_commit && + (root && root != root->fs_info->tree_root)) { spin_lock(&cache->lock); if (cache->cached != BTRFS_CACHE_NO) { spin_unlock(&cache->lock); @@ -4083,7 +4087,7 @@ static int update_block_group(struct btrfs_trans_handle *trans, * space back to the block group, otherwise we will leak space. 
*/ if (!alloc && cache->cached == BTRFS_CACHE_NO) - cache_block_group(cache, trans, 1); + cache_block_group(cache, trans, NULL, 1); byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); @@ -4937,7 +4941,8 @@ have_block_group: if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { u64 free_percent; - ret = cache_block_group(block_group, trans, 1); + ret = cache_block_group(block_group, trans, + orig_root, 1); if (block_group->cached == BTRFS_CACHE_FINISHED) goto have_block_group; @@ -4961,7 +4966,8 @@ have_block_group: if (loop > LOOP_CACHING_NOWAIT || (loop > LOOP_FIND_IDEAL && atomic_read(&space_info->caching_threads) < 2)) { - ret = cache_block_group(block_group, trans, 0); + ret = cache_block_group(block_group, trans, + orig_root, 0); BUG_ON(ret); } found_uncached_bg = true; @@ -5518,7 +5524,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, u64 num_bytes = ins->offset; block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); - cache_block_group(block_group, trans, 0); + cache_block_group(block_group, trans, NULL, 0); caching_ctl = get_caching_control(block_group); if (!caching_ctl) { -- cgit v1.2.2 From 7e1fea731da8c1b5fcf5d8e157befd389b030760 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 8 Dec 2010 12:22:34 -0500 Subject: Btrfs: fixup return code for btrfs_del_orphan_item If the orphan item doesn't exist, we return 1, which doesn't make any sense to the callers. Instead return -ENOENT if we didn't find the item. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/orphan.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c index 79cba5fbc28e..f8be250963a0 100644 --- a/fs/btrfs/orphan.c +++ b/fs/btrfs/orphan.c @@ -56,8 +56,12 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, return -ENOMEM; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret) + if (ret < 0) goto out; + if (ret) { + ret = -ENOENT; + goto out; + } ret = btrfs_del_item(trans, root, path); -- cgit v1.2.2 From 84cd948cb11041f205242de457e680b9bb872a36 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 8 Dec 2010 12:24:01 -0500 Subject: Btrfs: do not BUG if we fail to remove the orphan item for dead snapshots Not being able to delete an orphan item isn't a horrible thing. The worst that happens is the next time around we try and do the orphan cleanup and we can't find the referenced object and just delete the item and move on. Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cec05e100142..41133b064d72 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6309,9 +6309,13 @@ int btrfs_drop_snapshot(struct btrfs_root *root, NULL, NULL); BUG_ON(ret < 0); if (ret > 0) { - ret = btrfs_del_orphan_item(trans, tree_root, - root->root_key.objectid); - BUG_ON(ret); + /* if we fail to delete the orphan item this time + * around, it'll get picked up the next time. + * + * The most common failure here is just -ENOENT. 
+ */ + btrfs_del_orphan_item(trans, tree_root, + root->root_key.objectid); } } -- cgit v1.2.2 From 24ae63656a165c870c0d69fcc8aac1dc35e25e34 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 6 Dec 2010 07:02:36 +0000 Subject: Btrfs: Fix page leak in compressed writeback path "start + num_bytes >= actual_end" can happen when compressed page writeback races with file truncation. In that case we need to unlock and release the pages past the end of the file. Signed-off-by: Yan, Zheng Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ae6c0d190bc1..4875d69871b5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -495,7 +495,7 @@ again: add_async_extent(async_cow, start, num_bytes, total_compressed, pages, nr_pages_ret); - if (start + num_bytes < end && start + num_bytes < actual_end) { + if (start + num_bytes < end) { start += num_bytes; pages = NULL; cond_resched(); -- cgit v1.2.2 From 75eaa0e22c055e38982df267d0f84cc510ba38bf Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 10 Dec 2010 00:36:28 +0000 Subject: Btrfs: fix sync subvol/snapshot creation We were incorrectly taking the async path even for the sync ioctls by passing in &transid unconditionally. There's ample room for further cleanup here, but this keeps the fix simple. Signed-off-by: Sage Weil Reviewed-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f1c9bb4079ed..7cc2e8e075b4 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -964,6 +964,15 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, name = async_vol_args->name; fd = async_vol_args->fd; async_vol_args->name[BTRFS_SNAPSHOT_NAME_MAX] = '\0'; + + ret = btrfs_ioctl_snap_create_transid(file, name, fd, + subvol, &transid); + + if (ret == 0 && + copy_to_user(arg + + offsetof(struct btrfs_ioctl_async_vol_args, + transid), &transid, sizeof(transid))) + ret = -EFAULT; } else { vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) @@ -971,16 +980,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, name = vol_args->name; fd = vol_args->fd; vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - } - ret = btrfs_ioctl_snap_create_transid(file, name, fd, - subvol, &transid); - - if (!ret && async) { - if (copy_to_user(arg + - offsetof(struct btrfs_ioctl_async_vol_args, - transid), &transid, sizeof(transid))) - return -EFAULT; + } + ret = btrfs_ioctl_snap_create_transid(file, name, fd, + subvol, NULL); } kfree(vol_args); -- cgit v1.2.2 From f106e82caaa0d943e47cacc184f5b40d538e0044 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Dec 2010 01:51:26 +0000 Subject: Btrfs: Fix a crash when mounting a subvolume We should drop the dentry before deactivating the superblock, otherwise we can hit this bug: BUG: Dentry f349a690{i=100,n=/} still in use (1) [unmount of btrfs loop1] ...
Steps to reproduce the bug: # mount /dev/loop1 /mnt # mkdir save # btrfs subvolume snapshot /mnt save/snap1 # umount /mnt # mount -o subvol=save/snap1 /dev/loop1 /mnt (crash) Reported-by: Michael Niederle Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 47bf67cbe6bf..61bd79abb805 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -685,9 +685,9 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, mutex_unlock(&root->d_inode->i_mutex); if (IS_ERR(new_root)) { + dput(root); deactivate_locked_super(s); error = PTR_ERR(new_root); - dput(root); goto error_free_subvol_name; } if (!new_root->d_inode) { -- cgit v1.2.2 From 914ee295af418e936ec20a08c1663eaabe4cd07a Mon Sep 17 00:00:00 2001 From: Xin Zhong Date: Thu, 9 Dec 2010 09:30:14 +0000 Subject: Btrfs: pwrite blocked when writing from the mmaped buffer of the same page This problem was found in MeeGo testing: http://bugs.meego.com/show_bug.cgi?id=6672 A file in btrfs is mmaped and the mmaped buffer is passed to pwrite to write to the same page of the same file. In btrfs_file_aio_write(), the pages are locked by prepare_pages(). So when btrfs_copy_from_user() is called, a page fault happens and the same page needs to be locked again in filemap_fault(). The fix is to move iov_iter_fault_in_readable() before prepare_pages() so that the page fault happens before the pages are locked. Also disable page faults in the critical region of btrfs_copy_from_user(). Reviewed-by: Yan, Zheng Signed-off-by: Zhong, Xin Signed-off-by: Chris Mason --- fs/btrfs/file.c | 92 +++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 60 insertions(+), 32 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c1faded5fca0..66836d85763b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -48,30 +48,34 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, struct page **prepared_pages, struct iov_iter *i) { - size_t copied; + size_t copied = 0; int pg = 0; int offset = pos & (PAGE_CACHE_SIZE - 1); + int total_copied = 0; while (write_bytes > 0) { size_t count = min_t(size_t, PAGE_CACHE_SIZE - offset, write_bytes); struct page *page = prepared_pages[pg]; -again: - if (unlikely(iov_iter_fault_in_readable(i, count))) - return -EFAULT; - - /* Copy data from userspace to the current page */ - copied = iov_iter_copy_from_user(page, i, offset, count); + /* + * Copy data from userspace to the current page + * + * Disable pagefault to avoid recursive lock since + * the pages are already locked + */ + pagefault_disable(); + copied = iov_iter_copy_from_user_atomic(page, i, offset, count); + pagefault_enable(); /* Flush processor's dcache for this page */ flush_dcache_page(page); iov_iter_advance(i, copied); write_bytes -= copied; + total_copied += copied; + /* Return to btrfs_file_aio_write to fault page */ if (unlikely(copied == 0)) { - count = min_t(size_t, PAGE_CACHE_SIZE - offset, - iov_iter_single_seg_count(i)); - goto again; + break; } if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { @@ -81,7 +85,7 @@ again: offset = 0; } } - return 0; + return total_copied; } /* @@ -854,6 +858,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, unsigned long last_index; int will_write; int buffered = 0; + int copied = 0; + int dirty_pages = 0; will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || (file->f_flags & O_DIRECT)); @@ -970,7 +976,17 @@
static ssize_t btrfs_file_aio_write(struct kiocb *iocb, WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); - ret = btrfs_delalloc_reserve_space(inode, write_bytes); + /* + * Fault pages before locking them in prepare_pages + * to avoid recursive lock + */ + if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) { + ret = -EFAULT; + goto out; + } + + ret = btrfs_delalloc_reserve_space(inode, + num_pages << PAGE_CACHE_SHIFT); if (ret) goto out; @@ -978,37 +994,49 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, pos, first_index, last_index, write_bytes); if (ret) { - btrfs_delalloc_release_space(inode, write_bytes); + btrfs_delalloc_release_space(inode, + num_pages << PAGE_CACHE_SHIFT); goto out; } - ret = btrfs_copy_from_user(pos, num_pages, + copied = btrfs_copy_from_user(pos, num_pages, write_bytes, pages, &i); - if (ret == 0) { + dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; + + if (num_pages > dirty_pages) { + if (copied > 0) + atomic_inc( + &BTRFS_I(inode)->outstanding_extents); + btrfs_delalloc_release_space(inode, + (num_pages - dirty_pages) << + PAGE_CACHE_SHIFT); + } + + if (copied > 0) { dirty_and_release_pages(NULL, root, file, pages, - num_pages, pos, write_bytes); + dirty_pages, pos, copied); } btrfs_drop_pages(pages, num_pages); - if (ret) { - btrfs_delalloc_release_space(inode, write_bytes); - goto out; - } - if (will_write) { - filemap_fdatawrite_range(inode->i_mapping, pos, - pos + write_bytes - 1); - } else { - balance_dirty_pages_ratelimited_nr(inode->i_mapping, - num_pages); - if (num_pages < - (root->leafsize >> PAGE_CACHE_SHIFT) + 1) - btrfs_btree_balance_dirty(root, 1); - btrfs_throttle(root); + if (copied > 0) { + if (will_write) { + filemap_fdatawrite_range(inode->i_mapping, pos, + pos + copied - 1); + } else { + balance_dirty_pages_ratelimited_nr( + inode->i_mapping, + dirty_pages); + if (dirty_pages < + (root->leafsize >> PAGE_CACHE_SHIFT) + 1) + btrfs_btree_balance_dirty(root, 1); + btrfs_throttle(root); + } } - pos += write_bytes; - num_written += write_bytes; + pos += copied; + num_written += copied; cond_resched(); } -- cgit v1.2.2 From fdfb1e4f6c61477a61890b64974d65cdc3a98702 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 10 Dec 2010 06:41:56 +0000 Subject: Btrfs: Make async snapshot ioctl more generic If we had reserved some bytes in struct btrfs_ioctl_vol_args, we wouldn't have to create a new structure for async snapshot creation. Here we convert async snapshot ioctl to use a more generic ABI, as we'll add more ioctls for snapshots/subvolumes in the future, readonly snapshots for example. 
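As a rough userspace sketch of the new ABI (illustration only, not part of the patch: the struct layout and ioctl numbers are copied from the fs/btrfs/ioctl.h hunk below, BTRFS_IOCTL_MAGIC is assumed to be 0x94 as defined elsewhere in that header, and the paths are hypothetical):

/* snap-v2-demo.c -- minimal sketch of BTRFS_IOC_SNAP_CREATE_V2 usage.
 * Definitions mirror fs/btrfs/ioctl.h from this patch; /mnt and
 * /mnt/subvol are placeholder paths. Error handling is abbreviated.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define BTRFS_IOCTL_MAGIC 0x94
#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
#define BTRFS_SUBVOL_NAME_MAX 4039

struct btrfs_ioctl_vol_args_v2 {
	__s64 fd;
	__u64 transid;
	__u64 flags;
	__u64 unused[4];
	char name[BTRFS_SUBVOL_NAME_MAX + 1];
};

#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
				      struct btrfs_ioctl_vol_args_v2)
#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)

int main(void)
{
	struct btrfs_ioctl_vol_args_v2 args;
	int src = open("/mnt/subvol", O_RDONLY);	/* snapshot source */
	int dst = open("/mnt", O_RDONLY);		/* destination dir */

	if (src < 0 || dst < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.fd = src;
	args.flags = BTRFS_SUBVOL_CREATE_ASYNC;		/* request async path */
	strncpy(args.name, "snap1", BTRFS_SUBVOL_NAME_MAX);

	if (ioctl(dst, BTRFS_IOC_SNAP_CREATE_V2, &args) < 0)
		return 1;

	/* the kernel copied the transaction id back; wait for its commit */
	if (ioctl(dst, BTRFS_IOC_WAIT_SYNC, &args.transid) < 0)
		return 1;

	printf("snapshot committed in transaction %llu\n",
	       (unsigned long long)args.transid);
	return 0;
}

Handing the returned transid to BTRFS_IOC_WAIT_SYNC lets callers decide for themselves whether the snapshot must be durable before they proceed, which is the point of keeping the async flag in the generic v2 structure.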
Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 44 +++++++++++++++++++++++++++----------------- fs/btrfs/ioctl.h | 14 +++++++++----- 2 files changed, 36 insertions(+), 22 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7cc2e8e075b4..f87552a1d7ea 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -947,31 +947,41 @@ out: static noinline int btrfs_ioctl_snap_create(struct file *file, void __user *arg, int subvol, - int async) + int v2) { struct btrfs_ioctl_vol_args *vol_args = NULL; - struct btrfs_ioctl_async_vol_args *async_vol_args = NULL; + struct btrfs_ioctl_vol_args_v2 *vol_args_v2 = NULL; char *name; u64 fd; - u64 transid = 0; int ret; - if (async) { - async_vol_args = memdup_user(arg, sizeof(*async_vol_args)); - if (IS_ERR(async_vol_args)) - return PTR_ERR(async_vol_args); + if (v2) { + u64 transid = 0; + u64 *ptr = NULL; - name = async_vol_args->name; - fd = async_vol_args->fd; - async_vol_args->name[BTRFS_SNAPSHOT_NAME_MAX] = '\0'; + vol_args_v2 = memdup_user(arg, sizeof(*vol_args_v2)); + if (IS_ERR(vol_args_v2)) + return PTR_ERR(vol_args_v2); + + if (vol_args_v2->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { + ret = -EINVAL; + goto out; + } + + name = vol_args_v2->name; + fd = vol_args_v2->fd; + vol_args_v2->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; + + if (vol_args_v2->flags & BTRFS_SUBVOL_CREATE_ASYNC) + ptr = &transid; ret = btrfs_ioctl_snap_create_transid(file, name, fd, - subvol, &transid); + subvol, ptr); - if (ret == 0 && + if (ret == 0 && ptr && copy_to_user(arg + - offsetof(struct btrfs_ioctl_async_vol_args, - transid), &transid, sizeof(transid))) + offsetof(struct btrfs_ioctl_vol_args_v2, + transid), ptr, sizeof(*ptr))) ret = -EFAULT; } else { vol_args = memdup_user(arg, sizeof(*vol_args)); @@ -984,9 +994,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, ret = btrfs_ioctl_snap_create_transid(file, name, fd, subvol, NULL); } - +out: kfree(vol_args); - kfree(async_vol_args); + kfree(vol_args_v2); return ret; } @@ -2248,7 +2258,7 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_getversion(file, argp); case BTRFS_IOC_SNAP_CREATE: return btrfs_ioctl_snap_create(file, argp, 0, 0); - case BTRFS_IOC_SNAP_CREATE_ASYNC: + case BTRFS_IOC_SNAP_CREATE_V2: return btrfs_ioctl_snap_create(file, argp, 0, 1); case BTRFS_IOC_SUBVOL_CREATE: return btrfs_ioctl_snap_create(file, argp, 1, 0); diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 17c99ebdf960..c344d12c646b 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -30,11 +30,15 @@ struct btrfs_ioctl_vol_args { char name[BTRFS_PATH_NAME_MAX + 1]; }; -#define BTRFS_SNAPSHOT_NAME_MAX 4079 -struct btrfs_ioctl_async_vol_args { +#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) + +#define BTRFS_SUBVOL_NAME_MAX 4039 +struct btrfs_ioctl_vol_args_v2 { __s64 fd; __u64 transid; - char name[BTRFS_SNAPSHOT_NAME_MAX + 1]; + __u64 flags; + __u64 unused[4]; + char name[BTRFS_SUBVOL_NAME_MAX + 1]; }; #define BTRFS_INO_LOOKUP_PATH_MAX 4080 @@ -187,6 +191,6 @@ struct btrfs_ioctl_space_args { struct btrfs_ioctl_space_args) #define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) -#define BTRFS_IOC_SNAP_CREATE_ASYNC _IOW(BTRFS_IOCTL_MAGIC, 23, \ - struct btrfs_ioctl_async_vol_args) +#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ + struct btrfs_ioctl_vol_args_v2) #endif -- cgit v1.2.2 From 3dd1462e82bcab7625cec129952f26dae7a8b742 Mon Sep 17 00:00:00 2001 From: Jan Beulich 
Date: Tue, 7 Dec 2010 14:54:09 +0000 Subject: Btrfs: fix compiler warnings ... regarding an unused function when !MIGRATION, and regarding a printk() format string vs argument mismatch. Signed-off-by: Jan Beulich Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 6 ++---- fs/btrfs/inode.c | 6 +++--- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 33b6d459494c..b803c2667673 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -696,6 +696,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, __btree_submit_bio_done); } +#ifdef CONFIG_MIGRATION static int btree_migratepage(struct address_space *mapping, struct page *newpage, struct page *page) { @@ -712,12 +713,9 @@ static int btree_migratepage(struct address_space *mapping, if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) return -EAGAIN; -#ifdef CONFIG_MIGRATION return migrate_page(mapping, newpage, page); -#else - return -ENOSYS; -#endif } +#endif static int btree_writepage(struct page *page, struct writeback_control *wbc) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4875d69871b5..5f9194438f7c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5712,9 +5712,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) if (err) { printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " - "disk_bytenr %lu len %u err no %d\n", - dip->inode->i_ino, bio->bi_rw, bio->bi_sector, - bio->bi_size, err); + "sector %#Lx len %u err no %d\n", + dip->inode->i_ino, bio->bi_rw, + (unsigned long long)bio->bi_sector, bio->bi_size, err); dip->errors = 1; /* -- cgit v1.2.2 From 68433b73b104bff388aac376631d32abbbd872b0 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Dec 2010 14:47:58 -0500 Subject: Btrfs: EIO when we fail to read tree roots If we just get a plain IO error when we read tree roots, the code wasn't properly sending that error up the chain. This allowed mounts to continue when they should have failed, and allowed operations on partially set up root structs. The end result was usually oopsen on spinlocks that hadn't been spun up correctly. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b803c2667673..a5d2249e6da5 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1007,7 +1007,10 @@ static int find_and_setup_root(struct btrfs_root *tree_root, blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), blocksize, generation); - BUG_ON(!root->node); + if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) { + free_extent_buffer(root->node); + return -EIO; + } root->commit_root = btrfs_root_node(root); return 0; } -- cgit v1.2.2 From cd02dca56442e1504fd6bc5b96f7f1870162b266 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Dec 2010 14:56:23 -0500 Subject: Btrfs: account for missing devices in RAID allocation profiles When we mount in RAID degraded mode without adding a new device to replace the failed one, we can end up using the wrong RAID flags for allocations. This results in strange combinations of block groups (raid1 in a raid10 filesystem) and corruptions when we try to allocate blocks from single spindle chunks on drives that are actually missing.
The first device has two small 4MB chunks in it that mkfs creates and these are usually unused in a raid1 or raid10 setup. But, in -o degraded, the allocator will fall back to these because the mask of desired raid groups isn't correct. The fix here is to count the missing devices as we build up the list of devices in the system. This count is used when picking the raid level to make sure we continue using the same levels that were in place before we lost a drive. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 17 +++++++++++++++-- fs/btrfs/volumes.c | 20 +++++++++++++++++++- fs/btrfs/volumes.h | 2 ++ 3 files changed, 36 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 41133b064d72..4be231e0d2bd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3044,7 +3044,13 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) { - u64 num_devices = root->fs_info->fs_devices->rw_devices; + /* + * we add in the count of missing devices because we want + * to make sure that any RAID levels on a degraded FS + * continue to be honored. + */ + u64 num_devices = root->fs_info->fs_devices->rw_devices + + root->fs_info->fs_devices->missing_devices; if (num_devices == 1) flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0); @@ -7891,7 +7897,14 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) u64 stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; - num_devices = root->fs_info->fs_devices->rw_devices; + /* + * we add in the count of missing devices because we want + * to make sure that any RAID levels on a degraded FS + * continue to be honored. 
+ */ + num_devices = root->fs_info->fs_devices->rw_devices + + root->fs_info->fs_devices->missing_devices; + if (num_devices == 1) { stripped |= BTRFS_BLOCK_GROUP_DUP; stripped = flags & ~stripped; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 91851b555e2e..177b73179590 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -413,12 +413,16 @@ static noinline int device_list_add(const char *path, device->fs_devices = fs_devices; fs_devices->num_devices++; - } else if (strcmp(device->name, path)) { + } else if (!device->name || strcmp(device->name, path)) { name = kstrdup(path, GFP_NOFS); if (!name) return -ENOMEM; kfree(device->name); device->name = name; + if (device->missing) { + fs_devices->missing_devices--; + device->missing = 0; + } } if (found_transid > fs_devices->latest_trans) { @@ -1238,6 +1242,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) device->fs_devices->num_devices--; + if (device->missing) + root->fs_info->fs_devices->missing_devices--; + next_device = list_entry(root->fs_info->fs_devices->devices.next, struct btrfs_device, dev_list); if (device->bdev == root->fs_info->sb->s_bdev) @@ -3084,7 +3091,9 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root, device->devid = devid; device->work.func = pending_bios_fn; device->fs_devices = fs_devices; + device->missing = 1; fs_devices->num_devices++; + fs_devices->missing_devices++; spin_lock_init(&device->io_lock); INIT_LIST_HEAD(&device->dev_alloc_list); memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); @@ -3282,6 +3291,15 @@ static int read_one_dev(struct btrfs_root *root, device = add_missing_dev(root, devid, dev_uuid); if (!device) return -ENOMEM; + } else if (!device->missing) { + /* + * this happens when a device that was properly setup + * in the device info lists suddenly goes bad. + * device->bdev is NULL, and so we have to set + * device->missing to one here + */ + root->fs_info->fs_devices->missing_devices++; + device->missing = 1; } } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 31b0fabdd2ea..a668c0116982 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -45,6 +45,7 @@ struct btrfs_device { int barriers; int writeable; int in_fs_metadata; + int missing; spinlock_t io_lock; @@ -94,6 +95,7 @@ struct btrfs_fs_devices { u64 num_devices; u64 open_devices; u64 rw_devices; + u64 missing_devices; u64 total_rw_bytes; struct block_device *latest_bdev; -- cgit v1.2.2 From 83a50de97fe96aca82389e061862ed760ece2283 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Dec 2010 15:06:46 -0500 Subject: Btrfs: prevent RAID level downgrades when space is low The extent allocator has code that allows us to fill allocations from any available block group, even if it doesn't match the raid level we've requested. This was put in because adding a new drive to a filesystem made with the default mkfs options actually upgrades the metadata from single spindle dup to full RAID1. But, the code also allows us to allocate from a raid0 chunk when we really want a raid1 or raid10 chunk. This can cause big trouble because mkfs creates a small (4MB) raid0 chunk for data and metadata which then goes unused for raid1/raid10 installs. The allocator will happily wander in and allocate from that chunk when things get tight, which is not correct. The fix here is to make sure that we provide duplication when the caller has asked for it. It does allow the dups to be any raid level, which preserves the dup->raid1 upgrade abilities.
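The core of the fix is a mask test, shown stripped down below (illustration only, not the patch itself; the block group flag bit values are assumed to match their ctree.h definitions of this period):

/* mask-demo.c -- sketch of the allocator's duplication check: if the
 * caller asked for redundancy (DUP/RAID1/RAID10) and this block group
 * provides none of it, the group must be skipped.  Filling raid0 from
 * raid1 remains allowed.  Flag bits assumed from ctree.h; demo only.
 */
#include <stdio.h>

#define BTRFS_BLOCK_GROUP_RAID0  (1ULL << 3)
#define BTRFS_BLOCK_GROUP_RAID1  (1ULL << 4)
#define BTRFS_BLOCK_GROUP_DUP    (1ULL << 5)
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)

static int group_usable(unsigned long long wanted,
			unsigned long long group_flags)
{
	unsigned long long extra = BTRFS_BLOCK_GROUP_DUP |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10;

	if ((wanted & extra) && !(group_flags & extra))
		return 0;	/* caller wants copies, group has none */
	return 1;
}

int main(void)
{
	/* a raid1 allocation must skip the leftover 4MB raid0 chunk... */
	printf("%d\n", group_usable(BTRFS_BLOCK_GROUP_RAID1,
				    BTRFS_BLOCK_GROUP_RAID0));	/* 0 */
	/* ...but raid0 may still be filled from a raid1 group */
	printf("%d\n", group_usable(BTRFS_BLOCK_GROUP_RAID0,
				    BTRFS_BLOCK_GROUP_RAID1));	/* 1 */
	return 0;
}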
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4be231e0d2bd..7e5162e5c411 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4943,6 +4943,25 @@ search: btrfs_get_block_group(block_group); search_start = block_group->key.objectid; + /* + * this can happen if we end up cycling through all the + * raid types, but we want to make sure we only allocate + * for the proper type. + */ + if (!block_group_bits(block_group, data)) { + u64 extra = BTRFS_BLOCK_GROUP_DUP | + BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10; + + /* + * if they asked for extra copies and this block group + * doesn't provide them, bail. This does allow us to + * fill raid0 from raid1. + */ + if ((data & extra) && !(block_group->flags & extra)) + goto loop; + } + have_block_group: if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { u64 free_percent; @@ -8273,7 +8292,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) break; if (ret != 0) goto error; - leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); cache = kzalloc(sizeof(*cache), GFP_NOFS); -- cgit v1.2.2 From 3cb50ddf97a0a1ca4c68bc12fa1e727a6b45fbf2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Dec 2010 15:53:18 +0000 Subject: Fix btrfs b0rkage Buggered-in: 76dda93c6ae2 ("Btrfs: add snapshot/subvolume destroy ioctl") Signed-off-by: Al Viro Acked-by: Chris Mason Signed-off-by: Linus Torvalds --- fs/btrfs/export.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 6f0444473594..659f532d26a0 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -166,7 +166,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, static struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = child->d_inode; - static struct dentry *dentry; + struct dentry *dentry; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; struct extent_buffer *leaf; -- cgit v1.2.2 From 8844355df7f4e091b03cc131e1549631238b397b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 25 Oct 2010 15:11:43 +0800 Subject: btrfs: Fix bugs in zlib workspace - Fix a race that can result in alloc_workspace > cpus. - Fix to check num_workspace after wakeup. Signed-off-by: Li Zefan --- fs/btrfs/zlib.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index b9cd5445f71c..e5b8b22e07d6 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -75,16 +75,19 @@ again: return workspace; } - spin_unlock(&workspace_lock); if (atomic_read(&alloc_workspace) > cpus) { DEFINE_WAIT(wait); + + spin_unlock(&workspace_lock); prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_read(&alloc_workspace) > cpus) + if (atomic_read(&alloc_workspace) > cpus && !num_workspace) schedule(); finish_wait(&workspace_wait, &wait); goto again; } atomic_inc(&alloc_workspace); + spin_unlock(&workspace_lock); + workspace = kzalloc(sizeof(*workspace), GFP_NOFS); if (!workspace) { ret = -ENOMEM; -- cgit v1.2.2 From 4b72029dc3fd6ba7dc45ccd1cf0aa0ebfa209bd3 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 08:27:27 +0800 Subject: btrfs: Fix error handling in zlib Return failure if alloc_page() fails to allocate memory, and the upper code will just give up compression. 
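Taken together with the workspace race fix above, the allocation path settles into the pattern sketched below (a condensed kernel-style sketch, not the exact code: the workspace is reduced to its list head, the zlib streams and buffers are omitted, and the include set is approximate for this era):

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct workspace {
	struct list_head list;	/* zlib streams and buffers omitted */
};

static LIST_HEAD(idle_workspace);
static DEFINE_SPINLOCK(workspace_lock);
static int num_workspace;
static atomic_t alloc_workspace = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(workspace_wait);

static struct workspace *get_workspace(void)
{
	struct workspace *ws;
	int cpus = num_online_cpus();

again:
	spin_lock(&workspace_lock);
	if (!list_empty(&idle_workspace)) {
		ws = list_first_entry(&idle_workspace, struct workspace, list);
		list_del(&ws->list);
		num_workspace--;
		spin_unlock(&workspace_lock);
		return ws;
	}
	if (atomic_read(&alloc_workspace) > cpus) {
		DEFINE_WAIT(wait);

		/* decide to sleep while still holding the lock ... */
		spin_unlock(&workspace_lock);
		prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
		/* ... and recheck num_workspace so a workspace freed
		 * back in the meantime does not leave us sleeping */
		if (atomic_read(&alloc_workspace) > cpus && !num_workspace)
			schedule();
		finish_wait(&workspace_wait, &wait);
		goto again;
	}
	atomic_inc(&alloc_workspace);
	spin_unlock(&workspace_lock);

	ws = kzalloc(sizeof(*ws), GFP_NOFS);
	if (!ws) {
		/* give the reservation back and let a waiter retry */
		atomic_dec(&alloc_workspace);
		wake_up(&workspace_wait);
		return ERR_PTR(-ENOMEM);
	}
	return ws;
}

Holding workspace_lock across the alloc_workspace test closes the window in which a workspace freed between the test and the sleep decision could be missed, and the !num_workspace recheck after prepare_to_wait() keeps a waiter from sleeping past a wakeup that already returned a workspace to the idle list.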
Signed-off-by: Li Zefan --- fs/btrfs/zlib.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index e5b8b22e07d6..b01558661e3b 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -225,6 +225,10 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, data_in = kmap(in_page); out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -1; + goto out; + } cpage_out = kmap(out_page); pages[0] = out_page; nr_pages = 1; @@ -263,6 +267,10 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, goto out; } out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -1; + goto out; + } cpage_out = kmap(out_page); pages[nr_pages] = out_page; nr_pages++; -- cgit v1.2.2 From 261507a02ccba9afda919852263b6bc1581ce1ef Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 17 Dec 2010 14:21:50 +0800 Subject: btrfs: Allow to add new compression algorithm Make the code aware of compression type, instead of always assuming zlib compression. Also make the zlib workspace function as common code for all compression types. Signed-off-by: Li Zefan --- fs/btrfs/btrfs_inode.h | 2 +- fs/btrfs/compression.c | 236 +++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/compression.h | 66 +++++++++---- fs/btrfs/ctree.h | 10 +- fs/btrfs/extent_io.c | 5 +- fs/btrfs/extent_io.h | 17 +++- fs/btrfs/extent_map.c | 2 + fs/btrfs/extent_map.h | 3 +- fs/btrfs/file.c | 2 + fs/btrfs/inode.c | 82 ++++++++++------ fs/btrfs/ioctl.c | 4 +- fs/btrfs/ordered-data.c | 18 +++- fs/btrfs/ordered-data.h | 8 +- fs/btrfs/super.c | 47 ++++++--- fs/btrfs/zlib.c | 253 ++++++++++-------------------------------------- 15 files changed, 473 insertions(+), 282 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 6ad63f17eca0..ccc991c542df 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -157,7 +157,7 @@ struct btrfs_inode { /* * always compress this one file */ - unsigned force_compress:1; + unsigned force_compress:4; struct inode vfs_inode; }; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b50bc4bd5c56..6638c9877720 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -62,6 +62,9 @@ struct compressed_bio { /* number of bytes on disk */ unsigned long compressed_len; + /* the compression algorithm for this bio */ + int compress_type; + /* number of compressed pages in the array */ unsigned long nr_pages; @@ -173,11 +176,12 @@ static void end_compressed_bio_read(struct bio *bio, int err) /* ok, we're the last bio for this extent, lets start * the decompression. 
*/ - ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, - cb->start, - cb->orig_bio->bi_io_vec, - cb->orig_bio->bi_vcnt, - cb->compressed_len); + ret = btrfs_decompress_biovec(cb->compress_type, + cb->compressed_pages, + cb->start, + cb->orig_bio->bi_io_vec, + cb->orig_bio->bi_vcnt, + cb->compressed_len); csum_failed: if (ret) cb->errors = 1; @@ -588,6 +592,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = uncompressed_len; cb->compressed_len = compressed_len; + cb->compress_type = extent_compress_type(bio_flags); cb->orig_bio = bio; nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / @@ -677,3 +682,224 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bio_put(comp_bio); return 0; } + +static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; +static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES]; +static int comp_num_workspace[BTRFS_COMPRESS_TYPES]; +static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES]; +static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]; + +struct btrfs_compress_op *btrfs_compress_op[] = { + &btrfs_zlib_compress, +}; + +int __init btrfs_init_compress(void) +{ + int i; + + for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { + INIT_LIST_HEAD(&comp_idle_workspace[i]); + spin_lock_init(&comp_workspace_lock[i]); + atomic_set(&comp_alloc_workspace[i], 0); + init_waitqueue_head(&comp_workspace_wait[i]); + } + return 0; +} + +/* + * this finds an available workspace or allocates a new one + * ERR_PTR is returned if things go bad. + */ +static struct list_head *find_workspace(int type) +{ + struct list_head *workspace; + int cpus = num_online_cpus(); + int idx = type - 1; + + struct list_head *idle_workspace = &comp_idle_workspace[idx]; + spinlock_t *workspace_lock = &comp_workspace_lock[idx]; + atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; + wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; + int *num_workspace = &comp_num_workspace[idx]; +again: + spin_lock(workspace_lock); + if (!list_empty(idle_workspace)) { + workspace = idle_workspace->next; + list_del(workspace); + (*num_workspace)--; + spin_unlock(workspace_lock); + return workspace; + + } + if (atomic_read(alloc_workspace) > cpus) { + DEFINE_WAIT(wait); + + spin_unlock(workspace_lock); + prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE); + if (atomic_read(alloc_workspace) > cpus && !*num_workspace) + schedule(); + finish_wait(workspace_wait, &wait); + goto again; + } + atomic_inc(alloc_workspace); + spin_unlock(workspace_lock); + + workspace = btrfs_compress_op[idx]->alloc_workspace(); + if (IS_ERR(workspace)) { + atomic_dec(alloc_workspace); + wake_up(workspace_wait); + } + return workspace; +} + +/* + * put a workspace struct back on the list or free it if we have enough + * idle ones sitting around + */ +static void free_workspace(int type, struct list_head *workspace) +{ + int idx = type - 1; + struct list_head *idle_workspace = &comp_idle_workspace[idx]; + spinlock_t *workspace_lock = &comp_workspace_lock[idx]; + atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; + wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; + int *num_workspace = &comp_num_workspace[idx]; + + spin_lock(workspace_lock); + if (*num_workspace < num_online_cpus()) { + list_add_tail(workspace, idle_workspace); + (*num_workspace)++; + spin_unlock(workspace_lock); + goto wake; + } + spin_unlock(workspace_lock); + + btrfs_compress_op[idx]->free_workspace(workspace); + atomic_dec(alloc_workspace); +wake: + 
if (waitqueue_active(workspace_wait)) + wake_up(workspace_wait); +} + +/* + * cleanup function for module exit + */ +static void free_workspaces(void) +{ + struct list_head *workspace; + int i; + + for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { + while (!list_empty(&comp_idle_workspace[i])) { + workspace = comp_idle_workspace[i].next; + list_del(workspace); + btrfs_compress_op[i]->free_workspace(workspace); + atomic_dec(&comp_alloc_workspace[i]); + } + } +} + +/* + * given an address space and start/len, compress the bytes. + * + * pages are allocated to hold the compressed result and stored + * in 'pages' + * + * out_pages is used to return the number of pages allocated. There + * may be pages allocated even if we return an error + * + * total_in is used to return the number of bytes actually read. It + * may be smaller then len if we had to exit early because we + * ran out of room in the pages array or because we cross the + * max_out threshold. + * + * total_out is used to return the total number of compressed bytes + * + * max_out tells us the max number of bytes that we're allowed to + * stuff into pages + */ +int btrfs_compress_pages(int type, struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) +{ + struct list_head *workspace; + int ret; + + workspace = find_workspace(type); + if (IS_ERR(workspace)) + return -1; + + ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, + start, len, pages, + nr_dest_pages, out_pages, + total_in, total_out, + max_out); + free_workspace(type, workspace); + return ret; +} + +/* + * pages_in is an array of pages with compressed data. + * + * disk_start is the starting logical offset of this array in the file + * + * bvec is a bio_vec of pages from the file that we want to decompress into + * + * vcnt is the count of pages in the biovec + * + * srclen is the number of bytes in pages_in + * + * The basic idea is that we have a bio that was created by readpages. + * The pages in the bio are for the uncompressed data, and they may not + * be contiguous. They all correspond to the range of bytes covered by + * the compressed extent. + */ +int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, + struct bio_vec *bvec, int vcnt, size_t srclen) +{ + struct list_head *workspace; + int ret; + + workspace = find_workspace(type); + if (IS_ERR(workspace)) + return -ENOMEM; + + ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in, + disk_start, + bvec, vcnt, srclen); + free_workspace(type, workspace); + return ret; +} + +/* + * a less complex decompression routine. Our compressed data fits in a + * single page, and we want to read a single page out of it. 
+ * start_byte tells us the offset into the compressed data we're interested in + */ +int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, + unsigned long start_byte, size_t srclen, size_t destlen) +{ + struct list_head *workspace; + int ret; + + workspace = find_workspace(type); + if (IS_ERR(workspace)) + return -ENOMEM; + + ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, + dest_page, start_byte, + srclen, destlen); + + free_workspace(type, workspace); + return ret; +} + +void __exit btrfs_exit_compress(void) +{ + free_workspaces(); +} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 421f5b4aa715..9b5f2f365b79 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -19,24 +19,22 @@ #ifndef __BTRFS_COMPRESSION_ #define __BTRFS_COMPRESSION_ -int btrfs_zlib_decompress(unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen); -int btrfs_zlib_compress_pages(struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); -int btrfs_zlib_decompress_biovec(struct page **pages_in, - u64 disk_start, - struct bio_vec *bvec, - int vcnt, - size_t srclen); -void btrfs_zlib_exit(void); +int btrfs_init_compress(void); +void btrfs_exit_compress(void); + +int btrfs_compress_pages(int type, struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out); +int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, + struct bio_vec *bvec, int vcnt, size_t srclen); +int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, + unsigned long start_byte, size_t srclen, size_t destlen); + int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long len, u64 disk_start, unsigned long compressed_len, @@ -44,4 +42,36 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long nr_pages); int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags); + +struct btrfs_compress_op { + struct list_head *(*alloc_workspace)(void); + + void (*free_workspace)(struct list_head *workspace); + + int (*compress_pages)(struct list_head *workspace, + struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out); + + int (*decompress_biovec)(struct list_head *workspace, + struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen); + + int (*decompress)(struct list_head *workspace, + unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen); +}; + +extern struct btrfs_compress_op btrfs_zlib_compress; + #endif diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index af52f6d7a4d8..e06534438592 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -551,9 +551,10 @@ struct btrfs_timespec { } __attribute__ ((__packed__)); enum btrfs_compression_type { - BTRFS_COMPRESS_NONE = 0, - BTRFS_COMPRESS_ZLIB = 1, - BTRFS_COMPRESS_LAST = 2, + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_TYPES = 1, + 
BTRFS_COMPRESS_LAST = 2, }; struct btrfs_inode_item { @@ -895,7 +896,8 @@ struct btrfs_fs_info { */ u64 last_trans_log_full_commit; u64 open_ioctl_trans; - unsigned long mount_opt; + unsigned long mount_opt:20; + unsigned long compress_type:4; u64 max_inline; u64 alloc_start; struct btrfs_transaction *running_transaction; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5e7a94d7da89..f1d198128959 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2028,8 +2028,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree, BUG_ON(extent_map_end(em) <= cur); BUG_ON(end < cur); - if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { this_bio_flag = EXTENT_BIO_COMPRESSED; + extent_set_compress_type(&this_bio_flag, + em->compress_type); + } iosize = min(extent_map_end(em) - cur, end - cur + 1); cur_end = min(extent_map_end(em) - 1, end); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 4183c8178f01..7083cfafd061 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -20,8 +20,12 @@ #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) -/* flags for bio submission */ +/* + * flags for bio submission. The high bits indicate the compression + * type for this bio + */ #define EXTENT_BIO_COMPRESSED 1 +#define EXTENT_BIO_FLAG_SHIFT 16 /* these are bit numbers for test/set bit */ #define EXTENT_BUFFER_UPTODATE 0 @@ -135,6 +139,17 @@ struct extent_buffer { wait_queue_head_t lock_wq; }; +static inline void extent_set_compress_type(unsigned long *bio_flags, + int compress_type) +{ + *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT; +} + +static inline int extent_compress_type(unsigned long bio_flags) +{ + return bio_flags >> EXTENT_BIO_FLAG_SHIFT; +} + struct extent_map_tree; static inline struct extent_state *extent_state_next(struct extent_state *state) diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 23cb8da3ff66..b0e1fce12530 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -3,6 +3,7 @@ #include #include #include +#include "ctree.h" #include "extent_map.h" @@ -54,6 +55,7 @@ struct extent_map *alloc_extent_map(gfp_t mask) return em; em->in_tree = 0; em->flags = 0; + em->compress_type = BTRFS_COMPRESS_NONE; atomic_set(&em->refs, 1); return em; } diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index ab6d74b6e647..28b44dbd1e35 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -26,7 +26,8 @@ struct extent_map { unsigned long flags; struct block_device *bdev; atomic_t refs; - int in_tree; + unsigned int in_tree:1; + unsigned int compress_type:4; }; struct extent_map_tree { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 66836d85763b..05df688c96f4 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -224,6 +224,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->bdev = em->bdev; split->flags = flags; + split->compress_type = em->compress_type; ret = add_extent_mapping(em_tree, split); BUG_ON(ret); free_extent_map(split); @@ -238,6 +239,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->len = em->start + em->len - (start + len); split->bdev = em->bdev; split->flags = flags; + split->compress_type = em->compress_type; if (compressed) { split->block_len = em->block_len; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f9194438f7c..ba563b2a5d6c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ 
-122,10 +122,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, size_t cur_size = size; size_t datasize; unsigned long offset; - int use_compress = 0; + int compress_type = BTRFS_COMPRESS_NONE; if (compressed_size && compressed_pages) { - use_compress = 1; + compress_type = root->fs_info->compress_type; cur_size = compressed_size; } @@ -159,7 +159,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, btrfs_set_file_extent_ram_bytes(leaf, ei, size); ptr = btrfs_file_extent_inline_start(ei); - if (use_compress) { + if (compress_type != BTRFS_COMPRESS_NONE) { struct page *cpage; int i = 0; while (compressed_size > 0) { @@ -176,7 +176,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, compressed_size -= cur_size; } btrfs_set_file_extent_compression(leaf, ei, - BTRFS_COMPRESS_ZLIB); + compress_type); } else { page = find_get_page(inode->i_mapping, start >> PAGE_CACHE_SHIFT); @@ -263,6 +263,7 @@ struct async_extent { u64 compressed_size; struct page **pages; unsigned long nr_pages; + int compress_type; struct list_head list; }; @@ -280,7 +281,8 @@ static noinline int add_async_extent(struct async_cow *cow, u64 start, u64 ram_size, u64 compressed_size, struct page **pages, - unsigned long nr_pages) + unsigned long nr_pages, + int compress_type) { struct async_extent *async_extent; @@ -290,6 +292,7 @@ static noinline int add_async_extent(struct async_cow *cow, async_extent->compressed_size = compressed_size; async_extent->pages = pages; async_extent->nr_pages = nr_pages; + async_extent->compress_type = compress_type; list_add_tail(&async_extent->list, &cow->extents); return 0; } @@ -332,6 +335,7 @@ static noinline int compress_file_range(struct inode *inode, unsigned long max_uncompressed = 128 * 1024; int i; int will_compress; + int compress_type = root->fs_info->compress_type; actual_end = min_t(u64, isize, end + 1); again: @@ -381,12 +385,16 @@ again: WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); - ret = btrfs_zlib_compress_pages(inode->i_mapping, start, - total_compressed, pages, - nr_pages, &nr_pages_ret, - &total_in, - &total_compressed, - max_compressed); + if (BTRFS_I(inode)->force_compress) + compress_type = BTRFS_I(inode)->force_compress; + + ret = btrfs_compress_pages(compress_type, + inode->i_mapping, start, + total_compressed, pages, + nr_pages, &nr_pages_ret, + &total_in, + &total_compressed, + max_compressed); if (!ret) { unsigned long offset = total_compressed & @@ -493,7 +501,8 @@ again: * and will submit them to the elevator. 
*/ add_async_extent(async_cow, start, num_bytes, - total_compressed, pages, nr_pages_ret); + total_compressed, pages, nr_pages_ret, + compress_type); if (start + num_bytes < end) { start += num_bytes; @@ -515,7 +524,8 @@ cleanup_and_bail_uncompressed: __set_page_dirty_nobuffers(locked_page); /* unlocked later on in the async handlers */ } - add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0); + add_async_extent(async_cow, start, end - start + 1, + 0, NULL, 0, BTRFS_COMPRESS_NONE); *num_added += 1; } @@ -640,6 +650,7 @@ retry: em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; + em->compress_type = async_extent->compress_type; set_bit(EXTENT_FLAG_PINNED, &em->flags); set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); @@ -656,11 +667,13 @@ retry: async_extent->ram_size - 1, 0); } - ret = btrfs_add_ordered_extent(inode, async_extent->start, - ins.objectid, - async_extent->ram_size, - ins.offset, - BTRFS_ORDERED_COMPRESSED); + ret = btrfs_add_ordered_extent_compress(inode, + async_extent->start, + ins.objectid, + async_extent->ram_size, + ins.offset, + BTRFS_ORDERED_COMPRESSED, + async_extent->compress_type); BUG_ON(ret); /* @@ -1670,7 +1683,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_state *cached_state = NULL; - int compressed = 0; + int compress_type = 0; int ret; bool nolock = false; @@ -1711,9 +1724,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans->block_rsv = &root->fs_info->delalloc_block_rsv; if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) - compressed = 1; + compress_type = ordered_extent->compress_type; if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { - BUG_ON(compressed); + BUG_ON(compress_type); ret = btrfs_mark_extent_written(trans, inode, ordered_extent->file_offset, ordered_extent->file_offset + @@ -1727,7 +1740,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->disk_len, ordered_extent->len, ordered_extent->len, - compressed, 0, 0, + compress_type, 0, 0, BTRFS_FILE_EXTENT_REG); unpin_extent_cache(&BTRFS_I(inode)->extent_tree, ordered_extent->file_offset, @@ -1829,6 +1842,8 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { logical = em->block_start; failrec->bio_flags = EXTENT_BIO_COMPRESSED; + extent_set_compress_type(&failrec->bio_flags, + em->compress_type); } failrec->logical = logical; free_extent_map(em); @@ -4930,8 +4945,10 @@ static noinline int uncompress_inline(struct btrfs_path *path, size_t max_size; unsigned long inline_size; unsigned long ptr; + int compress_type; WARN_ON(pg_offset != 0); + compress_type = btrfs_file_extent_compression(leaf, item); max_size = btrfs_file_extent_ram_bytes(leaf, item); inline_size = btrfs_file_extent_inline_item_len(leaf, btrfs_item_nr(leaf, path->slots[0])); @@ -4941,8 +4958,8 @@ static noinline int uncompress_inline(struct btrfs_path *path, read_extent_buffer(leaf, tmp, ptr, inline_size); max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); - ret = btrfs_zlib_decompress(tmp, page, extent_offset, - inline_size, max_size); + ret = btrfs_decompress(compress_type, tmp, page, + extent_offset, inline_size, max_size); if (ret) { char *kaddr = kmap_atomic(page, KM_USER0); unsigned long copy_size = min_t(u64, @@ -4984,7 +5001,7 @@ struct 
extent_map *btrfs_get_extent(struct inode *inode, struct page *page, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_trans_handle *trans = NULL; - int compressed; + int compress_type; again: read_lock(&em_tree->lock); @@ -5043,7 +5060,7 @@ again: found_type = btrfs_file_extent_type(leaf, item); extent_start = found_key.offset; - compressed = btrfs_file_extent_compression(leaf, item); + compress_type = btrfs_file_extent_compression(leaf, item); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { extent_end = extent_start + @@ -5089,8 +5106,9 @@ again: em->block_start = EXTENT_MAP_HOLE; goto insert; } - if (compressed) { + if (compress_type != BTRFS_COMPRESS_NONE) { set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + em->compress_type = compress_type; em->block_start = bytenr; em->block_len = btrfs_file_extent_disk_num_bytes(leaf, item); @@ -5124,12 +5142,14 @@ again: em->len = (copy_size + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); em->orig_start = EXTENT_MAP_INLINE; - if (compressed) + if (compress_type) { set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + em->compress_type = compress_type; + } ptr = btrfs_file_extent_inline_start(item) + extent_offset; if (create == 0 && !PageUptodate(page)) { - if (btrfs_file_extent_compression(leaf, item) == - BTRFS_COMPRESS_ZLIB) { + if (btrfs_file_extent_compression(leaf, item) != + BTRFS_COMPRESS_NONE) { ret = uncompress_inline(path, inode, page, pg_offset, extent_offset, item); @@ -6479,7 +6499,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->ordered_data_close = 0; ei->orphan_meta_reserved = 0; ei->dummy_inode = 0; - ei->force_compress = 0; + ei->force_compress = BTRFS_COMPRESS_NONE; inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f87552a1d7ea..8cb86d4d763c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -683,7 +683,7 @@ static int btrfs_defrag_file(struct file *file, total_read++; mutex_lock(&inode->i_mutex); if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) - BTRFS_I(inode)->force_compress = 1; + BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_ZLIB; ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (ret) @@ -781,7 +781,7 @@ loop_unlock: atomic_dec(&root->fs_info->async_submit_draining); mutex_lock(&inode->i_mutex); - BTRFS_I(inode)->force_compress = 0; + BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE; mutex_unlock(&inode->i_mutex); } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index ae7737e352c9..2b61e1ddcd99 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -172,7 +172,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, */ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, - int type, int dio) + int type, int dio, int compress_type) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -189,6 +189,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->disk_len = disk_len; entry->bytes_left = len; entry->inode = inode; + entry->compress_type = compress_type; if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); @@ -220,14 +221,25 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, 
file_offset, start, len, - disk_len, type, 0); + disk_len, type, 0, + BTRFS_COMPRESS_NONE); } int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, - disk_len, type, 1); + disk_len, type, 1, + BTRFS_COMPRESS_NONE); +} + +int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, + int type, int compress_type) +{ + return __btrfs_add_ordered_extent(inode, file_offset, start, len, + disk_len, type, 0, + compress_type); } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 61dca83119dd..ff1f69aa1883 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -68,7 +68,7 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */ -#define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */ +#define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */ #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ @@ -93,6 +93,9 @@ struct btrfs_ordered_extent { /* flags (described above) */ unsigned long flags; + /* compression algorithm */ + int compress_type; + /* reference count */ atomic_t refs; @@ -148,6 +151,9 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type); int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type); +int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, + int type, int compress_type); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 61bd79abb805..f348f2b93164 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -69,9 +69,9 @@ enum { Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, - Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit, - Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_err, - Opt_user_subvol_rm_allowed, + Opt_compress_type, Opt_compress_force, Opt_compress_force_type, + Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, + Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, }; static match_table_t tokens = { @@ -86,7 +86,9 @@ static match_table_t tokens = { {Opt_alloc_start, "alloc_start=%s"}, {Opt_thread_pool, "thread_pool=%d"}, {Opt_compress, "compress"}, + {Opt_compress_type, "compress=%s"}, {Opt_compress_force, "compress-force"}, + {Opt_compress_force_type, "compress-force=%s"}, {Opt_ssd, "ssd"}, {Opt_ssd_spread, "ssd_spread"}, {Opt_nossd, "nossd"}, @@ -112,6 +114,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) char *p, *num, *orig; int intarg; int ret = 0; + char *compress_type; + bool compress_force = false; if (!options) return 0; @@ -154,14 +158,29 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) btrfs_set_opt(info->mount_opt, NODATACOW); btrfs_set_opt(info->mount_opt, NODATASUM); break; - case Opt_compress: - printk(KERN_INFO "btrfs: use compression\n"); - btrfs_set_opt(info->mount_opt, COMPRESS); - break; case Opt_compress_force: - printk(KERN_INFO "btrfs: forcing compression\n"); - btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); + 
case Opt_compress_force_type: + compress_force = true; + case Opt_compress: + case Opt_compress_type: + if (token == Opt_compress || + token == Opt_compress_force || + strcmp(args[0].from, "zlib") == 0) { + compress_type = "zlib"; + info->compress_type = BTRFS_COMPRESS_ZLIB; + } else { + ret = -EINVAL; + goto out; + } + btrfs_set_opt(info->mount_opt, COMPRESS); + if (compress_force) { + btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); + pr_info("btrfs: force %s compression\n", + compress_type); + } else + pr_info("btrfs: use %s compression\n", + compress_type); break; case Opt_ssd: printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); @@ -898,10 +917,14 @@ static int __init init_btrfs_fs(void) if (err) return err; - err = btrfs_init_cachep(); + err = btrfs_init_compress(); if (err) goto free_sysfs; + err = btrfs_init_cachep(); + if (err) + goto free_compress; + err = extent_io_init(); if (err) goto free_cachep; @@ -929,6 +952,8 @@ free_extent_io: extent_io_exit(); free_cachep: btrfs_destroy_cachep(); +free_compress: + btrfs_exit_compress(); free_sysfs: btrfs_exit_sysfs(); return err; @@ -943,7 +968,7 @@ static void __exit exit_btrfs_fs(void) unregister_filesystem(&btrfs_fs_type); btrfs_exit_sysfs(); btrfs_cleanup_fs_uuids(); - btrfs_zlib_exit(); + btrfs_exit_compress(); } module_init(init_btrfs_fs) diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index b01558661e3b..9a3e693917f2 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -32,15 +32,6 @@ #include #include "compression.h" -/* Plan: call deflate() with avail_in == *sourcelen, - avail_out = *dstlen - 12 and flush == Z_FINISH. - If it doesn't manage to finish, call it again with - avail_in == 0 and avail_out set to the remaining 12 - bytes for it to clean up. - Q: Is 12 bytes sufficient? -*/ -#define STREAM_END_SPACE 12 - struct workspace { z_stream inf_strm; z_stream def_strm; @@ -48,155 +39,51 @@ struct workspace { struct list_head list; }; -static LIST_HEAD(idle_workspace); -static DEFINE_SPINLOCK(workspace_lock); -static unsigned long num_workspace; -static atomic_t alloc_workspace = ATOMIC_INIT(0); -static DECLARE_WAIT_QUEUE_HEAD(workspace_wait); +static void zlib_free_workspace(struct list_head *ws) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); -/* - * this finds an available zlib workspace or allocates a new one - * NULL or an ERR_PTR is returned if things go bad. 
- */ -static struct workspace *find_zlib_workspace(void) + vfree(workspace->def_strm.workspace); + vfree(workspace->inf_strm.workspace); + kfree(workspace->buf); + kfree(workspace); +} + +static struct list_head *zlib_alloc_workspace(void) { struct workspace *workspace; - int ret; - int cpus = num_online_cpus(); - -again: - spin_lock(&workspace_lock); - if (!list_empty(&idle_workspace)) { - workspace = list_entry(idle_workspace.next, struct workspace, - list); - list_del(&workspace->list); - num_workspace--; - spin_unlock(&workspace_lock); - return workspace; - - } - if (atomic_read(&alloc_workspace) > cpus) { - DEFINE_WAIT(wait); - - spin_unlock(&workspace_lock); - prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_read(&alloc_workspace) > cpus && !num_workspace) - schedule(); - finish_wait(&workspace_wait, &wait); - goto again; - } - atomic_inc(&alloc_workspace); - spin_unlock(&workspace_lock); workspace = kzalloc(sizeof(*workspace), GFP_NOFS); - if (!workspace) { - ret = -ENOMEM; - goto fail; - } + if (!workspace) + return ERR_PTR(-ENOMEM); workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); - if (!workspace->def_strm.workspace) { - ret = -ENOMEM; - goto fail; - } workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); - if (!workspace->inf_strm.workspace) { - ret = -ENOMEM; - goto fail_inflate; - } workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); - if (!workspace->buf) { - ret = -ENOMEM; - goto fail_kmalloc; - } - return workspace; - -fail_kmalloc: - vfree(workspace->inf_strm.workspace); -fail_inflate: - vfree(workspace->def_strm.workspace); -fail: - kfree(workspace); - atomic_dec(&alloc_workspace); - wake_up(&workspace_wait); - return ERR_PTR(ret); -} - -/* - * put a workspace struct back on the list or free it if we have enough - * idle ones sitting around - */ -static int free_workspace(struct workspace *workspace) -{ - spin_lock(&workspace_lock); - if (num_workspace < num_online_cpus()) { - list_add_tail(&workspace->list, &idle_workspace); - num_workspace++; - spin_unlock(&workspace_lock); - if (waitqueue_active(&workspace_wait)) - wake_up(&workspace_wait); - return 0; - } - spin_unlock(&workspace_lock); - vfree(workspace->def_strm.workspace); - vfree(workspace->inf_strm.workspace); - kfree(workspace->buf); - kfree(workspace); + if (!workspace->def_strm.workspace || + !workspace->inf_strm.workspace || !workspace->buf) + goto fail; - atomic_dec(&alloc_workspace); - if (waitqueue_active(&workspace_wait)) - wake_up(&workspace_wait); - return 0; -} + INIT_LIST_HEAD(&workspace->list); -/* - * cleanup function for module exit - */ -static void free_workspaces(void) -{ - struct workspace *workspace; - while (!list_empty(&idle_workspace)) { - workspace = list_entry(idle_workspace.next, struct workspace, - list); - list_del(&workspace->list); - vfree(workspace->def_strm.workspace); - vfree(workspace->inf_strm.workspace); - kfree(workspace->buf); - kfree(workspace); - atomic_dec(&alloc_workspace); - } + return &workspace->list; +fail: + zlib_free_workspace(&workspace->list); + return ERR_PTR(-ENOMEM); } -/* - * given an address space and start/len, compress the bytes. - * - * pages are allocated to hold the compressed result and stored - * in 'pages' - * - * out_pages is used to return the number of pages allocated. There - * may be pages allocated even if we return an error - * - * total_in is used to return the number of bytes actually read. 
It - * may be smaller then len if we had to exit early because we - * ran out of room in the pages array or because we cross the - * max_out threshold. - * - * total_out is used to return the total number of compressed bytes - * - * max_out tells us the max number of bytes that we're allowed to - * stuff into pages - */ -int btrfs_zlib_compress_pages(struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) +static int zlib_compress_pages(struct list_head *ws, + struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) { + struct workspace *workspace = list_entry(ws, struct workspace, list); int ret; - struct workspace *workspace; char *data_in; char *cpage_out; int nr_pages = 0; @@ -208,10 +95,6 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, *total_out = 0; *total_in = 0; - workspace = find_zlib_workspace(); - if (IS_ERR(workspace)) - return -1; - if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { printk(KERN_WARNING "deflateInit failed\n"); ret = -1; @@ -325,35 +208,18 @@ out: kunmap(in_page); page_cache_release(in_page); } - free_workspace(workspace); return ret; } -/* - * pages_in is an array of pages with compressed data. - * - * disk_start is the starting logical offset of this array in the file - * - * bvec is a bio_vec of pages from the file that we want to decompress into - * - * vcnt is the count of pages in the biovec - * - * srclen is the number of bytes in pages_in - * - * The basic idea is that we have a bio that was created by readpages. - * The pages in the bio are for the uncompressed data, and they may not - * be contiguous. They all correspond to the range of bytes covered by - * the compressed extent. - */ -int btrfs_zlib_decompress_biovec(struct page **pages_in, - u64 disk_start, - struct bio_vec *bvec, - int vcnt, - size_t srclen) +static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen) { + struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; int wbits = MAX_WBITS; - struct workspace *workspace; char *data_in; size_t total_out = 0; unsigned long page_bytes_left; @@ -371,10 +237,6 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, unsigned long current_buf_start; char *kaddr; - workspace = find_zlib_workspace(); - if (IS_ERR(workspace)) - return -ENOMEM; - data_in = kmap(pages_in[page_in_index]); workspace->inf_strm.next_in = data_in; workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); @@ -400,8 +262,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { printk(KERN_WARNING "inflateInit failed\n"); - ret = -1; - goto out; + return -1; } while (workspace->inf_strm.total_in < srclen) { ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); @@ -527,35 +388,21 @@ done: zlib_inflateEnd(&workspace->inf_strm); if (data_in) kunmap(pages_in[page_in_index]); -out: - free_workspace(workspace); return ret; } -/* - * a less complex decompression routine. Our compressed data fits in a - * single page, and we want to read a single page out of it. 
- * start_byte tells us the offset into the compressed data we're interested in - */ -int btrfs_zlib_decompress(unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +static int zlib_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen) { + struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; int wbits = MAX_WBITS; - struct workspace *workspace; unsigned long bytes_left = destlen; unsigned long total_out = 0; char *kaddr; - if (destlen > PAGE_CACHE_SIZE) - return -ENOMEM; - - workspace = find_zlib_workspace(); - if (IS_ERR(workspace)) - return -ENOMEM; - workspace->inf_strm.next_in = data_in; workspace->inf_strm.avail_in = srclen; workspace->inf_strm.total_in = 0; @@ -576,8 +423,7 @@ int btrfs_zlib_decompress(unsigned char *data_in, if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { printk(KERN_WARNING "inflateInit failed\n"); - ret = -1; - goto out; + return -1; } while (bytes_left > 0) { @@ -627,12 +473,13 @@ next: ret = 0; zlib_inflateEnd(&workspace->inf_strm); -out: - free_workspace(workspace); return ret; } -void btrfs_zlib_exit(void) -{ - free_workspaces(); -} +struct btrfs_compress_op btrfs_zlib_compress = { + .alloc_workspace = zlib_alloc_workspace, + .free_workspace = zlib_free_workspace, + .compress_pages = zlib_compress_pages, + .decompress_biovec = zlib_decompress_biovec, + .decompress = zlib_decompress, +}; -- cgit v1.2.2 From a6fa6fae40ec336c7df6155255ae64ebef43a8bc Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 25 Oct 2010 15:12:26 +0800 Subject: btrfs: Add lzo compression support Lzo is a much faster compression algorithm than gzip, so it allows more users to enable transparent compression and to trade compression ratio against speed per application. Usage: # mount -t btrfs -o compress[=<type>] dev /mnt or # mount -t btrfs -o compress-force[=<type>] dev /mnt, where <type> is "zlib" or "lzo". "-o compress" without an argument is still allowed for compatibility. Compatibility: If we mount a filesystem with lzo compression, it cannot be mounted by old kernels. One reason is that btrfs would otherwise dump compressed data, which sits in inline extents, directly to userspace. Performance: The test copied a Linux source tarball (~400M) from an ext4 partition to the btrfs partition, and then extracted it.
(time in seconds)
          lzo    zlib   nocompress
copy:     10.6   21.7   14.9
extract:  70.1   94.4   66.6
(data size in MB)
          lzo      zlib     nocompress
copy:     185.87   108.69   394.49
extract:  193.80   132.36   381.21
Changelog: v1 -> v2: - Select LZO_COMPRESS and LZO_DECOMPRESS in the btrfs Kconfig. - Add incompatibility flag. - Fix error handling in the compress code. Signed-off-by: Li Zefan --- fs/btrfs/Kconfig | 2 + fs/btrfs/Makefile | 2 +- fs/btrfs/compression.c | 1 + fs/btrfs/compression.h | 1 + fs/btrfs/ctree.h | 9 +- fs/btrfs/disk-io.c | 8 +- fs/btrfs/lzo.c | 509 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/super.c | 3 + 8 files changed, 527 insertions(+), 8 deletions(-) create mode 100644 fs/btrfs/lzo.c (limited to 'fs/btrfs') diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 7bb3c020e570..ecb9fd3be143 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -4,6 +4,8 @@ config BTRFS_FS select LIBCRC32C select ZLIB_INFLATE select ZLIB_DEFLATE + select LZO_COMPRESS + select LZO_DECOMPRESS help Btrfs is a new filesystem with extents, writable snapshotting, support for multiple devices and many more features.
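The compatibility note above relies on btrfs's incompat feature mask: at mount time the kernel compares the superblock's incompat bits against the set it supports and refuses anything unknown, which is why an lzo-compressed filesystem is rejected by kernels that predate this flag. The standalone C sketch below illustrates that gating with the flag values from this series; can_mount() and OLD_KERNEL_INCOMPAT_SUPP are illustrative names, not kernel code.

#include <stdio.h>

#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF   (1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL  (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS    (1ULL << 2)
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO    (1ULL << 3)

/* illustrative: the incompat bits an older kernel understands (no LZO bit) */
#define OLD_KERNEL_INCOMPAT_SUPP \
        (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
         BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
         BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)

/* refuse the mount if the superblock carries bits we do not recognize */
static int can_mount(unsigned long long incompat)
{
        unsigned long long unknown = incompat & ~OLD_KERNEL_INCOMPAT_SUPP;

        if (unknown)
                fprintf(stderr, "unsupported incompat bits: 0x%llx\n", unknown);
        return unknown == 0;
}

int main(void)
{
        /* a filesystem that has written lzo-compressed extents */
        unsigned long long sb_incompat = BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |
                                         BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

        printf("old kernel mounts it: %s\n",
               can_mount(sb_incompat) ? "yes" : "no");
        return 0;
}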
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index a35eb36b32fd..31610ea73aec 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -6,5 +6,5 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ transaction.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ - export.o tree-log.o acl.o free-space-cache.o zlib.o \ + export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ compression.o delayed-ref.o relocation.o diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6638c9877720..8faa2df9e719 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -691,6 +691,7 @@ static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]; struct btrfs_compress_op *btrfs_compress_op[] = { &btrfs_zlib_compress, + &btrfs_lzo_compress, }; int __init btrfs_init_compress(void) diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 9b5f2f365b79..f7ce217113fa 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -73,5 +73,6 @@ struct btrfs_compress_op { }; extern struct btrfs_compress_op btrfs_zlib_compress; +extern struct btrfs_compress_op btrfs_lzo_compress; #endif diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e06534438592..53b984623983 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -398,13 +398,15 @@ struct btrfs_super_block { #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) +#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) #define BTRFS_FEATURE_COMPAT_SUPP 0ULL #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL #define BTRFS_FEATURE_INCOMPAT_SUPP \ (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ - BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) + BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ + BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO) /* * A leaf is full of items. offset and size tell us where to find @@ -553,8 +555,9 @@ struct btrfs_timespec { enum btrfs_compression_type { BTRFS_COMPRESS_NONE = 0, BTRFS_COMPRESS_ZLIB = 1, - BTRFS_COMPRESS_TYPES = 1, - BTRFS_COMPRESS_LAST = 2, + BTRFS_COMPRESS_LZO = 2, + BTRFS_COMPRESS_TYPES = 2, + BTRFS_COMPRESS_LAST = 3, }; struct btrfs_inode_item { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a5d2249e6da5..f88eb2ce7919 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1744,10 +1744,10 @@ struct btrfs_root *open_ctree(struct super_block *sb, } features = btrfs_super_incompat_flags(disk_super); - if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) { - features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; - btrfs_set_super_incompat_flags(disk_super, features); - } + features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; + if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) + features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; + btrfs_set_super_incompat_flags(disk_super, features); features = btrfs_super_compat_ro_flags(disk_super) & ~BTRFS_FEATURE_COMPAT_RO_SUPP; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c new file mode 100644 index 000000000000..523b144e2aec --- /dev/null +++ b/fs/btrfs/lzo.c @@ -0,0 +1,509 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "compression.h" + +#define LZO_LEN 4 + +struct workspace { + void *mem; + void *buf; /* where compressed data goes */ + void *cbuf; /* where decompressed data goes */ + struct list_head list; +}; + +static void lzo_free_workspace(struct list_head *ws) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + + vfree(workspace->buf); + vfree(workspace->cbuf); + vfree(workspace->mem); + kfree(workspace); +} + +static struct list_head *lzo_alloc_workspace(void) +{ + struct workspace *workspace; + + workspace = kzalloc(sizeof(*workspace), GFP_NOFS); + if (!workspace) + return ERR_PTR(-ENOMEM); + + workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); + workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); + workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); + if (!workspace->mem || !workspace->buf || !workspace->cbuf) + goto fail; + + INIT_LIST_HEAD(&workspace->list); + + return &workspace->list; +fail: + lzo_free_workspace(&workspace->list); + return ERR_PTR(-ENOMEM); +} + +static inline void write_compress_length(char *buf, size_t len) +{ + __le32 dlen; + + dlen = cpu_to_le32(len); + memcpy(buf, &dlen, LZO_LEN); +} + +static inline size_t read_compress_length(char *buf) +{ + __le32 dlen; + + memcpy(&dlen, buf, LZO_LEN); + return le32_to_cpu(dlen); +} + +static int lzo_compress_pages(struct list_head *ws, + struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + int ret = 0; + char *data_in; + char *cpage_out; + int nr_pages = 0; + struct page *in_page = NULL; + struct page *out_page = NULL; + unsigned long bytes_left; + + size_t in_len; + size_t out_len; + char *buf; + unsigned long tot_in = 0; + unsigned long tot_out = 0; + unsigned long pg_bytes_left; + unsigned long out_offset; + unsigned long bytes; + + *out_pages = 0; + *total_out = 0; + *total_in = 0; + + in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + + /* + * store the size of all chunks of compressed data in + * the first 4 bytes + */ + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -ENOMEM; + goto out; + } + cpage_out = kmap(out_page); + out_offset = LZO_LEN; + tot_out = LZO_LEN; + pages[0] = out_page; + nr_pages = 1; + pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + + /* compress at most one page of data each time */ + in_len = min(len, PAGE_CACHE_SIZE); + while (tot_in < len) { + ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, + &out_len, workspace->mem); + if (ret != LZO_E_OK) { + printk(KERN_DEBUG "btrfs deflate in loop returned %d\n", + ret); + ret = -1; + goto out; + } + + /* store the size of this chunk of compressed data */ + write_compress_length(cpage_out + out_offset, out_len); + tot_out += 
LZO_LEN; + out_offset += LZO_LEN; + pg_bytes_left -= LZO_LEN; + + tot_in += in_len; + tot_out += out_len; + + /* copy bytes from the working buffer into the pages */ + buf = workspace->cbuf; + while (out_len) { + bytes = min_t(unsigned long, pg_bytes_left, out_len); + + memcpy(cpage_out + out_offset, buf, bytes); + + out_len -= bytes; + pg_bytes_left -= bytes; + buf += bytes; + out_offset += bytes; + + /* + * we need another page for writing out. + * + * Note if there's less than 4 bytes left, we just + * skip to a new page. + */ + if ((out_len == 0 && pg_bytes_left < LZO_LEN) || + pg_bytes_left == 0) { + if (pg_bytes_left) { + memset(cpage_out + out_offset, 0, + pg_bytes_left); + tot_out += pg_bytes_left; + } + + /* we're done, don't allocate new page */ + if (out_len == 0 && tot_in >= len) + break; + + kunmap(out_page); + if (nr_pages == nr_dest_pages) { + out_page = NULL; + ret = -1; + goto out; + } + + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -ENOMEM; + goto out; + } + cpage_out = kmap(out_page); + pages[nr_pages++] = out_page; + + pg_bytes_left = PAGE_CACHE_SIZE; + out_offset = 0; + } + } + + /* we're making it bigger, give up */ + if (tot_in > 8192 && tot_in < tot_out) + goto out; + + /* we're all done */ + if (tot_in >= len) + break; + + if (tot_out > max_out) + break; + + bytes_left = len - tot_in; + kunmap(in_page); + page_cache_release(in_page); + + start += PAGE_CACHE_SIZE; + in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + in_len = min(bytes_left, PAGE_CACHE_SIZE); + } + + if (tot_out > tot_in) + goto out; + + /* store the size of all chunks of compressed data */ + cpage_out = kmap(pages[0]); + write_compress_length(cpage_out, tot_out); + + kunmap(pages[0]); + + ret = 0; + *total_out = tot_out; + *total_in = tot_in; +out: + *out_pages = nr_pages; + if (out_page) + kunmap(out_page); + + if (in_page) { + kunmap(in_page); + page_cache_release(in_page); + } + + return ret; +} + +static int lzo_decompress_biovec(struct list_head *ws, + struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + int ret = 0; + char *data_in; + unsigned long page_bytes_left; + unsigned long page_in_index = 0; + unsigned long page_out_index = 0; + struct page *page_out; + unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; + unsigned long buf_start; + unsigned long buf_offset = 0; + unsigned long bytes; + unsigned long working_bytes; + unsigned long pg_offset; + unsigned long start_byte; + unsigned long current_buf_start; + char *kaddr; + + size_t in_len; + size_t out_len; + unsigned long in_offset; + unsigned long in_page_bytes_left; + unsigned long tot_in; + unsigned long tot_out; + unsigned long tot_len; + char *buf; + + data_in = kmap(pages_in[0]); + tot_len = read_compress_length(data_in); + + tot_in = LZO_LEN; + in_offset = LZO_LEN; + tot_len = min_t(size_t, srclen, tot_len); + in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + + tot_out = 0; + page_out = bvec[0].bv_page; + page_bytes_left = PAGE_CACHE_SIZE; + pg_offset = 0; + + while (tot_in < tot_len) { + in_len = read_compress_length(data_in + in_offset); + in_page_bytes_left -= LZO_LEN; + in_offset += LZO_LEN; + tot_in += LZO_LEN; + + tot_in += in_len; + working_bytes = in_len; + + /* fast path: avoid using the working buffer */ + if (in_page_bytes_left >= in_len) { + buf = data_in + in_offset; + bytes = in_len; + goto 
cont; + } + + /* copy bytes from the pages into the working buffer */ + buf = workspace->cbuf; + buf_offset = 0; + while (working_bytes) { + bytes = min(working_bytes, in_page_bytes_left); + + memcpy(buf + buf_offset, data_in + in_offset, bytes); + buf_offset += bytes; +cont: + working_bytes -= bytes; + in_page_bytes_left -= bytes; + in_offset += bytes; + + /* check if we need to pick another page */ + if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN) + || in_page_bytes_left == 0) { + tot_in += in_page_bytes_left; + + if (working_bytes == 0 && tot_in >= tot_len) + break; + + kunmap(pages_in[page_in_index]); + page_in_index++; + if (page_in_index >= total_pages_in) { + ret = -1; + data_in = NULL; + goto done; + } + data_in = kmap(pages_in[page_in_index]); + + in_page_bytes_left = PAGE_CACHE_SIZE; + in_offset = 0; + } + } + + out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); + ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, + &out_len); + if (ret != LZO_E_OK) { + printk(KERN_WARNING "btrfs decompress failed\n"); + ret = -1; + break; + } + + /* + * buf start is the byte offset we're of the start of + * our workspace buffer + */ + buf_start = tot_out; + + /* tot_out is the last byte of the workspace buffer */ + tot_out += out_len; + + working_bytes = tot_out - buf_start; + + /* + * start_byte is the first byte of the page we're currently + * copying into relative to the start of the compressed data. + */ + start_byte = page_offset(page_out) - disk_start; + + if (working_bytes == 0) { + /* we didn't make progress in this inflate + * call, we're done + */ + break; + } + + /* we haven't yet hit data corresponding to this page */ + if (tot_out <= start_byte) + continue; + + /* + * the start of the data we care about is offset into + * the middle of our working buffer + */ + if (tot_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes -= buf_offset; + } else { + buf_offset = 0; + } + current_buf_start = buf_start; + + /* copy bytes from the working buffer into the pages */ + while (working_bytes > 0) { + bytes = min(PAGE_CACHE_SIZE - pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, working_bytes); + kaddr = kmap_atomic(page_out, KM_USER0); + memcpy(kaddr + pg_offset, workspace->buf + buf_offset, + bytes); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(page_out); + + pg_offset += bytes; + page_bytes_left -= bytes; + buf_offset += bytes; + working_bytes -= bytes; + current_buf_start += bytes; + + /* check if we need to pick another page */ + if (page_bytes_left == 0) { + page_out_index++; + if (page_out_index >= vcnt) { + ret = 0; + goto done; + } + + page_out = bvec[page_out_index].bv_page; + pg_offset = 0; + page_bytes_left = PAGE_CACHE_SIZE; + start_byte = page_offset(page_out) - disk_start; + + /* + * make sure our new page is covered by this + * working buffer + */ + if (tot_out <= start_byte) + break; + + /* the next page in the biovec might not + * be adjacent to the last page, but it + * might still be found inside this working + * buffer. 
bump our offset pointer + */ + if (tot_out > start_byte && + current_buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes = tot_out - start_byte; + current_buf_start = buf_start + + buf_offset; + } + } + } +done: + if (data_in) + kunmap(pages_in[page_in_index]); + return ret; +} + +static int lzo_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + size_t in_len; + size_t out_len; + size_t tot_len; + int ret = 0; + char *kaddr; + unsigned long bytes; + + BUG_ON(srclen < LZO_LEN); + + tot_len = read_compress_length(data_in); + data_in += LZO_LEN; + + in_len = read_compress_length(data_in); + data_in += LZO_LEN; + + out_len = PAGE_CACHE_SIZE; + ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); + if (ret != LZO_E_OK) { + printk(KERN_WARNING "btrfs decompress failed!\n"); + ret = -1; + goto out; + } + + if (out_len < start_byte) { + ret = -1; + goto out; + } + + bytes = min_t(unsigned long, destlen, out_len - start_byte); + + kaddr = kmap_atomic(dest_page, KM_USER0); + memcpy(kaddr, workspace->buf + start_byte, bytes); + kunmap_atomic(kaddr, KM_USER0); +out: + return ret; +} + +struct btrfs_compress_op btrfs_lzo_compress = { + .alloc_workspace = lzo_alloc_workspace, + .free_workspace = lzo_free_workspace, + .compress_pages = lzo_compress_pages, + .decompress_biovec = lzo_decompress_biovec, + .decompress = lzo_decompress, +}; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f348f2b93164..a1a76b2a61f9 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -168,6 +168,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) strcmp(args[0].from, "zlib") == 0) { compress_type = "zlib"; info->compress_type = BTRFS_COMPRESS_ZLIB; + } else if (strcmp(args[0].from, "lzo") == 0) { + compress_type = "lzo"; + info->compress_type = BTRFS_COMPRESS_LZO; } else { ret = -EINVAL; goto out; -- cgit v1.2.2 From 1a419d85a76853d7d04e9b6280a80e96770bf3e3 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 25 Oct 2010 15:12:50 +0800 Subject: btrfs: Allow specifying the compression method for defrag Update the defrag ioctl so one can choose lzo or zlib when turning on compression in a defrag operation. Changelog: v1 -> v2 - Add incompatibility flag. - Check for an invalid compress type.
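For illustration, here is how a userspace tool might request lzo compression for a file through the updated defrag ioctl. This is a sketch, not a reference client: it duplicates the btrfs_ioctl_defrag_range_args layout from the ioctl.h hunk below instead of including the kernel header, and it assumes the pre-existing BTRFS_IOC_DEFRAG_RANGE request number (16).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define BTRFS_IOCTL_MAGIC               0x94
#define BTRFS_DEFRAG_RANGE_COMPRESS     1
#define BTRFS_COMPRESS_LZO              2   /* enum btrfs_compression_type */

/* mirrors the layout added by this patch; normally from the btrfs headers */
struct btrfs_ioctl_defrag_range_args {
        __u64 start;
        __u64 len;
        __u64 flags;
        __u32 extent_thresh;
        __u32 compress_type;
        __u32 unused[4];
};

/* assumed request number of the pre-existing range-defrag ioctl */
#define BTRFS_IOC_DEFRAG_RANGE \
        _IOW(BTRFS_IOCTL_MAGIC, 16, struct btrfs_ioctl_defrag_range_args)

int main(int argc, char **argv)
{
        struct btrfs_ioctl_defrag_range_args args;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDWR);
        if (fd < 0)
                return 1;
        memset(&args, 0, sizeof(args));
        args.len = (__u64)-1;                   /* defrag the whole file */
        args.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
        args.compress_type = BTRFS_COMPRESS_LZO;
        if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &args) < 0)
                perror("BTRFS_IOC_DEFRAG_RANGE");
        return 0;
}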
Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 19 ++++++++++++++++++- fs/btrfs/ioctl.h | 9 ++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 8cb86d4d763c..b6985d33eede 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -638,9 +638,11 @@ static int btrfs_defrag_file(struct file *file, struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_ordered_extent *ordered; struct page *page; + struct btrfs_super_block *disk_super; unsigned long last_index; unsigned long ra_pages = root->fs_info->bdi.ra_pages; unsigned long total_read = 0; + u64 features; u64 page_start; u64 page_end; u64 last_len = 0; @@ -648,6 +650,14 @@ static int btrfs_defrag_file(struct file *file, u64 defrag_end = 0; unsigned long i; int ret; + int compress_type = BTRFS_COMPRESS_ZLIB; + + if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { + if (range->compress_type > BTRFS_COMPRESS_TYPES) + return -EINVAL; + if (range->compress_type) + compress_type = range->compress_type; + } if (inode->i_size == 0) return 0; @@ -683,7 +693,7 @@ static int btrfs_defrag_file(struct file *file, total_read++; mutex_lock(&inode->i_mutex); if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) - BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_ZLIB; + BTRFS_I(inode)->force_compress = compress_type; ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (ret) @@ -785,6 +795,13 @@ loop_unlock: mutex_unlock(&inode->i_mutex); } + disk_super = &root->fs_info->super_copy; + features = btrfs_super_incompat_flags(disk_super); + if (range->compress_type == BTRFS_COMPRESS_LZO) { + features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; + btrfs_set_super_incompat_flags(disk_super, features); + } + return 0; err_reservations: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index c344d12c646b..24d0f4628240 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -133,8 +133,15 @@ struct btrfs_ioctl_defrag_range_args { */ __u32 extent_thresh; + /* + * which compression method to use if turning on compression + * for this defrag operation. If unspecified, zlib will + * be used + */ + __u32 compress_type; + /* spare for later */ - __u32 unused[5]; + __u32 unused[4]; }; struct btrfs_ioctl_space_info { -- cgit v1.2.2 From 3a39c18d63fec35f49df577d4b2a4e29c2212f22 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 8 Nov 2010 15:22:19 +0800 Subject: btrfs: Extract duplicate decompress code Add a common function to copy decompressed data from working buffer to bio pages. Signed-off-by: Li Zefan --- fs/btrfs/compression.c | 92 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/compression.h | 5 +++ fs/btrfs/lzo.c | 101 +++----------------------------------------- fs/btrfs/zlib.c | 111 ++++++------------------------------------------- 4 files changed, 115 insertions(+), 194 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 8faa2df9e719..f745287fbf2e 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -904,3 +904,95 @@ void __exit btrfs_exit_compress(void) { free_workspaces(); } + +/* + * Copy uncompressed data from working buffer to pages. + * + * buf_start is the byte offset we're of the start of our workspace buffer. 
+ * + * total_out is the last byte of the buffer + */ +int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, + unsigned long total_out, u64 disk_start, + struct bio_vec *bvec, int vcnt, + unsigned long *page_index, + unsigned long *pg_offset) +{ + unsigned long buf_offset; + unsigned long current_buf_start; + unsigned long start_byte; + unsigned long working_bytes = total_out - buf_start; + unsigned long bytes; + char *kaddr; + struct page *page_out = bvec[*page_index].bv_page; + + /* + * start byte is the first byte of the page we're currently + * copying into relative to the start of the compressed data. + */ + start_byte = page_offset(page_out) - disk_start; + + /* we haven't yet hit data corresponding to this page */ + if (total_out <= start_byte) + return 1; + + /* + * the start of the data we care about is offset into + * the middle of our working buffer + */ + if (total_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes -= buf_offset; + } else { + buf_offset = 0; + } + current_buf_start = buf_start; + + /* copy bytes from the working buffer into the pages */ + while (working_bytes > 0) { + bytes = min(PAGE_CACHE_SIZE - *pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, working_bytes); + kaddr = kmap_atomic(page_out, KM_USER0); + memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(page_out); + + *pg_offset += bytes; + buf_offset += bytes; + working_bytes -= bytes; + current_buf_start += bytes; + + /* check if we need to pick another page */ + if (*pg_offset == PAGE_CACHE_SIZE) { + (*page_index)++; + if (*page_index >= vcnt) + return 0; + + page_out = bvec[*page_index].bv_page; + *pg_offset = 0; + start_byte = page_offset(page_out) - disk_start; + + /* + * make sure our new page is covered by this + * working buffer + */ + if (total_out <= start_byte) + return 1; + + /* + * the next page in the biovec might not be adjacent + * to the last page, but it might still be found + * inside this working buffer. 
bump our offset pointer + */ + if (total_out > start_byte && + current_buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes = total_out - start_byte; + current_buf_start = buf_start + buf_offset; + } + } + } + + return 1; +} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index f7ce217113fa..51000174b9d7 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -34,6 +34,11 @@ int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, struct bio_vec *bvec, int vcnt, size_t srclen); int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); +int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, + unsigned long total_out, u64 disk_start, + struct bio_vec *bvec, int vcnt, + unsigned long *page_index, + unsigned long *pg_offset); int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long len, u64 disk_start, diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 523b144e2aec..cc9b450399df 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -260,12 +260,10 @@ static int lzo_decompress_biovec(struct list_head *ws, size_t srclen) { struct workspace *workspace = list_entry(ws, struct workspace, list); - int ret = 0; + int ret = 0, ret2; char *data_in; - unsigned long page_bytes_left; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - struct page *page_out; unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; unsigned long buf_start; @@ -273,9 +271,6 @@ static int lzo_decompress_biovec(struct list_head *ws, unsigned long bytes; unsigned long working_bytes; unsigned long pg_offset; - unsigned long start_byte; - unsigned long current_buf_start; - char *kaddr; size_t in_len; size_t out_len; @@ -295,8 +290,6 @@ static int lzo_decompress_biovec(struct list_head *ws, in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; tot_out = 0; - page_out = bvec[0].bv_page; - page_bytes_left = PAGE_CACHE_SIZE; pg_offset = 0; while (tot_in < tot_len) { @@ -359,97 +352,15 @@ cont: break; } - /* - * buf start is the byte offset we're of the start of - * our workspace buffer - */ buf_start = tot_out; - - /* tot_out is the last byte of the workspace buffer */ tot_out += out_len; - working_bytes = tot_out - buf_start; - - /* - * start_byte is the first byte of the page we're currently - * copying into relative to the start of the compressed data. 
- */ - start_byte = page_offset(page_out) - disk_start; - - if (working_bytes == 0) { - /* we didn't make progress in this inflate - * call, we're done - */ + ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start, + tot_out, disk_start, + bvec, vcnt, + &page_out_index, &pg_offset); + if (ret2 == 0) break; - } - - /* we haven't yet hit data corresponding to this page */ - if (tot_out <= start_byte) - continue; - - /* - * the start of the data we care about is offset into - * the middle of our working buffer - */ - if (tot_out > start_byte && buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes -= buf_offset; - } else { - buf_offset = 0; - } - current_buf_start = buf_start; - - /* copy bytes from the working buffer into the pages */ - while (working_bytes > 0) { - bytes = min(PAGE_CACHE_SIZE - pg_offset, - PAGE_CACHE_SIZE - buf_offset); - bytes = min(bytes, working_bytes); - kaddr = kmap_atomic(page_out, KM_USER0); - memcpy(kaddr + pg_offset, workspace->buf + buf_offset, - bytes); - kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(page_out); - - pg_offset += bytes; - page_bytes_left -= bytes; - buf_offset += bytes; - working_bytes -= bytes; - current_buf_start += bytes; - - /* check if we need to pick another page */ - if (page_bytes_left == 0) { - page_out_index++; - if (page_out_index >= vcnt) { - ret = 0; - goto done; - } - - page_out = bvec[page_out_index].bv_page; - pg_offset = 0; - page_bytes_left = PAGE_CACHE_SIZE; - start_byte = page_offset(page_out) - disk_start; - - /* - * make sure our new page is covered by this - * working buffer - */ - if (tot_out <= start_byte) - break; - - /* the next page in the biovec might not - * be adjacent to the last page, but it - * might still be found inside this working - * buffer. 
bump our offset pointer - */ - if (tot_out > start_byte && - current_buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes = tot_out - start_byte; - current_buf_start = buf_start + - buf_offset; - } - } - } } done: if (data_in) diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 9a3e693917f2..f5ec2d44150d 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -218,24 +218,16 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, size_t srclen) { struct workspace *workspace = list_entry(ws, struct workspace, list); - int ret = 0; + int ret = 0, ret2; int wbits = MAX_WBITS; char *data_in; size_t total_out = 0; - unsigned long page_bytes_left; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - struct page *page_out; unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; unsigned long buf_start; - unsigned long buf_offset; - unsigned long bytes; - unsigned long working_bytes; unsigned long pg_offset; - unsigned long start_byte; - unsigned long current_buf_start; - char *kaddr; data_in = kmap(pages_in[page_in_index]); workspace->inf_strm.next_in = data_in; @@ -245,8 +237,6 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, workspace->inf_strm.total_out = 0; workspace->inf_strm.next_out = workspace->buf; workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; - page_out = bvec[page_out_index].bv_page; - page_bytes_left = PAGE_CACHE_SIZE; pg_offset = 0; /* If it's deflate, and it's got no preset dictionary, then @@ -268,100 +258,23 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) break; - /* - * buf start is the byte offset we're of the start of - * our workspace buffer - */ - buf_start = total_out; - /* total_out is the last byte of the workspace buffer */ + buf_start = total_out; total_out = workspace->inf_strm.total_out; - working_bytes = total_out - buf_start; - - /* - * start byte is the first byte of the page we're currently - * copying into relative to the start of the compressed data. 
- */ - start_byte = page_offset(page_out) - disk_start; - - if (working_bytes == 0) { - /* we didn't make progress in this inflate - * call, we're done - */ - if (ret != Z_STREAM_END) - ret = -1; + /* we didn't make progress in this inflate call, we're done */ + if (buf_start == total_out) break; - } - /* we haven't yet hit data corresponding to this page */ - if (total_out <= start_byte) - goto next; - - /* - * the start of the data we care about is offset into - * the middle of our working buffer - */ - if (total_out > start_byte && buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes -= buf_offset; - } else { - buf_offset = 0; - } - current_buf_start = buf_start; - - /* copy bytes from the working buffer into the pages */ - while (working_bytes > 0) { - bytes = min(PAGE_CACHE_SIZE - pg_offset, - PAGE_CACHE_SIZE - buf_offset); - bytes = min(bytes, working_bytes); - kaddr = kmap_atomic(page_out, KM_USER0); - memcpy(kaddr + pg_offset, workspace->buf + buf_offset, - bytes); - kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(page_out); - - pg_offset += bytes; - page_bytes_left -= bytes; - buf_offset += bytes; - working_bytes -= bytes; - current_buf_start += bytes; - - /* check if we need to pick another page */ - if (page_bytes_left == 0) { - page_out_index++; - if (page_out_index >= vcnt) { - ret = 0; - goto done; - } - - page_out = bvec[page_out_index].bv_page; - pg_offset = 0; - page_bytes_left = PAGE_CACHE_SIZE; - start_byte = page_offset(page_out) - disk_start; - - /* - * make sure our new page is covered by this - * working buffer - */ - if (total_out <= start_byte) - goto next; - - /* the next page in the biovec might not - * be adjacent to the last page, but it - * might still be found inside this working - * buffer. bump our offset pointer - */ - if (total_out > start_byte && - current_buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes = total_out - start_byte; - current_buf_start = buf_start + - buf_offset; - } - } + ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start, + total_out, disk_start, + bvec, vcnt, + &page_out_index, &pg_offset); + if (ret2 == 0) { + ret = 0; + goto done; } -next: + workspace->inf_strm.next_out = workspace->buf; workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; -- cgit v1.2.2 From fa0d2b9bd717340e0bc4850a80ac0eb344e9a7fb Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 20 Dec 2010 15:53:28 +0800 Subject: Btrfs: Refactor btrfs_ioctl_snap_create() Split it into two functions for two different ioctls, since they share no common code. 
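For context, the v1 interface that btrfs_ioctl_snap_create() continues to serve can be exercised from userspace roughly as follows. The structure layout and the request number (1) are assumptions drawn from the existing btrfs ABI rather than from this patch, so treat the sketch as illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define BTRFS_IOCTL_MAGIC       0x94
#define BTRFS_PATH_NAME_MAX     4087

struct btrfs_ioctl_vol_args {
        __s64 fd;                               /* fd of the source subvolume */
        char name[BTRFS_PATH_NAME_MAX + 1];     /* name of the new snapshot */
};

#define BTRFS_IOC_SNAP_CREATE \
        _IOW(BTRFS_IOCTL_MAGIC, 1, struct btrfs_ioctl_vol_args)

int main(void)
{
        struct btrfs_ioctl_vol_args args;
        int dst = open("/mnt", O_RDONLY);           /* parent directory */
        int src = open("/mnt/subvol", O_RDONLY);    /* subvolume to snapshot */

        if (dst < 0 || src < 0)
                return 1;
        memset(&args, 0, sizeof(args));
        args.fd = src;
        strncpy(args.name, "snap", BTRFS_PATH_NAME_MAX);
        if (ioctl(dst, BTRFS_IOC_SNAP_CREATE, &args) < 0)
                perror("BTRFS_IOC_SNAP_CREATE");
        return 0;
}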
Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 84 +++++++++++++++++++++++++++----------------------------- 1 file changed, 40 insertions(+), 44 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f87552a1d7ea..02554e19d974 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -946,58 +946,54 @@ out: } static noinline int btrfs_ioctl_snap_create(struct file *file, - void __user *arg, int subvol, - int v2) + void __user *arg, int subvol) { - struct btrfs_ioctl_vol_args *vol_args = NULL; - struct btrfs_ioctl_vol_args_v2 *vol_args_v2 = NULL; - char *name; - u64 fd; + struct btrfs_ioctl_vol_args *vol_args; int ret; - if (v2) { - u64 transid = 0; - u64 *ptr = NULL; + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); + vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - vol_args_v2 = memdup_user(arg, sizeof(*vol_args_v2)); - if (IS_ERR(vol_args_v2)) - return PTR_ERR(vol_args_v2); + ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, + vol_args->fd, subvol, NULL); - if (vol_args_v2->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { - ret = -EINVAL; - goto out; - } - - name = vol_args_v2->name; - fd = vol_args_v2->fd; - vol_args_v2->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; + kfree(vol_args); + return ret; +} - if (vol_args_v2->flags & BTRFS_SUBVOL_CREATE_ASYNC) - ptr = &transid; +static noinline int btrfs_ioctl_snap_create_v2(struct file *file, + void __user *arg, int subvol) +{ + struct btrfs_ioctl_vol_args_v2 *vol_args; + int ret; + u64 transid = 0; + u64 *ptr = NULL; - ret = btrfs_ioctl_snap_create_transid(file, name, fd, - subvol, ptr); + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); + vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; - if (ret == 0 && ptr && - copy_to_user(arg + - offsetof(struct btrfs_ioctl_vol_args_v2, - transid), ptr, sizeof(*ptr))) - ret = -EFAULT; - } else { - vol_args = memdup_user(arg, sizeof(*vol_args)); - if (IS_ERR(vol_args)) - return PTR_ERR(vol_args); - name = vol_args->name; - fd = vol_args->fd; - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - - ret = btrfs_ioctl_snap_create_transid(file, name, fd, - subvol, NULL); + if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { + ret = -EINVAL; + goto out; } + + if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) + ptr = &transid; + + ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, + vol_args->fd, subvol, ptr); + + if (ret == 0 && ptr && + copy_to_user(arg + + offsetof(struct btrfs_ioctl_vol_args_v2, + transid), ptr, sizeof(*ptr))) + ret = -EFAULT; out: kfree(vol_args); - kfree(vol_args_v2); - return ret; } @@ -2257,11 +2253,11 @@ long btrfs_ioctl(struct file *file, unsigned int case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); case BTRFS_IOC_SNAP_CREATE: - return btrfs_ioctl_snap_create(file, argp, 0, 0); + return btrfs_ioctl_snap_create(file, argp, 0); case BTRFS_IOC_SNAP_CREATE_V2: - return btrfs_ioctl_snap_create(file, argp, 0, 1); + return btrfs_ioctl_snap_create_v2(file, argp, 0); case BTRFS_IOC_SUBVOL_CREATE: - return btrfs_ioctl_snap_create(file, argp, 1, 0); + return btrfs_ioctl_snap_create(file, argp, 1); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: -- cgit v1.2.2 From b83cc9693f39689490970c19f6c5b866f6719a70 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 20 Dec 2010 16:04:08 +0800 Subject: Btrfs: Add readonly snapshots support Usage: Set BTRFS_SUBVOL_RDONLY of btrfs_ioctl_vol_arg_v2->flags, and call 
ioctl(BTRFS_IOC_SNAP_CREATE_V2). Implementation: - Set readonly bit of btrfs_root_item->flags. - Add readonly checks in btrfs_permission (inode_permission), btrfs_setattr, btrfs_set/remove_xattr and some ioctls. Changelog for v3: - Eliminate btrfs_root->readonly, but check btrfs_root->root_item.flags. - Rename BTRFS_ROOT_SNAP_RDONLY to BTRFS_ROOT_SUBVOL_RDONLY. Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 7 +++++++ fs/btrfs/inode.c | 8 ++++++++ fs/btrfs/ioctl.c | 42 ++++++++++++++++++++++++++++++++---------- fs/btrfs/ioctl.h | 1 + fs/btrfs/transaction.c | 8 ++++++++ fs/btrfs/transaction.h | 1 + fs/btrfs/xattr.c | 18 ++++++++++++++++++ 7 files changed, 75 insertions(+), 10 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index af52f6d7a4d8..4403e5643d43 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -597,6 +597,8 @@ struct btrfs_dir_item { u8 type; } __attribute__ ((__packed__)); +#define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0) + struct btrfs_root_item { struct btrfs_inode_item inode; __le64 generation; @@ -1893,6 +1895,11 @@ BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, last_snapshot, 64); +static inline bool btrfs_root_readonly(struct btrfs_root *root) +{ + return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY; +} + /* struct btrfs_super_block */ BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f9194438f7c..956f1eb913b1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3671,8 +3671,12 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; int err; + if (btrfs_root_readonly(root)) + return -EROFS; + err = inode_change_ok(inode, attr); if (err) return err; @@ -7206,6 +7210,10 @@ static int btrfs_set_page_dirty(struct page *page) static int btrfs_permission(struct inode *inode, int mask) { + struct btrfs_root *root = BTRFS_I(inode)->root; + + if (btrfs_root_readonly(root) && (mask & MAY_WRITE)) + return -EROFS; if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) return -EACCES; return generic_permission(inode, mask, btrfs_check_acl); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 02554e19d974..f066ccb5dddf 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -147,6 +147,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) unsigned int flags, oldflags; int ret; + if (btrfs_root_readonly(root)) + return -EROFS; + if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; @@ -360,7 +363,8 @@ fail: } static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, - char *name, int namelen, u64 *async_transid) + char *name, int namelen, u64 *async_transid, + bool readonly) { struct inode *inode; struct dentry *parent; @@ -378,6 +382,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, btrfs_init_block_rsv(&pending_snapshot->block_rsv); pending_snapshot->dentry = dentry; pending_snapshot->root = root; + pending_snapshot->readonly = readonly; trans = btrfs_start_transaction(root->fs_info->extent_root, 5); if (IS_ERR(trans)) { @@ -509,7 +514,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) static noinline int btrfs_mksubvol(struct path *parent, char *name, int
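A minimal userspace sketch of the usage described above: create a readonly snapshot through the v2 ioctl. The btrfs_ioctl_vol_args_v2 layout is duplicated here on the assumption that it matches the definition quoted later in this series (request number 23); error handling is kept to a minimum.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define BTRFS_IOCTL_MAGIC               0x94
#define BTRFS_SUBVOL_CREATE_ASYNC       (1ULL << 0)
#define BTRFS_SUBVOL_RDONLY             (1ULL << 1)
#define BTRFS_SUBVOL_NAME_MAX           4039

/* assumed to match the btrfs_ioctl_vol_args_v2 definition in ioctl.h */
struct btrfs_ioctl_vol_args_v2 {
        __s64 fd;
        __u64 transid;
        __u64 flags;
        __u64 unused[4];
        char name[BTRFS_SUBVOL_NAME_MAX + 1];
};

#define BTRFS_IOC_SNAP_CREATE_V2 \
        _IOW(BTRFS_IOCTL_MAGIC, 23, struct btrfs_ioctl_vol_args_v2)

int main(void)
{
        struct btrfs_ioctl_vol_args_v2 args;
        int dst = open("/mnt", O_RDONLY);           /* parent directory */
        int src = open("/mnt/subvol", O_RDONLY);    /* source subvolume */

        if (dst < 0 || src < 0)
                return 1;
        memset(&args, 0, sizeof(args));
        args.fd = src;
        args.flags = BTRFS_SUBVOL_RDONLY;           /* readonly snapshot */
        strncpy(args.name, "ro-snap", BTRFS_SUBVOL_NAME_MAX);
        if (ioctl(dst, BTRFS_IOC_SNAP_CREATE_V2, &args) < 0)
                perror("BTRFS_IOC_SNAP_CREATE_V2");
        return 0;
}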
namelen, struct btrfs_root *snap_src, - u64 *async_transid) + u64 *async_transid, bool readonly) { struct inode *dir = parent->dentry->d_inode; struct dentry *dentry; @@ -541,7 +546,7 @@ static noinline int btrfs_mksubvol(struct path *parent, if (snap_src) { error = create_snapshot(snap_src, dentry, - name, namelen, async_transid); + name, namelen, async_transid, readonly); } else { error = create_subvol(BTRFS_I(dir)->root, dentry, name, namelen, async_transid); @@ -901,7 +906,8 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, char *name, unsigned long fd, int subvol, - u64 *transid) + u64 *transid, + bool readonly) { struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root; struct file *src_file; @@ -919,7 +925,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, if (subvol) { ret = btrfs_mksubvol(&file->f_path, name, namelen, - NULL, transid); + NULL, transid, readonly); } else { struct inode *src_inode; src_file = fget(fd); @@ -938,7 +944,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, } ret = btrfs_mksubvol(&file->f_path, name, namelen, BTRFS_I(src_inode)->root, - transid); + transid, readonly); fput(src_file); } out: @@ -957,7 +963,8 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, - vol_args->fd, subvol, NULL); + vol_args->fd, subvol, + NULL, false); kfree(vol_args); return ret; @@ -970,22 +977,27 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, int ret; u64 transid = 0; u64 *ptr = NULL; + bool readonly = false; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; - if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { - ret = -EINVAL; + if (vol_args->flags & + ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY)) { + ret = -EOPNOTSUPP; goto out; } if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) ptr = &transid; + if (vol_args->flags & BTRFS_SUBVOL_RDONLY) + readonly = true; ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, - vol_args->fd, subvol, ptr); + vol_args->fd, subvol, + ptr, readonly); if (ret == 0 && ptr && copy_to_user(arg + @@ -1505,6 +1517,9 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) struct btrfs_ioctl_defrag_range_args *range; int ret; + if (btrfs_root_readonly(root)) + return -EROFS; + ret = mnt_want_write(file->f_path.mnt); if (ret) return ret; @@ -1633,6 +1648,9 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) return -EINVAL; + if (btrfs_root_readonly(root)) + return -EROFS; + ret = mnt_want_write(file->f_path.mnt); if (ret) return ret; @@ -1954,6 +1972,10 @@ static long btrfs_ioctl_trans_start(struct file *file) if (file->private_data) goto out; + ret = -EROFS; + if (btrfs_root_readonly(root)) + goto out; + ret = mnt_want_write(file->f_path.mnt); if (ret) goto out; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index c344d12c646b..52ae489974be 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -31,6 +31,7 @@ struct btrfs_ioctl_vol_args { }; #define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) +#define BTRFS_SUBVOL_RDONLY (1ULL << 1) #define BTRFS_SUBVOL_NAME_MAX 4039 struct btrfs_ioctl_vol_args_v2 { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f50e931fc217..29e30d832ec9 100644 --- a/fs/btrfs/transaction.c 
+++ b/fs/btrfs/transaction.c @@ -910,6 +910,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, u64 to_reserve = 0; u64 index = 0; u64 objectid; + u64 root_flags; new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); if (!new_root_item) { @@ -967,6 +968,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); + root_flags = btrfs_root_flags(new_root_item); + if (pending->readonly) + root_flags |= BTRFS_ROOT_SUBVOL_RDONLY; + else + root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY; + btrfs_set_root_flags(new_root_item, root_flags); + old = btrfs_lock_root_node(root); btrfs_cow_block(trans, root, old, NULL, 0, &old); btrfs_set_lock_blocking(old); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index f104b57ad4ef..229a594cacd5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -62,6 +62,7 @@ struct btrfs_pending_snapshot { struct btrfs_block_rsv block_rsv; /* extra metadata reseration for relocation */ int error; + bool readonly; struct list_head list; }; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 698fdd2c739c..a5776531dc2b 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -316,6 +316,15 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { + struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + + /* + * The permission on security.* and system.* is not checked + * in permission(). + */ + if (btrfs_root_readonly(root)) + return -EROFS; + /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler @@ -336,6 +345,15 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, int btrfs_removexattr(struct dentry *dentry, const char *name) { + struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + + /* + * The permission on security.* and system.* is not checked + * in permission(). + */ + if (btrfs_root_readonly(root)) + return -EROFS; + /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler -- cgit v1.2.2 From 0caa102da82799efaba88e234484786a9591c797 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 20 Dec 2010 16:30:25 +0800 Subject: Btrfs: Add BTRFS_IOC_SUBVOL_GETFLAGS/SETFLAGS ioctls This allows us to set a snapshot or a subvolume readonly or writable on the fly. Usage: Set BTRFS_SUBVOL_RDONLY of btrfs_ioctl_vol_arg_v2->flags, and then call ioctl(BTRFS_IOCTL_SUBVOL_SETFLAGS); Changelog for v3: - Change to pass __u64 as ioctl parameter. Changelog for v2: - Add _GETFLAGS ioctl. - Check if the passed fd is the root of a subvolume. - Change the name from _SNAP_SETFLAGS to _SUBVOL_SETFLAGS. 
Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ioctl.h | 2 ++ 2 files changed, 85 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f066ccb5dddf..ad1983524f97 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1009,6 +1009,85 @@ out: return ret; } +static noinline int btrfs_ioctl_subvol_getflags(struct file *file, + void __user *arg) +{ + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + int ret = 0; + u64 flags = 0; + + if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + return -EINVAL; + + down_read(&root->fs_info->subvol_sem); + if (btrfs_root_readonly(root)) + flags |= BTRFS_SUBVOL_RDONLY; + up_read(&root->fs_info->subvol_sem); + + if (copy_to_user(arg, &flags, sizeof(flags))) + ret = -EFAULT; + + return ret; +} + +static noinline int btrfs_ioctl_subvol_setflags(struct file *file, + void __user *arg) +{ + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + u64 root_flags; + u64 flags; + int ret = 0; + + if (root->fs_info->sb->s_flags & MS_RDONLY) + return -EROFS; + + if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + return -EINVAL; + + if (copy_from_user(&flags, arg, sizeof(flags))) + return -EFAULT; + + if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) + return -EINVAL; + + if (flags & ~BTRFS_SUBVOL_RDONLY) + return -EOPNOTSUPP; + + down_write(&root->fs_info->subvol_sem); + + /* nothing to do */ + if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root)) + goto out; + + root_flags = btrfs_root_flags(&root->root_item); + if (flags & BTRFS_SUBVOL_RDONLY) + btrfs_set_root_flags(&root->root_item, + root_flags | BTRFS_ROOT_SUBVOL_RDONLY); + else + btrfs_set_root_flags(&root->root_item, + root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY); + + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_reset; + } + + ret = btrfs_update_root(trans, root, + &root->root_key, &root->root_item); + + btrfs_commit_transaction(trans, root); +out_reset: + if (ret) + btrfs_set_root_flags(&root->root_item, root_flags); +out: + up_write(&root->fs_info->subvol_sem); + return ret; +} + /* * helper to check if the subvolume references other subvolumes */ @@ -2282,6 +2361,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_snap_create(file, argp, 1); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp); + case BTRFS_IOC_SUBVOL_GETFLAGS: + return btrfs_ioctl_subvol_getflags(file, argp); + case BTRFS_IOC_SUBVOL_SETFLAGS: + return btrfs_ioctl_subvol_setflags(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: return btrfs_ioctl_default_subvol(file, argp); case BTRFS_IOC_DEFRAG: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 52ae489974be..1223223351fa 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -194,4 +194,6 @@ struct btrfs_ioctl_space_args { #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ struct btrfs_ioctl_vol_args_v2) +#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64) +#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64) #endif -- cgit v1.2.2 From 65e5341b9a0c39767ae1fecc727d70eda0dd6d83 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 24 Dec 2010 06:41:52 -0500 Subject: Btrfs: fix off by one while setting block groups readonly When we read in block groups, we'll set 
non-redundant groups readonly if we find a raid1, DUP or raid10 group. But the ro code has an off by one bug in the math around testing to make sure our accounting doesn't go wrong. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7e5162e5c411..b180efdc8b68 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7971,13 +7971,14 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache) if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned + sinfo->bytes_may_use + sinfo->bytes_readonly + - cache->reserved_pinned + num_bytes < sinfo->total_bytes) { + cache->reserved_pinned + num_bytes <= sinfo->total_bytes) { sinfo->bytes_readonly += num_bytes; sinfo->bytes_reserved += cache->reserved_pinned; cache->reserved_pinned = 0; cache->ro = 1; ret = 0; } + spin_unlock(&cache->lock); spin_unlock(&sinfo->lock); return ret; -- cgit v1.2.2 From fe15ce446beb3a33583af81ffe6c9d01a75314ed Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:23 +1100 Subject: fs: change d_delete semantics Change d_delete from a dentry deletion notification to a dentry caching advice, more like ->drop_inode. Require it to be constant and idempotent, and not take d_lock. This is how all existing filesystems use the callback anyway. This makes fine grained dentry locking of dput and dentry lru scanning much simpler. Signed-off-by: Nick Piggin --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 72f31ecb5c90..7ce9f8932789 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4127,7 +4127,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) return inode; } -static int btrfs_dentry_delete(struct dentry *dentry) +static int btrfs_dentry_delete(const struct dentry *dentry) { struct btrfs_root *root; -- cgit v1.2.2 From fa0d7e3de6d6fc5004ad9dea0dd6b286af8f03e9 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:49 +1100 Subject: fs: icache RCU free inodes RCU free the struct inode. This will allow: - Subsequent store-free path walking patch. The inode must be consulted for permissions when walking, so an RCU inode reference is a must. - sb_inode_list_lock to be moved inside i_lock because sb list walkers who want to take i_lock no longer need to take sb_inode_list_lock to walk the list in the first place. This will simplify and optimize locking. - Could remove some nested trylock loops in dcache code - Could potentially simplify things a bit in VM land. Do not need to take the page lock to follow page->mapping. The downside of this is the performance cost of using RCU. In a simple creat/unlink microbenchmark, performance drops by about 10% due to inability to reuse cache-hot slab objects. As iterations increase and RCU freeing starts kicking over, this increases to about 20%. In cases where inode lifetimes are longer (i.e. many inodes may be allocated during the average life span of a single inode), a lot of this cache reuse is not applicable, so the regression caused by this patch is smaller. The cache-hot regression could largely be avoided by using SLAB_DESTROY_BY_RCU, however this adds some complexity to list walking and store-free path walking, so I prefer to implement this at a later date, if it is shown to be a win in real situations.
I haven't found a regression in any non-micro benchmark so I doubt it will be a problem. Signed-off-by: Nick Piggin --- fs/btrfs/inode.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7ce9f8932789..f9d2994a42a2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6495,6 +6495,13 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) return inode; } +static void btrfs_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + INIT_LIST_HEAD(&inode->i_dentry); + kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); +} + void btrfs_destroy_inode(struct inode *inode) { struct btrfs_ordered_extent *ordered; @@ -6564,7 +6571,7 @@ void btrfs_destroy_inode(struct inode *inode) inode_tree_del(inode); btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); free: - kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); + call_rcu(&inode->i_rcu, btrfs_i_callback); } int btrfs_drop_inode(struct inode *inode) -- cgit v1.2.2 From fb045adb99d9b7c562dc7fef834857f78249daa1 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:55 +1100 Subject: fs: dcache reduce branches in lookup path Reduce some branches and memory accesses in dcache lookup by adding dentry flags to indicate common d_ops are set, rather than having to check them. This saves a pointer memory access (dentry->d_op) in common path lookup situations, and saves another pointer load and branch in cases where we have d_op but not the particular operation. Patched with: git grep -E '[.>]([[:space:]])*d_op([[:space:]])*=' | xargs sed -e 's/\([^\t ]*\)->d_op = \(.*\);/d_set_d_op(\1, \2);/' -e 's/\([^\t ]*\)\.d_op = \(.*\);/d_set_d_op(\&\1, \2);/' -i Signed-off-by: Nick Piggin --- fs/btrfs/export.c | 4 ++-- fs/btrfs/inode.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 659f532d26a0..0ccf9a8afcdf 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -110,7 +110,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, dentry = d_obtain_alias(inode); if (!IS_ERR(dentry)) - dentry->d_op = &btrfs_dentry_operations; + d_set_d_op(dentry, &btrfs_dentry_operations); return dentry; fail: srcu_read_unlock(&fs_info->subvol_srcu, index); @@ -225,7 +225,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) key.offset = 0; dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); if (!IS_ERR(dentry)) - dentry->d_op = &btrfs_dentry_operations; + d_set_d_op(dentry, &btrfs_dentry_operations); return dentry; fail: btrfs_free_path(path); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f9d2994a42a2..63e4546b478a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4084,7 +4084,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) int index; int ret; - dentry->d_op = &btrfs_dentry_operations; + d_set_d_op(dentry, &btrfs_dentry_operations); if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); -- cgit v1.2.2 From b74c79e99389cd79b31fcc08f82c24e492e63c7e Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:58 +1100 Subject: fs: provide rcu-walk aware permission i_ops Signed-off-by: Nick Piggin --- fs/btrfs/acl.c | 6 ++++-- fs/btrfs/ctree.h | 2 +- fs/btrfs/inode.c | 7 +++++-- 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 
2222d161c7b6..cb518a4b917c 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -185,13 +185,15 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, return ret; } -int btrfs_check_acl(struct inode *inode, int mask) +int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags) { struct posix_acl *acl; int error = -EAGAIN; - acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); + if (flags & IPERM_FLAG_RCU) + return -ECHILD; + acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index af52f6d7a4d8..a142d204b526 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2544,7 +2544,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait); /* acl.c */ #ifdef CONFIG_BTRFS_FS_POSIX_ACL -int btrfs_check_acl(struct inode *inode, int mask); +int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags); #else #define btrfs_check_acl NULL #endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 63e4546b478a..5cf0db0081f9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7211,11 +7211,14 @@ static int btrfs_set_page_dirty(struct page *page) return __set_page_dirty_nobuffers(page); } -static int btrfs_permission(struct inode *inode, int mask) +static int btrfs_permission(struct inode *inode, int mask, unsigned int flags) { + if (flags & IPERM_FLAG_RCU) + return -ECHILD; + if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) return -EACCES; - return generic_permission(inode, mask, btrfs_check_acl); + return generic_permission(inode, mask, flags, btrfs_check_acl); } static const struct inode_operations btrfs_dir_inode_operations = { -- cgit v1.2.2 From 258a5aa8dfc6294f5f7df892023ee4d3e57c9841 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:50:01 +1100 Subject: btrfs: provide simple rcu-walk ACL implementation This simple implementation just checks for no ACLs on the inode, and if so, then the rcu-walk may proceed, otherwise fail it. 
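In outline: the rcu-walk path may not sleep, so it can only act on an answer that is already cached; anything else must return -ECHILD so the VFS retries in ref-walk mode. A rough sketch of the shape (hypothetical function name; negative_cached_acl() is the helper introduced by this patch series, and the real btrfs hunk follows below):

/* Sketch: under IPERM_FLAG_RCU, only a cached "no ACL" answer lets us proceed. */
static int example_check_acl(struct inode *inode, int mask, unsigned int flags)
{
	if (flags & IPERM_FLAG_RCU) {
		if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
			return -ECHILD;	/* an ACL may exist: drop out of rcu-walk */
		return -EAGAIN;		/* definitely no ACL: use the mode bits */
	}
	/* ref-walk mode: a blocking ACL lookup would be allowed here */
	return -EAGAIN;
}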
Signed-off-by: Nick Piggin --- fs/btrfs/acl.c | 21 ++++++++++++--------- fs/btrfs/inode.c | 3 --- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index cb518a4b917c..6ae2c8cac9d5 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -187,18 +187,21 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags) { - struct posix_acl *acl; int error = -EAGAIN; - if (flags & IPERM_FLAG_RCU) - return -ECHILD; + if (flags & IPERM_FLAG_RCU) { + if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) + error = -ECHILD; - acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); - if (IS_ERR(acl)) - return PTR_ERR(acl); - if (acl) { - error = posix_acl_permission(inode, acl, mask); - posix_acl_release(acl); + } else { + struct posix_acl *acl; + acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl) { + error = posix_acl_permission(inode, acl, mask); + posix_acl_release(acl); + } } return error; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5cf0db0081f9..a0ff46a47895 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7213,9 +7213,6 @@ static int btrfs_set_page_dirty(struct page *page) static int btrfs_permission(struct inode *inode, int mask, unsigned int flags) { - if (flags & IPERM_FLAG_RCU) - return -ECHILD; - if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) return -EACCES; return generic_permission(inode, mask, flags, btrfs_check_acl); -- cgit v1.2.2 From af53d29ac13a97304d44343dc3b26154ca595268 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Dec 2010 10:56:06 -0500 Subject: switch btrfs, close races Signed-off-by: Al Viro --- fs/btrfs/export.c | 12 ++---------- fs/btrfs/inode.c | 2 -- fs/btrfs/super.c | 1 + 3 files changed, 3 insertions(+), 12 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 0ccf9a8afcdf..9786963b07e5 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -65,7 +65,6 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, { struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info; struct btrfs_root *root; - struct dentry *dentry; struct inode *inode; struct btrfs_key key; int index; @@ -108,10 +107,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, return ERR_PTR(-ESTALE); } - dentry = d_obtain_alias(inode); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &btrfs_dentry_operations); - return dentry; + return d_obtain_alias(inode); fail: srcu_read_unlock(&fs_info->subvol_srcu, index); return ERR_PTR(err); @@ -166,7 +162,6 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, static struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = child->d_inode; - struct dentry *dentry; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; struct extent_buffer *leaf; @@ -223,10 +218,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &btrfs_dentry_operations); - return dentry; + return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); fail: btrfs_free_path(path); return ERR_PTR(ret); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a0ff46a47895..f870aefc59dd 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c 
@@ -4084,8 +4084,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) int index; int ret; - d_set_d_op(dentry, &btrfs_dentry_operations); - if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 883c6fa1367e..22acdaa78ce1 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -460,6 +460,7 @@ static int btrfs_fill_super(struct super_block *sb, sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = BTRFS_SUPER_MAGIC; sb->s_op = &btrfs_super_ops; + sb->s_d_op = &btrfs_dentry_operations; sb->s_export_op = &btrfs_export_ops; sb->s_xattr = btrfs_xattr_handlers; sb->s_time_gran = 1; -- cgit v1.2.2 From 23a8519b55235660f6fb7d6f394a912de9d23208 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 20:46:19 -0500 Subject: Btrfs: fail if we try to use hole punch Btrfs doesn't have the ability to punch holes yet, so make sure we return EOPNOTSUPP if we try to use hole punching through fallocate. This support can be added later. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/btrfs/inode.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f870aefc59dd..a3798a3aa0d2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7115,6 +7115,10 @@ static long btrfs_fallocate(struct inode *inode, int mode, alloc_start = offset & ~mask; alloc_end = (offset + len + mask) & ~mask; + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + return -EOPNOTSUPP; + /* * wait for ordered IO before we have any locks. We'll loop again * below with the locks held. -- cgit v1.2.2 From f580eb0931fbcb6dc3916f094f471671facd1daa Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Wed, 12 Jan 2011 09:30:42 +0000 Subject: fs/btrfs: Fix build of ctree CC [M] fs/btrfs/ctree.o In file included from fs/btrfs/ctree.c:21:0: fs/btrfs/ctree.h:1003:17: error: field 'super_kobj' has incomplete type fs/btrfs/ctree.h:1074:17: error: field 'root_kobj' has incomplete type make[2]: *** [fs/btrfs/ctree.o] Error 1 make[1]: *** [fs/btrfs] Error 2 make: *** [fs] Error 2 We need to include kobject.h here. Reported-by: Jeff Garzik Fix-suggested-by: Li Zefan Signed-off-by: Stefan Schmidt Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4acd4c611efa..0cb322cc4fc0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -27,6 +27,7 @@ #include #include #include +#include <linux/kobject.h> #include #include "extent_io.h" #include "extent_map.h" -- cgit v1.2.2 From 299a08b1c34f9397797946a0fa215c5fd145c5cf Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:15 +0000 Subject: btrfs: fix wrong data space statistics Josef has implemented mixed data/metadata chunks; we must add those chunks' space just like data chunks.
Signed-off-by: Miao Xie Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/super.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a1a76b2a61f9..caa5bcc62f16 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -790,11 +790,10 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { - if (found->flags & (BTRFS_BLOCK_GROUP_METADATA | - BTRFS_BLOCK_GROUP_SYSTEM)) - total_used_data += found->disk_total; - else + if (found->flags & BTRFS_BLOCK_GROUP_DATA) total_used_data += found->disk_used; + else + total_used_data += found->disk_total; total_used += found->disk_used; } rcu_read_unlock(); -- cgit v1.2.2 From d52a5b5f1fa40804f681cf9868d4a8f90661bdf3 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:18 +0000 Subject: btrfs: try to reclaim some space when chunk allocation fails We cannot write data into files when there is tiny space in the filesystem. Reproduce steps: # mkfs.btrfs /dev/sda1 # mount /dev/sda1 /mnt # dd if=/dev/zero of=/mnt/tmpfile0 bs=4K count=1 # dd if=/dev/zero of=/mnt/tmpfile1 bs=4K count=99999999999999 (fill the filesystem) # umount /mnt # mount /dev/sda1 /mnt # rm -f /mnt/tmpfile0 # dd if=/dev/zero of=/mnt/tmpfile0 bs=4K count=1 (failed with ENOSPC) But if we do the last step again, we can write data successfully. The reason for the problem is that btrfs didn't try to commit the current transaction and reclaim some space when chunk allocation failed. This patch fixes it by committing the current transaction to reclaim some space when chunk allocation fails. Signed-off-by: Miao Xie Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b180efdc8b68..3c71d95111fe 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3162,8 +3162,12 @@ alloc: bytes + 2 * 1024 * 1024, alloc_target, 0); btrfs_end_transaction(trans, root); - if (ret < 0) - return ret; + if (ret < 0) { + if (ret != -ENOSPC) + return ret; + else + goto commit_trans; + } if (!data_sinfo) { btrfs_set_inode_space_info(root, inode); data_sinfo = BTRFS_I(inode)->space_info; @@ -3174,6 +3178,7 @@ alloc: spin_unlock(&data_sinfo->lock); /* commit the current transaction and try again */ +commit_trans: if (!committed && !root->fs_info->open_ioctl_trans) { committed = 1; trans = btrfs_join_transaction(root, 1); -- cgit v1.2.2 From 1974a3b42d8cf7a9c74f1e0310c593023617037a Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:24 +0000 Subject: btrfs: fix wrong calculation of stripe size There are two tiny problems: - One is that when we check whether the chunk size is greater than the max chunk size, we should take mirrors into account, but the original code didn't. - The other is that btrfs shouldn't use the size of the residual free space as the length of a dup chunk when doing chunk allocation. It is because the device space that a dup chunk needs is twice as large as the chunk size; if we use the size of the residual free space as the length of a dup chunk, we can not get enough free space. Fix it.
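To make the second point concrete, here is the arithmetic with invented numbers (illustration only): a DUP chunk keeps two copies on one device, so the usable chunk size is at most half of the device's residual free space.

	u64 calc_size = max_avail;	/* e.g. 512MB of residual free space */
	if (type & BTRFS_BLOCK_GROUP_DUP)
		do_div(calc_size, 2);	/* at most a 256MB DUP chunk fits */

This halving is exactly what the hunk below adds before retrying the allocation.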
Signed-off-by: Miao Xie Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 177b73179590..c50a85e0d08f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2177,6 +2177,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, int num_stripes = 1; int min_stripes = 1; int sub_stripes = 0; + int ncopies = 1; int looped = 0; int ret; int index; @@ -2197,12 +2198,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, if (type & (BTRFS_BLOCK_GROUP_DUP)) { num_stripes = 2; min_stripes = 2; + ncopies = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID1)) { if (fs_devices->rw_devices < 2) return -ENOSPC; num_stripes = 2; min_stripes = 2; + ncopies = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID10)) { num_stripes = fs_devices->rw_devices; @@ -2210,6 +2213,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, return -ENOSPC; num_stripes &= ~(u32)1; sub_stripes = 2; + ncopies = 2; min_stripes = 4; } @@ -2239,8 +2243,8 @@ again: map->num_stripes = num_stripes; } - if (calc_size * num_stripes > max_chunk_size) { - calc_size = max_chunk_size; + if (calc_size * num_stripes > max_chunk_size * ncopies) { + calc_size = max_chunk_size * ncopies; do_div(calc_size, num_stripes); do_div(calc_size, stripe_len); calc_size *= stripe_len; @@ -2321,6 +2325,8 @@ again: if (!looped && max_avail > 0) { looped = 1; calc_size = max_avail; + if (type & BTRFS_BLOCK_GROUP_DUP) + do_div(calc_size, 2); goto again; } kfree(map); -- cgit v1.2.2 From 7bfc837df935d850fe996dfe92ef48975cd4170a Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:26 +0000 Subject: btrfs: restructure find_free_dev_extent() - make it return the start position and length of the max free space when it can not find a suitable free space. - make it more readable Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 +- fs/btrfs/volumes.c | 155 ++++++++++++++++++++++++++++--------------------- 2 files changed, 91 insertions(+), 68 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3c71d95111fe..1e1c9a177626 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8099,7 +8099,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) mutex_lock(&root->fs_info->chunk_mutex); list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { u64 min_free = btrfs_block_group_used(&block_group->item); - u64 dev_offset, max_avail; + u64 dev_offset; /* * check to make sure we can actually find a chunk with enough */ if (device->total_bytes > device->bytes_used + min_free) { ret = find_free_dev_extent(NULL, device, min_free, - &dev_offset, &max_avail); + &dev_offset, NULL); if (!ret) break; ret = -1; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c50a85e0d08f..4838bd395e49 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -729,58 +729,82 @@ error: } /* + * find_free_dev_extent - find free space in the specified device + * @trans: transaction handler + * @device: the device which we search the free space in + * @num_bytes: the size of the free space that we need + * @start: store the start of the free space. + * @len: the size of the free space.
that we find, or the size of the max + * free space if we don't find suitable free space + * * this uses a pretty simple search, the expectation is that it is * called very infrequently and that a given device has a small number * of extents + * + * @start is used to store the start of the free space if we find. But if we + * don't find suitable free space, it will be used to store the start position + * of the max free space. + * + * @len is used to store the size of the free space that we find. + * But if we don't find suitable free space, it is used to store the size of + * the max free space. */ int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, - u64 *start, u64 *max_avail) + u64 *start, u64 *len) { struct btrfs_key key; struct btrfs_root *root = device->dev_root; - struct btrfs_dev_extent *dev_extent = NULL; + struct btrfs_dev_extent *dev_extent; struct btrfs_path *path; - u64 hole_size = 0; - u64 last_byte = 0; - u64 search_start = 0; + u64 hole_size; + u64 max_hole_start; + u64 max_hole_size; + u64 extent_end; + u64 search_start; u64 search_end = device->total_bytes; int ret; - int slot = 0; - int start_found; + int slot; struct extent_buffer *l; - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - path->reada = 2; - start_found = 0; - /* FIXME use last free of some kind */ /* we don't want to overwrite the superblock on the drive, * so we make sure to start at an offset of at least 1MB */ - search_start = max((u64)1024 * 1024, search_start); + search_start = 1024 * 1024; - if (root->fs_info->alloc_start + num_bytes <= device->total_bytes) + if (root->fs_info->alloc_start + num_bytes <= search_end) search_start = max(root->fs_info->alloc_start, search_start); + max_hole_start = search_start; + max_hole_size = 0; + + if (search_start >= search_end) { + ret = -ENOSPC; + goto error; + } + + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto error; + } + path->reada = 2; + key.objectid = device->devid; key.offset = search_start; key.type = BTRFS_DEV_EXTENT_KEY; + ret = btrfs_search_slot(trans, root, &key, path, 0, 0); if (ret < 0) - goto error; + goto out; if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, key.type); if (ret < 0) - goto error; - if (ret > 0) - start_found = 1; + goto out; } - l = path->nodes[0]; - btrfs_item_key_to_cpu(l, &key, path->slots[0]); + while (1) { l = path->nodes[0]; slot = path->slots[0]; @@ -789,24 +813,9 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans, if (ret == 0) continue; if (ret < 0) - goto error; -no_more_items: - if (!start_found) { - if (search_start >= search_end) { - ret = -ENOSPC; - goto error; - } - *start = search_start; - start_found = 1; - goto check_pending; - } - *start = last_byte > search_start ? 
- last_byte : search_start; if (search_end <= *start) { - ret = -ENOSPC; - goto error; - } - goto check_pending; + goto out; + + break; } btrfs_item_key_to_cpu(l, &key, slot); @@ -814,48 +823,62 @@ no_more_items: goto next; if (key.objectid > device->devid) - goto no_more_items; + break; - if (key.offset >= search_start && key.offset > last_byte && - start_found) { - if (last_byte < search_start) - last_byte = search_start; - hole_size = key.offset - last_byte; + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + goto next; - if (hole_size > *max_avail) - *max_avail = hole_size; + if (key.offset > search_start) { + hole_size = key.offset - search_start; - if (key.offset > last_byte && - hole_size >= num_bytes) { - *start = last_byte; - goto check_pending; + if (hole_size > max_hole_size) { + max_hole_start = search_start; + max_hole_size = hole_size; + } + + /* + * If this free space is greater than which we need, + * it must be the max free space that we have found + * until now, so max_hole_start must point to the start + * of this free space and the length of this free space + * is stored in max_hole_size. Thus, we return + * max_hole_start and max_hole_size and go back to the + * caller. + */ + if (hole_size >= num_bytes) { + ret = 0; + goto out; + } } - if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) - goto next; - start_found = 1; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); - last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent); + extent_end = key.offset + btrfs_dev_extent_length(l, + dev_extent); + if (extent_end > search_start) + search_start = extent_end; next: path->slots[0]++; cond_resched(); } -check_pending: - /* we have to make sure we didn't find an extent that has already - * been allocated by the map tree or the original allocation - */ - BUG_ON(*start < search_start); - if (*start + num_bytes > search_end) { - ret = -ENOSPC; - goto error; + hole_size = search_end- search_start; + if (hole_size > max_hole_size) { + max_hole_start = search_start; + max_hole_size = hole_size; } - /* check for pending inserts here */ - ret = 0; -error: + /* See above. */ + if (hole_size < num_bytes) + ret = -ENOSPC; + else + ret = 0; + +out: btrfs_free_path(path); +error: + *start = max_hole_start; + if (len && max_hole_size > *len) + *len = max_hole_size; return ret; } -- cgit v1.2.2 From b2117a39fa96cf4814e7cab8c11494149ba6f29d Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:28 +0000 Subject: btrfs: make the chunk allocator utilize the devices better With this patch, we change the handling method when we can not get enough free extents with default size. Implementation: 1. Look up the suitable free extent on each device and keep the search result. If a suitable free extent is not found, keep the max free extent. 2. If we get enough suitable free extents with default size, chunk allocation succeeds. 3. If we can not get enough free extents, but the number of the extents with default size is >= min_stripes, we just change the mapping information (reduce the number of stripes in the extent map), and chunk allocation succeeds. 4. If the number of the extents with default size is < min_stripes, sort the devices by the size of their max free extents in descending order. 5. Use the size of the max free extent on the (num_stripes - 1)th device as the stripe size to allocate the device space. In this way, the chunk allocator can allocate chunks as large as possible when the devices' space is not enough and make full use of the devices.
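A condensed sketch of steps 4 and 5 (illustrative only; the complete implementation is in the diff below):

	/* Sketch: after a descending sort on max_avail, each of the first
	 * num_stripes devices can hold at least the (num_stripes - 1)th
	 * entry's max_avail, so that value becomes the stripe size. */
	btrfs_descending_sort_devices(devices_info, nr_devices);
	stripe_size = devices_info[num_stripes - 1].max_avail;
	if (type & BTRFS_BLOCK_GROUP_DUP)
		do_div(stripe_size, 2);	/* DUP puts two stripes on one device */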
Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 379 ++++++++++++++++++++++++++++++++++++++--------------- fs/btrfs/volumes.h | 24 ++++ 2 files changed, 300 insertions(+), 103 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 4838bd395e49..c22784b989b7 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -877,7 +877,7 @@ out: btrfs_free_path(path); error: *start = max_hole_start; - if (len && max_hole_size > *len) + if (len) *len = max_hole_size; return ret; } @@ -2176,70 +2176,67 @@ static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, return calc_size * num_stripes; } -static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct map_lookup **map_ret, - u64 *num_bytes, u64 *stripe_size, - u64 start, u64 type) +/* Used to sort the devices by max_avail(descending sort) */ +int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) { - struct btrfs_fs_info *info = extent_root->fs_info; - struct btrfs_device *device = NULL; - struct btrfs_fs_devices *fs_devices = info->fs_devices; - struct list_head *cur; - struct map_lookup *map = NULL; - struct extent_map_tree *em_tree; - struct extent_map *em; - struct list_head private_devs; - int min_stripe_size = 1 * 1024 * 1024; - u64 calc_size = 1024 * 1024 * 1024; - u64 max_chunk_size = calc_size; - u64 min_free; - u64 avail; - u64 max_avail = 0; - u64 dev_offset; - int num_stripes = 1; - int min_stripes = 1; - int sub_stripes = 0; - int ncopies = 1; - int looped = 0; - int ret; - int index; - int stripe_len = 64 * 1024; + if (((struct btrfs_device_info *)dev_info1)->max_avail > + ((struct btrfs_device_info *)dev_info2)->max_avail) + return -1; + else if (((struct btrfs_device_info *)dev_info1)->max_avail < + ((struct btrfs_device_info *)dev_info2)->max_avail) + return 1; + else + return 0; +} - if ((type & BTRFS_BLOCK_GROUP_RAID1) && - (type & BTRFS_BLOCK_GROUP_DUP)) { - WARN_ON(1); - type &= ~BTRFS_BLOCK_GROUP_DUP; - } - if (list_empty(&fs_devices->alloc_list)) - return -ENOSPC; +static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, + int *num_stripes, int *min_stripes, + int *sub_stripes) +{ + *num_stripes = 1; + *min_stripes = 1; + *sub_stripes = 0; if (type & (BTRFS_BLOCK_GROUP_RAID0)) { - num_stripes = fs_devices->rw_devices; - min_stripes = 2; + *num_stripes = fs_devices->rw_devices; + *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_DUP)) { - num_stripes = 2; - min_stripes = 2; - ncopies = 2; + *num_stripes = 2; + *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID1)) { if (fs_devices->rw_devices < 2) return -ENOSPC; - num_stripes = 2; - min_stripes = 2; - ncopies = 2; + *num_stripes = 2; + *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID10)) { - num_stripes = fs_devices->rw_devices; - if (num_stripes < 4) + *num_stripes = fs_devices->rw_devices; + if (*num_stripes < 4) return -ENOSPC; - num_stripes &= ~(u32)1; - sub_stripes = 2; - ncopies = 2; - min_stripes = 4; + *num_stripes &= ~(u32)1; + *sub_stripes = 2; + *min_stripes = 4; } + return 0; +} + +static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, + u64 proposed_size, u64 type, + int num_stripes, int small_stripe) +{ + int min_stripe_size = 1 * 1024 * 1024; + u64 calc_size = proposed_size; + u64 max_chunk_size = calc_size; + int ncopies = 1; + + if (type & (BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_DUP | + BTRFS_BLOCK_GROUP_RAID10)) + ncopies = 2; + if (type & 
BTRFS_BLOCK_GROUP_DATA) { max_chunk_size = 10 * calc_size; min_stripe_size = 64 * 1024 * 1024; @@ -2256,51 +2253,209 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), max_chunk_size); -again: - max_avail = 0; - if (!map || map->num_stripes != num_stripes) { - kfree(map); - map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); - if (!map) - return -ENOMEM; - map->num_stripes = num_stripes; - } - if (calc_size * num_stripes > max_chunk_size * ncopies) { calc_size = max_chunk_size * ncopies; do_div(calc_size, num_stripes); - do_div(calc_size, stripe_len); - calc_size *= stripe_len; + do_div(calc_size, BTRFS_STRIPE_LEN); + calc_size *= BTRFS_STRIPE_LEN; } /* we don't want tiny stripes */ - if (!looped) + if (!small_stripe) calc_size = max_t(u64, min_stripe_size, calc_size); /* - * we're about to do_div by the stripe_len so lets make sure + * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure * we end up with something bigger than a stripe */ - calc_size = max_t(u64, calc_size, stripe_len * 4); + calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); + + do_div(calc_size, BTRFS_STRIPE_LEN); + calc_size *= BTRFS_STRIPE_LEN; + + return calc_size; +} + +static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, + int num_stripes) +{ + struct map_lookup *new; + size_t len = map_lookup_size(num_stripes); + + BUG_ON(map->num_stripes < num_stripes); + + if (map->num_stripes == num_stripes) + return map; + + new = kmalloc(len, GFP_NOFS); + if (!new) { + /* just change map->num_stripes */ + map->num_stripes = num_stripes; + return map; + } + + memcpy(new, map, len); + new->num_stripes = num_stripes; + kfree(map); + return new; +} + +/* + * helper to allocate device space from btrfs_device_info, in which we stored + * max free space information of every device. It is used when we can not + * allocate chunks by default size. + * + * By this helper, we can allocate a new chunk as larger as possible. + */ +static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, + struct btrfs_fs_devices *fs_devices, + struct btrfs_device_info *devices, + int nr_device, u64 type, + struct map_lookup **map_lookup, + int min_stripes, u64 *stripe_size) +{ + int i, index, sort_again = 0; + int min_devices = min_stripes; + u64 max_avail, min_free; + struct map_lookup *map = *map_lookup; + int ret; + + if (nr_device < min_stripes) + return -ENOSPC; + + btrfs_descending_sort_devices(devices, nr_device); + + max_avail = devices[0].max_avail; + if (!max_avail) + return -ENOSPC; + + for (i = 0; i < nr_device; i++) { + /* + * if dev_offset = 0, it means the free space of this device + * is less than what we need, and we didn't search max avail + * extent on this device, so do it now. 
+ */ + if (!devices[i].dev_offset) { + ret = find_free_dev_extent(trans, devices[i].dev, + max_avail, + &devices[i].dev_offset, + &devices[i].max_avail); + if (ret != 0 && ret != -ENOSPC) + return ret; + sort_again = 1; + } + } + + /* we update the max avail free extent of each devices, sort again */ + if (sort_again) + btrfs_descending_sort_devices(devices, nr_device); + + if (type & BTRFS_BLOCK_GROUP_DUP) + min_devices = 1; + + if (!devices[min_devices - 1].max_avail) + return -ENOSPC; + + max_avail = devices[min_devices - 1].max_avail; + if (type & BTRFS_BLOCK_GROUP_DUP) + do_div(max_avail, 2); - do_div(calc_size, stripe_len); - calc_size *= stripe_len; + max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, + min_stripes, 1); + if (type & BTRFS_BLOCK_GROUP_DUP) + min_free = max_avail * 2; + else + min_free = max_avail; + + if (min_free > devices[min_devices - 1].max_avail) + return -ENOSPC; + + map = __shrink_map_lookup_stripes(map, min_stripes); + *stripe_size = max_avail; + + index = 0; + for (i = 0; i < min_stripes; i++) { + map->stripes[i].dev = devices[index].dev; + map->stripes[i].physical = devices[index].dev_offset; + if (type & BTRFS_BLOCK_GROUP_DUP) { + i++; + map->stripes[i].dev = devices[index].dev; + map->stripes[i].physical = devices[index].dev_offset + + max_avail; + } + index++; + } + *map_lookup = map; + + return 0; +} + +static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, + struct map_lookup **map_ret, + u64 *num_bytes, u64 *stripe_size, + u64 start, u64 type) +{ + struct btrfs_fs_info *info = extent_root->fs_info; + struct btrfs_device *device = NULL; + struct btrfs_fs_devices *fs_devices = info->fs_devices; + struct list_head *cur; + struct map_lookup *map; + struct extent_map_tree *em_tree; + struct extent_map *em; + struct btrfs_device_info *devices_info; + struct list_head private_devs; + u64 calc_size = 1024 * 1024 * 1024; + u64 min_free; + u64 avail; + u64 dev_offset; + int num_stripes; + int min_stripes; + int sub_stripes; + int min_devices; /* the min number of devices we need */ + int i; + int ret; + int index; + + if ((type & BTRFS_BLOCK_GROUP_RAID1) && + (type & BTRFS_BLOCK_GROUP_DUP)) { + WARN_ON(1); + type &= ~BTRFS_BLOCK_GROUP_DUP; + } + if (list_empty(&fs_devices->alloc_list)) + return -ENOSPC; + + ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, + &min_stripes, &sub_stripes); + if (ret) + return ret; + + devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, + GFP_NOFS); + if (!devices_info) + return -ENOMEM; + + map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); + if (!map) { + ret = -ENOMEM; + goto error; + } + map->num_stripes = num_stripes; cur = fs_devices->alloc_list.next; index = 0; + i = 0; - if (type & BTRFS_BLOCK_GROUP_DUP) + calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, + num_stripes, 0); + + if (type & BTRFS_BLOCK_GROUP_DUP) { min_free = calc_size * 2; - else + min_devices = 1; + } else { min_free = calc_size; - - /* - * we add 1MB because we never use the first 1MB of the device, unless - * we've looped, then we are likely allocating the maximum amount of - * space left already - */ - if (!looped) - min_free += 1024 * 1024; + min_devices = min_stripes; + } INIT_LIST_HEAD(&private_devs); while (index < num_stripes) { @@ -2313,27 +2468,39 @@ again: cur = cur->next; if (device->in_fs_metadata && avail >= min_free) { - ret = find_free_dev_extent(trans, device, - min_free, &dev_offset, - &max_avail); + ret = find_free_dev_extent(trans, 
device, min_free, + &devices_info[i].dev_offset, + &devices_info[i].max_avail); if (ret == 0) { list_move_tail(&device->dev_alloc_list, &private_devs); map->stripes[index].dev = device; - map->stripes[index].physical = dev_offset; + map->stripes[index].physical = + devices_info[i].dev_offset; index++; if (type & BTRFS_BLOCK_GROUP_DUP) { map->stripes[index].dev = device; map->stripes[index].physical = - dev_offset + calc_size; + devices_info[i].dev_offset + + calc_size; index++; } - } - } else if (device->in_fs_metadata && avail > max_avail) - max_avail = avail; + } else if (ret != -ENOSPC) + goto error; + + devices_info[i].dev = device; + i++; + } else if (device->in_fs_metadata && + avail >= BTRFS_STRIPE_LEN) { + devices_info[i].dev = device; + devices_info[i].max_avail = avail; + i++; + } + if (cur == &fs_devices->alloc_list) break; } + list_splice(&private_devs, &fs_devices->alloc_list); if (index < num_stripes) { if (index >= min_stripes) { @@ -2342,36 +2509,36 @@ again: num_stripes /= sub_stripes; num_stripes *= sub_stripes; } - looped = 1; - goto again; - } - if (!looped && max_avail > 0) { - looped = 1; - calc_size = max_avail; - if (type & BTRFS_BLOCK_GROUP_DUP) - do_div(calc_size, 2); - goto again; + + map = __shrink_map_lookup_stripes(map, num_stripes); + } else if (i >= min_devices) { + ret = __btrfs_alloc_tiny_space(trans, fs_devices, + devices_info, i, type, + &map, min_stripes, + &calc_size); + if (ret) + goto error; + } else { + ret = -ENOSPC; + goto error; } - kfree(map); - return -ENOSPC; } map->sector_size = extent_root->sectorsize; - map->stripe_len = stripe_len; - map->io_align = stripe_len; - map->io_width = stripe_len; + map->stripe_len = BTRFS_STRIPE_LEN; + map->io_align = BTRFS_STRIPE_LEN; + map->io_width = BTRFS_STRIPE_LEN; map->type = type; - map->num_stripes = num_stripes; map->sub_stripes = sub_stripes; *map_ret = map; *stripe_size = calc_size; *num_bytes = chunk_bytes_by_type(type, calc_size, - num_stripes, sub_stripes); + map->num_stripes, sub_stripes); em = alloc_extent_map(GFP_NOFS); if (!em) { - kfree(map); - return -ENOMEM; + ret = -ENOMEM; + goto error; } em->bdev = (struct block_device *)map; em->start = start; @@ -2404,7 +2571,13 @@ again: index++; } + kfree(devices_info); return 0; + +error: + kfree(map); + kfree(devices_info); + return ret; } static int __finish_chunk_alloc(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a668c0116982..a5cfedf393f9 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -20,8 +20,11 @@ #define __BTRFS_VOLUMES_ #include +#include #include "async-thread.h" +#define BTRFS_STRIPE_LEN (64 * 1024) + struct buffer_head; struct btrfs_pending_bios { struct bio *head; @@ -137,6 +140,27 @@ struct btrfs_multi_bio { struct btrfs_bio_stripe stripes[]; }; +struct btrfs_device_info { + struct btrfs_device *dev; + u64 dev_offset; + u64 max_avail; +}; + +/* Used to sort the devices by max_avail(descending sort) */ +int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); + +/* + * sort the devices by max_avail, in which max free extent size of each device + * is stored.(Descending Sort) + */ +static inline void btrfs_descending_sort_devices( + struct btrfs_device_info *devices, + size_t nr_devices) +{ + sort(devices, nr_devices, sizeof(struct btrfs_device_info), + btrfs_cmp_device_free_bytes, NULL); +} + #define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \ (sizeof(struct btrfs_bio_stripe) * (n))) -- cgit v1.2.2 From 
6d07bcec969af335d4e35b3921131b7929bd634e Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:31 +0000 Subject: btrfs: fix wrong free space information of btrfs When we store data with a RAID profile in btrfs on two or more disks of different sizes, the df command shows there is some free space in the filesystem, but in fact the user cannot write any data: df reports the wrong free space information for btrfs. # mkfs.btrfs -d raid1 /dev/sda9 /dev/sda10 # btrfs-show Label: none uuid: a95cd49e-6e33-45b8-8741-a36153ce4b64 Total devices 2 FS bytes used 28.00KB devid 1 size 5.01GB used 2.03GB path /dev/sda9 devid 2 size 10.00GB used 2.01GB path /dev/sda10 # btrfs device scan /dev/sda9 /dev/sda10 # mount /dev/sda9 /mnt # dd if=/dev/zero of=tmpfile0 bs=4K count=9999999999 (fill the filesystem) # sync # df -TH Filesystem Type Size Used Avail Use% Mounted on /dev/sda9 btrfs 17G 8.6G 5.4G 62% /mnt # btrfs-show Label: none uuid: a95cd49e-6e33-45b8-8741-a36153ce4b64 Total devices 2 FS bytes used 3.99GB devid 1 size 5.01GB used 5.01GB path /dev/sda9 devid 2 size 10.00GB used 4.99GB path /dev/sda10 This happens because btrfs cannot allocate chunks when one of the paired disks has no space; the free space on the other disk can then never be used and should be subtracted from the total space, but btrfs doesn't subtract it. This is confusing to the user. This patch fixes it by calculating the free space that can actually be used to allocate chunks. Implementation: 1. get the free space of every device, and align it by the stripe length. 2. sort the devices by their free space. 3. check the free space of the devices: 3.1. if it is not zero, check the number of devices that have more free space than this device; if that number is at least the min stripe number, the free space can be used and is added to the total free space; if it is below the min stripe number, we cannot use the free space and the check ends. 3.2. if the free space is zero, check the next device and go to 3.1. This implementation essentially performs a dry run ("fake" allocation) of the chunk allocator.
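A condensed sketch of that dry-run loop (illustrative; the full version is in the super.c hunk below). With the devices sorted by max_avail in descending order, it walks from the smallest device upwards and, while min_stripes devices can still supply space, counts that space as allocatable:

	i = nr_devices - 1;
	avail_space = 0;
	while (nr_devices >= min_stripes) {
		if (devices_info[i].max_avail >= min_stripe_size) {
			u64 alloc_size = devices_info[i].max_avail;
			avail_space += alloc_size * min_stripes;
			for (j = i + 1 - min_stripes; j <= i; j++)
				devices_info[j].max_avail -= alloc_size;
		}
		i--;
		nr_devices--;
	}

In the RAID1 example above this pairs the 5GB device's free space against the 10GB device's, so only the matching portion is reported as available.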
After applying this patch, df can show correct space information: # df -TH Filesystem Type Size Used Avail Use% Mounted on /dev/sda9 btrfs 17G 8.6G 0 100% /mnt Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 + fs/btrfs/extent-tree.c | 58 +++++++++++++++++++- fs/btrfs/super.c | 146 +++++++++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/volumes.c | 84 ++++++++++++++++++++++++++++ fs/btrfs/volumes.h | 3 + 5 files changed, 286 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0cb322cc4fc0..0995f4f68d7a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2158,6 +2158,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 group_start); u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); +u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); int btrfs_check_data_free_space(struct inode *inode, u64 bytes); @@ -2201,6 +2202,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, int btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache); void btrfs_put_block_group_cache(struct btrfs_fs_info *info); +u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1e1c9a177626..04bfc3a2bd9f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3090,7 +3090,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) return btrfs_reduce_alloc_profile(root, flags); } -static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) +u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) { u64 flags; @@ -8019,6 +8019,62 @@ out: return ret; } +/* + * helper to account the unused space of all the readonly block group in the + * list. takes mirrors into account. + */ +static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list) +{ + struct btrfs_block_group_cache *block_group; + u64 free_bytes = 0; + int factor; + + list_for_each_entry(block_group, groups_list, list) { + spin_lock(&block_group->lock); + + if (!block_group->ro) { + spin_unlock(&block_group->lock); + continue; + } + + if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10 | + BTRFS_BLOCK_GROUP_DUP)) + factor = 2; + else + factor = 1; + + free_bytes += (block_group->key.offset - + btrfs_block_group_used(&block_group->item)) * + factor; + + spin_unlock(&block_group->lock); + } + + return free_bytes; +} + +/* + * helper to account the unused space of all the readonly block group in the + * space_info. takes mirrors into account.
+ */ +u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) +{ + int i; + u64 free_bytes = 0; + + spin_lock(&sinfo->lock); + + for(i = 0; i < BTRFS_NR_RAID_TYPES; i++) + if (!list_empty(&sinfo->block_groups[i])) + free_bytes += __btrfs_get_ro_block_group_free_space( + &sinfo->block_groups[i]); + + spin_unlock(&sinfo->lock); + + return free_bytes; +} + int btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache) { diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index caa5bcc62f16..2963376e77f4 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -777,6 +777,127 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) return 0; } +/* + * The helper to calc the free space on the devices that can be used to store + * file data. + */ +static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_device_info *devices_info; + struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; + struct btrfs_device *device; + u64 skip_space; + u64 type; + u64 avail_space; + u64 used_space; + u64 min_stripe_size; + int min_stripes = 1; + int i = 0, nr_devices; + int ret; + + nr_devices = fs_info->fs_devices->rw_devices; + BUG_ON(!nr_devices); + + devices_info = kmalloc(sizeof(*devices_info) * nr_devices, + GFP_NOFS); + if (!devices_info) + return -ENOMEM; + + /* calc min stripe number for data space alloction */ + type = btrfs_get_alloc_profile(root, 1); + if (type & BTRFS_BLOCK_GROUP_RAID0) + min_stripes = 2; + else if (type & BTRFS_BLOCK_GROUP_RAID1) + min_stripes = 2; + else if (type & BTRFS_BLOCK_GROUP_RAID10) + min_stripes = 4; + + if (type & BTRFS_BLOCK_GROUP_DUP) + min_stripe_size = 2 * BTRFS_STRIPE_LEN; + else + min_stripe_size = BTRFS_STRIPE_LEN; + + list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { + if (!device->in_fs_metadata) + continue; + + avail_space = device->total_bytes - device->bytes_used; + + /* align with stripe_len */ + do_div(avail_space, BTRFS_STRIPE_LEN); + avail_space *= BTRFS_STRIPE_LEN; + + /* + * In order to avoid overwritting the superblock on the drive, + * btrfs starts at an offset of at least 1MB when doing chunk + * allocation. + */ + skip_space = 1024 * 1024; + + /* user can set the offset in fs_info->alloc_start. */ + if (fs_info->alloc_start + BTRFS_STRIPE_LEN <= + device->total_bytes) + skip_space = max(fs_info->alloc_start, skip_space); + + /* + * btrfs can not use the free space in [0, skip_space - 1], + * we must subtract it from the total. In order to implement + * it, we account the used space in this range first. + */ + ret = btrfs_account_dev_extents_size(device, 0, skip_space - 1, + &used_space); + if (ret) { + kfree(devices_info); + return ret; + } + + /* calc the free space in [0, skip_space - 1] */ + skip_space -= used_space; + + /* + * we can use the free space in [0, skip_space - 1], subtract + * it from the total. 
+ */ + if (avail_space && avail_space >= skip_space) + avail_space -= skip_space; + else + avail_space = 0; + + if (avail_space < min_stripe_size) + continue; + + devices_info[i].dev = device; + devices_info[i].max_avail = avail_space; + + i++; + } + + nr_devices = i; + + btrfs_descending_sort_devices(devices_info, nr_devices); + + i = nr_devices - 1; + avail_space = 0; + while (nr_devices >= min_stripes) { + if (devices_info[i].max_avail >= min_stripe_size) { + int j; + u64 alloc_size; + + avail_space += devices_info[i].max_avail * min_stripes; + alloc_size = devices_info[i].max_avail; + for (j = i + 1 - min_stripes; j <= i; j++) + devices_info[j].max_avail -= alloc_size; + } + i--; + nr_devices--; + } + + kfree(devices_info); + *free_bytes = avail_space; + return 0; +} + static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct btrfs_root *root = btrfs_sb(dentry->d_sb); @@ -784,16 +905,21 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) struct list_head *head = &root->fs_info->space_info; struct btrfs_space_info *found; u64 total_used = 0; - u64 total_used_data = 0; + u64 total_free_data = 0; int bits = dentry->d_sb->s_blocksize_bits; __be32 *fsid = (__be32 *)root->fs_info->fsid; + int ret; + /* holding chunk_muext to avoid allocating new chunks */ + mutex_lock(&root->fs_info->chunk_mutex); rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { - if (found->flags & BTRFS_BLOCK_GROUP_DATA) - total_used_data += found->disk_used; - else - total_used_data += found->disk_total; + if (found->flags & BTRFS_BLOCK_GROUP_DATA) { + total_free_data += found->disk_total - found->disk_used; + total_free_data -= + btrfs_account_ro_block_groups_free_space(found); + } + total_used += found->disk_used; } rcu_read_unlock(); @@ -801,9 +927,17 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_namelen = BTRFS_NAME_LEN; buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; buf->f_bfree = buf->f_blocks - (total_used >> bits); - buf->f_bavail = buf->f_blocks - (total_used_data >> bits); buf->f_bsize = dentry->d_sb->s_blocksize; buf->f_type = BTRFS_SUPER_MAGIC; + buf->f_bavail = total_free_data; + ret = btrfs_calc_avail_data_space(root, &total_free_data); + if (ret) { + mutex_unlock(&root->fs_info->chunk_mutex); + return ret; + } + buf->f_bavail += total_free_data; + buf->f_bavail = buf->f_bavail >> bits; + mutex_unlock(&root->fs_info->chunk_mutex); /* We treat it as constant endianness (it doesn't matter _which_) because we want the fsid to come out the same whether mounted diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c22784b989b7..0c7f478cf645 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -728,6 +728,90 @@ error: return ret; } +/* helper to account the used device space in the range */ +int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, + u64 end, u64 *length) +{ + struct btrfs_key key; + struct btrfs_root *root = device->dev_root; + struct btrfs_dev_extent *dev_extent; + struct btrfs_path *path; + u64 extent_end; + int ret; + int slot; + struct extent_buffer *l; + + *length = 0; + + if (start >= device->total_bytes) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->reada = 2; + + key.objectid = device->devid; + key.offset = start; + key.type = BTRFS_DEV_EXTENT_KEY; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + if (ret > 0) { + ret = btrfs_previous_item(root, path, key.objectid, key.type); + if (ret < 0) + 
goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid < device->devid) + goto next; + + if (key.objectid > device->devid) + break; + + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + goto next; + + dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); + extent_end = key.offset + btrfs_dev_extent_length(l, + dev_extent); + if (key.offset <= start && extent_end > end) { + *length = end - start + 1; + break; + } else if (key.offset <= start && extent_end > start) + *length += extent_end - start; + else if (key.offset > start && extent_end <= end) + *length += extent_end - key.offset; + else if (key.offset > start && key.offset <= end) { + *length += end - key.offset + 1; + break; + } else if (key.offset > end) + break; + +next: + path->slots[0]++; + } + ret = 0; +out: + btrfs_free_path(path); + return ret; +} + /* * find_free_dev_extent - find free space in the specified device * @trans: transaction handler diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a5cfedf393f9..7af6144a7954 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -161,6 +161,9 @@ static inline void btrfs_descending_sort_devices( btrfs_cmp_device_free_bytes, NULL); } +int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, + u64 end, u64 *length); + #define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \ (sizeof(struct btrfs_bio_stripe) * (n))) -- cgit v1.2.2 From 42838bb265b9cff3de9587fcacc398b5112dc2d9 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 6 Jan 2011 21:45:21 +0000 Subject: btrfs: Mem leak in btrfs_get_acl() It seems to me that we leak the memory allocated to 'value' in btrfs_get_acl() if the call to posix_acl_from_xattr() fails. Here's a patch that attempts to correct that problem. Signed-off-by: Jesper Juhl Signed-off-by: Chris Mason --- fs/btrfs/acl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 2222d161c7b6..6d1410e392d3 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -60,8 +60,10 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) size = __btrfs_getxattr(inode, name, value, size); if (size > 0) { acl = posix_acl_from_xattr(value, size); - if (IS_ERR(acl)) + if (IS_ERR(acl)) { + kfree(value); return acl; + } set_cached_acl(inode, type, acl); } kfree(value); -- cgit v1.2.2 From 20b450773d17e325190c158e10bfdb25dc21d2d6 Mon Sep 17 00:00:00 2001 From: Dave Young Date: Sat, 8 Jan 2011 10:09:13 +0000 Subject: btrfs: mount failure return value fix I happened to pass a swap partition as the root partition on the kernel command line; the kernel then panicked and told me "Cannot open root device". That is not correct: in fact it is a fs type mismatch rather than 'no device'. Eventually I found that btrfs mounting failed with -EIO, where it should be -EINVAL.
The logic in init/do_mounts.c: for (p = fs_names; *p; p += strlen(p)+1) { int err = do_mount_root(name, p, flags, root_mount_data); switch (err) { case 0: goto out; case -EACCES: flags |= MS_RDONLY; goto retry; case -EINVAL: continue; } print "Cannot open root device" panic } So fs types after btrfs will have no chance to be mounted. Fix the return value to -EINVAL here. Signed-off-by: Dave Young Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 +++- fs/btrfs/volumes.c | 8 +++++--- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f88eb2ce7919..f9efb68fc2e3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1713,8 +1713,10 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info, BTRFS_ROOT_TREE_OBJECTID); bh = btrfs_read_dev_super(fs_devices->latest_bdev); - if (!bh) + if (!bh) { + err = -EINVAL; goto fail_iput; + } memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); memcpy(&fs_info->super_for_commit, &fs_info->super_copy, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0c7f478cf645..e8be478178aa 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -600,8 +600,10 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, set_blocksize(bdev, 4096); bh = btrfs_read_dev_super(bdev); - if (!bh) + if (!bh) { + ret = -EINVAL; goto error_close; + } disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); @@ -702,7 +704,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, goto error_close; bh = btrfs_read_dev_super(bdev); if (!bh) { - ret = -EIO; + ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; @@ -1302,7 +1304,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) set_blocksize(bdev, 4096); bh = btrfs_read_dev_super(bdev); if (!bh) { - ret = -EIO; + ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; -- cgit v1.2.2 From ff175d57f057f77d2d3031d674c2af9167a4af02 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sat, 25 Dec 2010 21:22:30 +0000 Subject: btrfs: Don't pass NULL ptr to func that may deref it. Hi, In fs/btrfs/inode.c::fixup_tree_root_location() we have this code: ... if (!path) { err = -ENOMEM; goto out; } ... out: btrfs_free_path(path); return err; btrfs_free_path() passes its argument on to other functions and some of them end up dereferencing the pointer. In the code above that pointer is clearly NULL, so btrfs_free_path() will eventually cause a NULL dereference. There are many ways to cut this cake (fix the bug). The one I chose was to make btrfs_free_path() deal gracefully with NULL pointers. If you disagree, feel free to come up with an alternative patch.
Signed-off-by: Jesper Juhl Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 9ac171599258..99599f1c1554 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -105,6 +105,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, /* this also releases the path */ void btrfs_free_path(struct btrfs_path *p) { + if (!p) + return; btrfs_release_path(NULL, p); kmem_cache_free(btrfs_path_cachep, p); } -- cgit v1.2.2 From 91ca338d776e0cefb255bf2979b6448febd880f5 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Wed, 5 Jan 2011 02:32:22 +0000 Subject: btrfs: check NULL or not Should check if functions returns NULL or not. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 6 ++++++ fs/btrfs/disk-io.c | 8 ++++++++ fs/btrfs/extent_io.c | 2 ++ 3 files changed, 16 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 99599f1c1554..b5baff0dccfe 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2516,6 +2516,9 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_assert_tree_locked(path->nodes[1]); right = read_node_slot(root, upper, slot + 1); + if (right == NULL) + return 1; + btrfs_tree_lock(right); btrfs_set_lock_blocking(right); @@ -2766,6 +2769,9 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_assert_tree_locked(path->nodes[1]); left = read_node_slot(root, path->nodes[1], slot - 1); + if (left == NULL) + return 1; + btrfs_tree_lock(left); btrfs_set_lock_blocking(left); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f9efb68fc2e3..a0c37b2ee9ed 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -353,6 +353,10 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) WARN_ON(len == 0); eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + if (eb == NULL) { + WARN_ON(1); + goto out; + } ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, btrfs_header_generation(eb)); BUG_ON(ret); @@ -427,6 +431,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, WARN_ON(len == 0); eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + if (eb == NULL) { + ret = -EIO; + goto out; + } found_start = btrfs_header_bytenr(eb); if (found_start != start) { diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f1d198128959..8b8d3d99ae68 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3075,6 +3075,8 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, #endif eb = kmem_cache_zalloc(extent_buffer_cache, mask); + if (eb == NULL) + return NULL; eb->start = start; eb->len = len; spin_lock_init(&eb->lock); -- cgit v1.2.2 From 5e540f7715b8cd83b8e60beaaa525b125cc122de Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 27 Dec 2010 06:53:10 +0000 Subject: btrfs: Fix memory leak in btrfs_read_fs_root_no_radix() In btrfs_read_fs_root_no_radix(), 'root' is not freed if btrfs_search_slot() returns error. 
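This is the classic early-allocation leak: an object created at the top of a function must be released on every error exit, not only on the successful path. A reduced sketch of the corrected flow (simplified, not the literal function body; the allocation is shown generically):

	root = kzalloc(sizeof(*root), GFP_NOFS);	/* stands in for the real allocation */
	if (!root)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_search_slot(NULL, tree_root, &location, path, 0, 0);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);	/* the added line: release 'root' before bailing out */
		if (ret > 0)
			ret = -ENOENT;
		return ERR_PTR(ret);
	}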
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a0c37b2ee9ed..9b1dd4138072 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1153,6 +1153,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, } btrfs_free_path(path); if (ret) { + kfree(root); if (ret > 0) ret = -ENOENT; return ERR_PTR(ret); -- cgit v1.2.2 From f690efb1aa2a961dd6655529c1797fcac60ad6d9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 12 Jan 2011 21:04:22 +0000 Subject: Btrfs: don't warn if we get ENOSPC in btrfs_block_rsv_check If we run low on space we could get a bunch of warnings out of btrfs_block_rsv_check, but this is mostly just called via the transaction code to see if we need to end the transaction, it expects to see failures, so let's not WARN and freak everybody out for no reason. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 04bfc3a2bd9f..055b837eab19 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3727,11 +3727,6 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, return 0; } - WARN_ON(1); - printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n", - block_rsv->size, block_rsv->reserved, - block_rsv->freed[0], block_rsv->freed[1]); - return -ENOSPC; } -- cgit v1.2.2 From 6f88a4403def422bd8e276ddf6863d6ac71435d2 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 29 Dec 2010 14:55:03 +0000 Subject: btrfs: Require CAP_SYS_ADMIN for filesystem rebalance Filesystem rebalancing (BTRFS_IOC_BALANCE) affects the entire filesystem and may run uninterruptibly for a long time. This does not seem to be something that an unprivileged user should be able to do. Reported-by: Aron Xu Signed-off-by: Ben Hutchings Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e8be478178aa..f2d2f4ccc738 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "compat.h" #include "ctree.h" @@ -2024,6 +2025,9 @@ int btrfs_balance(struct btrfs_root *dev_root) if (dev_root->fs_info->sb->s_flags & MS_RDONLY) return -EROFS; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + mutex_lock(&dev_root->fs_info->volume_mutex); dev_root = dev_root->fs_info->dev_root; -- cgit v1.2.2 From f8b18087fd3277e424a24e13ce0edf30abe97ce0 Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Wed, 12 Jan 2011 10:30:42 +0100 Subject: fs/btrfs: Fix build of ctree Fix the build failure in some configurations: CC [M] fs/btrfs/ctree.o In file included from fs/btrfs/ctree.c:21:0: fs/btrfs/ctree.h:1003:17: error: field 'super_kobj' has incomplete type fs/btrfs/ctree.h:1074:17: error: field 'root_kobj' has incomplete type make[2]: *** [fs/btrfs/ctree.o] Error 1 make[1]: *** [fs/btrfs] Error 2 make: *** [fs] Error 2 caused by commit 57cc7215b708 ("headers: kobject.h redux") We need to include kobject.h here. 
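The failure mode is worth spelling out: ctree.h embeds struct kobject by value, and an embedded member requires the complete type, so the header cannot rely on a forward declaration or on indirect includes. A minimal illustration (generic structs, not the btrfs ones):

	struct kobject;			/* forward declaration: size unknown here */

	struct by_pointer {
		struct kobject *kobj;	/* fine: a pointer needs only the declaration */
	};

	struct by_value {
		struct kobject kobj;	/* error: field has incomplete type */
	};				/* cured by #include <linux/kobject.h> */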
Reported-by: Jeff Garzik Fix-suggested-by: Li Zefan Signed-off-by: Stefan Schmidt Signed-off-by: Linus Torvalds --- fs/btrfs/ctree.h | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a142d204b526..b875d445ea81 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -27,6 +27,7 @@ #include #include #include +#include <linux/kobject.h> #include #include "extent_io.h" #include "extent_map.h" -- cgit v1.2.2 From 64c23e86873ee410554d6d1c76b60da47025e96f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 Jan 2011 13:07:30 +0100 Subject: make the feature checks in ->fallocate future proof Instead of various home grown checks that might need updates for new flags just check for any bit outside the mask of the features supported by the filesystem. This makes the check future proof for any newly added flag. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a3798a3aa0d2..64daf2acd0d5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7116,7 +7116,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, alloc_end = (offset + len + mask) & ~mask; /* We only support the FALLOC_FL_KEEP_SIZE mode */ - if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + if (mode & ~FALLOC_FL_KEEP_SIZE) return -EOPNOTSUPP; /* -- cgit v1.2.2 From 2fe17c1075836b66678ed2a305fd09b6773883aa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 Jan 2011 13:07:43 +0100 Subject: fallocate should be a file operation Currently all filesystems except XFS implement fallocate asynchronously, while XFS forced a commit. Both of these are suboptimal - in case of O_SYNC I/O we really want our allocation on disk, especially for the !KEEP_SIZE case where we actually grow the file with user-visible zeroes. On the other hand always committing the transaction is a bad idea for fast-path uses of fallocate like for example in recent Samba versions. Given that block allocation is a data plane operation anyway change it from an inode operation to a file operation so that we have the file structure available that lets us check for O_SYNC. This also includes moving the code around for a few of the filesystems, and removing the already unneeded S_ISDIR checks given that we only wire up fallocate for regular files.
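Concretely, the hook moves from struct inode_operations to struct file_operations, and the prototype gains the open file, which is what lets an implementation look at f_flags for O_SYNC; a sketch of the before/after signatures:

	/* before: an inode operation, with no view of the open file */
	long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);

	/* after: a file operation, so file->f_flags & O_SYNC can be checked */
	long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len);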
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/btrfs/file.c | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/inode.c | 111 ------------------------------------------------------ 2 files changed, 113 insertions(+), 111 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 66836d85763b..a9e0a4eaf3d9 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -1237,6 +1238,117 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) return 0; } +static long btrfs_fallocate(struct file *file, int mode, + loff_t offset, loff_t len) +{ + struct inode *inode = file->f_path.dentry->d_inode; + struct extent_state *cached_state = NULL; + u64 cur_offset; + u64 last_byte; + u64 alloc_start; + u64 alloc_end; + u64 alloc_hint = 0; + u64 locked_end; + u64 mask = BTRFS_I(inode)->root->sectorsize - 1; + struct extent_map *em; + int ret; + + alloc_start = offset & ~mask; + alloc_end = (offset + len + mask) & ~mask; + + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode & ~FALLOC_FL_KEEP_SIZE) + return -EOPNOTSUPP; + + /* + * wait for ordered IO before we have any locks. We'll loop again + * below with the locks held. + */ + btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start); + + mutex_lock(&inode->i_mutex); + ret = inode_newsize_ok(inode, alloc_end); + if (ret) + goto out; + + if (alloc_start > inode->i_size) { + ret = btrfs_cont_expand(inode, alloc_start); + if (ret) + goto out; + } + + ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); + if (ret) + goto out; + + locked_end = alloc_end - 1; + while (1) { + struct btrfs_ordered_extent *ordered; + + /* the extent lock is ordered inside the running + * transaction + */ + lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, + locked_end, 0, &cached_state, GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, + alloc_end - 1); + if (ordered && + ordered->file_offset + ordered->len > alloc_start && + ordered->file_offset < alloc_end) { + btrfs_put_ordered_extent(ordered); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + alloc_start, locked_end, + &cached_state, GFP_NOFS); + /* + * we can't wait on the range with the transaction + * running or with the extent lock held + */ + btrfs_wait_ordered_range(inode, alloc_start, + alloc_end - alloc_start); + } else { + if (ordered) + btrfs_put_ordered_extent(ordered); + break; + } + } + + cur_offset = alloc_start; + while (1) { + em = btrfs_get_extent(inode, NULL, 0, cur_offset, + alloc_end - cur_offset, 0); + BUG_ON(IS_ERR(em) || !em); + last_byte = min(extent_map_end(em), alloc_end); + last_byte = (last_byte + mask) & ~mask; + if (em->block_start == EXTENT_MAP_HOLE || + (cur_offset >= inode->i_size && + !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { + ret = btrfs_prealloc_file_range(inode, mode, cur_offset, + last_byte - cur_offset, + 1 << inode->i_blkbits, + offset + len, + &alloc_hint); + if (ret < 0) { + free_extent_map(em); + break; + } + } + free_extent_map(em); + + cur_offset = last_byte; + if (cur_offset >= alloc_end) { + ret = 0; + break; + } + } + unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, + &cached_state, GFP_NOFS); + + btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); +out: + mutex_unlock(&inode->i_mutex); + return ret; +} + const struct file_operations btrfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, @@ 
-1248,6 +1360,7 @@ const struct file_operations btrfs_file_operations = { .open = generic_file_open, .release = btrfs_release_file, .fsync = btrfs_sync_file, + .fallocate = btrfs_fallocate, .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = btrfs_ioctl, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 64daf2acd0d5..902afbf50811 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7098,116 +7098,6 @@ int btrfs_prealloc_file_range_trans(struct inode *inode, min_size, actual_len, alloc_hint, trans); } -static long btrfs_fallocate(struct inode *inode, int mode, - loff_t offset, loff_t len) -{ - struct extent_state *cached_state = NULL; - u64 cur_offset; - u64 last_byte; - u64 alloc_start; - u64 alloc_end; - u64 alloc_hint = 0; - u64 locked_end; - u64 mask = BTRFS_I(inode)->root->sectorsize - 1; - struct extent_map *em; - int ret; - - alloc_start = offset & ~mask; - alloc_end = (offset + len + mask) & ~mask; - - /* We only support the FALLOC_FL_KEEP_SIZE mode */ - if (mode & ~FALLOC_FL_KEEP_SIZE) - return -EOPNOTSUPP; - - /* - * wait for ordered IO before we have any locks. We'll loop again - * below with the locks held. - */ - btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start); - - mutex_lock(&inode->i_mutex); - ret = inode_newsize_ok(inode, alloc_end); - if (ret) - goto out; - - if (alloc_start > inode->i_size) { - ret = btrfs_cont_expand(inode, alloc_start); - if (ret) - goto out; - } - - ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); - if (ret) - goto out; - - locked_end = alloc_end - 1; - while (1) { - struct btrfs_ordered_extent *ordered; - - /* the extent lock is ordered inside the running - * transaction - */ - lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, - locked_end, 0, &cached_state, GFP_NOFS); - ordered = btrfs_lookup_first_ordered_extent(inode, - alloc_end - 1); - if (ordered && - ordered->file_offset + ordered->len > alloc_start && - ordered->file_offset < alloc_end) { - btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - alloc_start, locked_end, - &cached_state, GFP_NOFS); - /* - * we can't wait on the range with the transaction - * running or with the extent lock held - */ - btrfs_wait_ordered_range(inode, alloc_start, - alloc_end - alloc_start); - } else { - if (ordered) - btrfs_put_ordered_extent(ordered); - break; - } - } - - cur_offset = alloc_start; - while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, - alloc_end - cur_offset, 0); - BUG_ON(IS_ERR(em) || !em); - last_byte = min(extent_map_end(em), alloc_end); - last_byte = (last_byte + mask) & ~mask; - if (em->block_start == EXTENT_MAP_HOLE || - (cur_offset >= inode->i_size && - !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { - ret = btrfs_prealloc_file_range(inode, mode, cur_offset, - last_byte - cur_offset, - 1 << inode->i_blkbits, - offset + len, - &alloc_hint); - if (ret < 0) { - free_extent_map(em); - break; - } - } - free_extent_map(em); - - cur_offset = last_byte; - if (cur_offset >= alloc_end) { - ret = 0; - break; - } - } - unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - &cached_state, GFP_NOFS); - - btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); -out: - mutex_unlock(&inode->i_mutex); - return ret; -} - static int btrfs_set_page_dirty(struct page *page) { return __set_page_dirty_nobuffers(page); @@ -7310,7 +7200,6 @@ static const struct inode_operations btrfs_file_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, 
.permission = btrfs_permission, - .fallocate = btrfs_fallocate, .fiemap = btrfs_fiemap, }; static const struct inode_operations btrfs_special_inode_operations = { -- cgit v1.2.2 From acce952b0263825da32cf10489413dec78053347 Mon Sep 17 00:00:00 2001 From: liubo Date: Thu, 6 Jan 2011 19:30:25 +0800 Subject: Btrfs: forced readonly mounts on errors This patch comes from "Forced readonly mounts on errors" ideas. As we know, this is the first step in being more fault tolerant of disk corruptions instead of just using BUG() statements. The major content: - add a framework for generating errors that should result in filesystems going readonly. - keep FS state in disk super block. - make sure that all of resource will be freed and released at umount time. - make sure that fter FS is forced readonly on error, there will be no more disk change before FS is corrected. For this, we should stop write operation. After this patch is applied, the conversion from BUG() to such a framework can happen incrementally. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 24 +++ fs/btrfs/disk-io.c | 391 ++++++++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 11 ++ fs/btrfs/file.c | 11 ++ fs/btrfs/super.c | 84 +++++++++++ fs/btrfs/transaction.c | 3 + 7 files changed, 523 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0995f4f68d7a..72195378bef9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -295,6 +295,14 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) #define BTRFS_FSID_SIZE 16 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) + +/* + * File system states + */ + +/* Errors detected */ +#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) + #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) @@ -1058,6 +1066,9 @@ struct btrfs_fs_info { unsigned metadata_ratio; void *bdev_holder; + + /* filesystem state */ + u64 fs_state; }; /* @@ -2203,6 +2214,11 @@ int btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache); void btrfs_put_block_group_cache(struct btrfs_fs_info *info); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); +int btrfs_error_unpin_extent_range(struct btrfs_root *root, + u64 start, u64 end); +int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, + u64 num_bytes); + /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); @@ -2556,6 +2572,14 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); /* super.c */ int btrfs_parse_options(struct btrfs_root *root, char *options); int btrfs_sync_fs(struct super_block *sb, int wait); +void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno); + +#define btrfs_std_error(fs_info, errno) \ +do { \ + if ((errno)) \ + __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ +} while (0) /* acl.c */ #ifdef CONFIG_BTRFS_FS_POSIX_ACL diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9b1dd4138072..1a3af9e8e0c4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -44,6 +44,20 @@ static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); static void free_fs_root(struct btrfs_root *root); +static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, + int read_only); +static int 
btrfs_destroy_ordered_operations(struct btrfs_root *root); +static int btrfs_destroy_ordered_extents(struct btrfs_root *root); +static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, + struct btrfs_root *root); +static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); +static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); +static int btrfs_destroy_marked_extents(struct btrfs_root *root, + struct extent_io_tree *dirty_pages, + int mark); +static int btrfs_destroy_pinned_extent(struct btrfs_root *root, + struct extent_io_tree *pinned_extents); +static int btrfs_cleanup_transaction(struct btrfs_root *root); /* * end_io_wq structs are used to do processing in task context when an IO is @@ -1738,6 +1752,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!btrfs_super_root(disk_super)) goto fail_iput; + /* check FS state, whether FS is broken. */ + fs_info->fs_state |= btrfs_super_flags(disk_super); + + btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); + ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; @@ -1968,7 +1987,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_set_opt(fs_info->mount_opt, SSD); } - if (btrfs_super_log_root(disk_super) != 0) { + /* do not make disk changes in broken FS */ + if (btrfs_super_log_root(disk_super) != 0 && + !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { @@ -2464,8 +2485,28 @@ int close_ctree(struct btrfs_root *root) smp_mb(); btrfs_put_block_group_cache(fs_info); + + /* + * Here come 2 situations when btrfs is broken to flip readonly: + * + * 1. when btrfs flips readonly somewhere else before + * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, + * and btrfs will skip to write sb directly to keep + * ERROR state on disk. + * + * 2. when btrfs flips readonly just in btrfs_commit_super, + * and in such case, btrfs cannnot write sb via btrfs_commit_super, + * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, + * btrfs will cleanup all FS resources first and write sb then. 
+ */ if (!(fs_info->sb->s_flags & MS_RDONLY)) { - ret = btrfs_commit_super(root); + ret = btrfs_commit_super(root); + if (ret) + printk(KERN_ERR "btrfs: commit super ret %d\n", ret); + } + + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + ret = btrfs_error_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } @@ -2641,6 +2682,352 @@ out: return 0; } +static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, + int read_only) +{ + if (read_only) + return; + + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) + printk(KERN_WARNING "warning: mount fs with errors, " + "running btrfsck is recommended\n"); +} + +int btrfs_error_commit_super(struct btrfs_root *root) +{ + int ret; + + mutex_lock(&root->fs_info->cleaner_mutex); + btrfs_run_delayed_iputs(root); + mutex_unlock(&root->fs_info->cleaner_mutex); + + down_write(&root->fs_info->cleanup_work_sem); + up_write(&root->fs_info->cleanup_work_sem); + + /* cleanup FS via transaction */ + btrfs_cleanup_transaction(root); + + ret = write_ctree_super(NULL, root, 0); + + return ret; +} + +static int btrfs_destroy_ordered_operations(struct btrfs_root *root) +{ + struct btrfs_inode *btrfs_inode; + struct list_head splice; + + INIT_LIST_HEAD(&splice); + + mutex_lock(&root->fs_info->ordered_operations_mutex); + spin_lock(&root->fs_info->ordered_extent_lock); + + list_splice_init(&root->fs_info->ordered_operations, &splice); + while (!list_empty(&splice)) { + btrfs_inode = list_entry(splice.next, struct btrfs_inode, + ordered_operations); + + list_del_init(&btrfs_inode->ordered_operations); + + btrfs_invalidate_inodes(btrfs_inode->root); + } + + spin_unlock(&root->fs_info->ordered_extent_lock); + mutex_unlock(&root->fs_info->ordered_operations_mutex); + + return 0; +} + +static int btrfs_destroy_ordered_extents(struct btrfs_root *root) +{ + struct list_head splice; + struct btrfs_ordered_extent *ordered; + struct inode *inode; + + INIT_LIST_HEAD(&splice); + + spin_lock(&root->fs_info->ordered_extent_lock); + + list_splice_init(&root->fs_info->ordered_extents, &splice); + while (!list_empty(&splice)) { + ordered = list_entry(splice.next, struct btrfs_ordered_extent, + root_extent_list); + + list_del_init(&ordered->root_extent_list); + atomic_inc(&ordered->refs); + + /* the inode may be getting freed (in sys_unlink path). 
*/ + inode = igrab(ordered->inode); + + spin_unlock(&root->fs_info->ordered_extent_lock); + if (inode) + iput(inode); + + atomic_set(&ordered->refs, 1); + btrfs_put_ordered_extent(ordered); + + spin_lock(&root->fs_info->ordered_extent_lock); + } + + spin_unlock(&root->fs_info->ordered_extent_lock); + + return 0; +} + +static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, + struct btrfs_root *root) +{ + struct rb_node *node; + struct btrfs_delayed_ref_root *delayed_refs; + struct btrfs_delayed_ref_node *ref; + int ret = 0; + + delayed_refs = &trans->delayed_refs; + + spin_lock(&delayed_refs->lock); + if (delayed_refs->num_entries == 0) { + printk(KERN_INFO "delayed_refs has NO entry\n"); + return ret; + } + + node = rb_first(&delayed_refs->root); + while (node) { + ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); + node = rb_next(node); + + ref->in_tree = 0; + rb_erase(&ref->rb_node, &delayed_refs->root); + delayed_refs->num_entries--; + + atomic_set(&ref->refs, 1); + if (btrfs_delayed_ref_is_head(ref)) { + struct btrfs_delayed_ref_head *head; + + head = btrfs_delayed_node_to_head(ref); + mutex_lock(&head->mutex); + kfree(head->extent_op); + delayed_refs->num_heads--; + if (list_empty(&head->cluster)) + delayed_refs->num_heads_ready--; + list_del_init(&head->cluster); + mutex_unlock(&head->mutex); + } + + spin_unlock(&delayed_refs->lock); + btrfs_put_delayed_ref(ref); + + cond_resched(); + spin_lock(&delayed_refs->lock); + } + + spin_unlock(&delayed_refs->lock); + + return ret; +} + +static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t) +{ + struct btrfs_pending_snapshot *snapshot; + struct list_head splice; + + INIT_LIST_HEAD(&splice); + + list_splice_init(&t->pending_snapshots, &splice); + + while (!list_empty(&splice)) { + snapshot = list_entry(splice.next, + struct btrfs_pending_snapshot, + list); + + list_del_init(&snapshot->list); + + kfree(snapshot); + } + + return 0; +} + +static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) +{ + struct btrfs_inode *btrfs_inode; + struct list_head splice; + + INIT_LIST_HEAD(&splice); + + list_splice_init(&root->fs_info->delalloc_inodes, &splice); + + spin_lock(&root->fs_info->delalloc_lock); + + while (!list_empty(&splice)) { + btrfs_inode = list_entry(splice.next, struct btrfs_inode, + delalloc_inodes); + + list_del_init(&btrfs_inode->delalloc_inodes); + + btrfs_invalidate_inodes(btrfs_inode->root); + } + + spin_unlock(&root->fs_info->delalloc_lock); + + return 0; +} + +static int btrfs_destroy_marked_extents(struct btrfs_root *root, + struct extent_io_tree *dirty_pages, + int mark) +{ + int ret; + struct page *page; + struct inode *btree_inode = root->fs_info->btree_inode; + struct extent_buffer *eb; + u64 start = 0; + u64 end; + u64 offset; + unsigned long index; + + while (1) { + ret = find_first_extent_bit(dirty_pages, start, &start, &end, + mark); + if (ret) + break; + + clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); + while (start <= end) { + index = start >> PAGE_CACHE_SHIFT; + start = (u64)(index + 1) << PAGE_CACHE_SHIFT; + page = find_get_page(btree_inode->i_mapping, index); + if (!page) + continue; + offset = page_offset(page); + + spin_lock(&dirty_pages->buffer_lock); + eb = radix_tree_lookup( + &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, + offset >> PAGE_CACHE_SHIFT); + spin_unlock(&dirty_pages->buffer_lock); + if (eb) { + ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, + &eb->bflags); + atomic_set(&eb->refs, 1); + } + if (PageWriteback(page)) + 
end_page_writeback(page); + + lock_page(page); + if (PageDirty(page)) { + clear_page_dirty_for_io(page); + spin_lock_irq(&page->mapping->tree_lock); + radix_tree_tag_clear(&page->mapping->page_tree, + page_index(page), + PAGECACHE_TAG_DIRTY); + spin_unlock_irq(&page->mapping->tree_lock); + } + + page->mapping->a_ops->invalidatepage(page, 0); + unlock_page(page); + } + } + + return ret; +} + +static int btrfs_destroy_pinned_extent(struct btrfs_root *root, + struct extent_io_tree *pinned_extents) +{ + struct extent_io_tree *unpin; + u64 start; + u64 end; + int ret; + + unpin = pinned_extents; + while (1) { + ret = find_first_extent_bit(unpin, 0, &start, &end, + EXTENT_DIRTY); + if (ret) + break; + + /* opt_discard */ + ret = btrfs_error_discard_extent(root, start, end + 1 - start); + + clear_extent_dirty(unpin, start, end, GFP_NOFS); + btrfs_error_unpin_extent_range(root, start, end); + cond_resched(); + } + + return 0; +} + +static int btrfs_cleanup_transaction(struct btrfs_root *root) +{ + struct btrfs_transaction *t; + LIST_HEAD(list); + + WARN_ON(1); + + mutex_lock(&root->fs_info->trans_mutex); + mutex_lock(&root->fs_info->transaction_kthread_mutex); + + list_splice_init(&root->fs_info->trans_list, &list); + while (!list_empty(&list)) { + t = list_entry(list.next, struct btrfs_transaction, list); + if (!t) + break; + + btrfs_destroy_ordered_operations(root); + + btrfs_destroy_ordered_extents(root); + + btrfs_destroy_delayed_refs(t, root); + + btrfs_block_rsv_release(root, + &root->fs_info->trans_block_rsv, + t->dirty_pages.dirty_bytes); + + /* FIXME: cleanup wait for commit */ + t->in_commit = 1; + t->blocked = 1; + if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) + wake_up(&root->fs_info->transaction_blocked_wait); + + t->blocked = 0; + if (waitqueue_active(&root->fs_info->transaction_wait)) + wake_up(&root->fs_info->transaction_wait); + mutex_unlock(&root->fs_info->trans_mutex); + + mutex_lock(&root->fs_info->trans_mutex); + t->commit_done = 1; + if (waitqueue_active(&t->commit_wait)) + wake_up(&t->commit_wait); + mutex_unlock(&root->fs_info->trans_mutex); + + mutex_lock(&root->fs_info->trans_mutex); + + btrfs_destroy_pending_snapshots(t); + + btrfs_destroy_delalloc_inodes(root); + + spin_lock(&root->fs_info->new_trans_lock); + root->fs_info->running_transaction = NULL; + spin_unlock(&root->fs_info->new_trans_lock); + + btrfs_destroy_marked_extents(root, &t->dirty_pages, + EXTENT_DIRTY); + + btrfs_destroy_pinned_extent(root, + root->fs_info->pinned_extents); + + t->use_count = 0; + list_del_init(&t->list); + memset(t, 0, sizeof(*t)); + kmem_cache_free(btrfs_transaction_cachep, t); + } + + mutex_unlock(&root->fs_info->transaction_kthread_mutex); + mutex_unlock(&root->fs_info->trans_mutex); + + return 0; +} + static struct extent_io_ops btree_extent_io_ops = { .write_cache_pages_lock_hook = btree_lock_page_hook, .readpage_end_io_hook = btree_readpage_end_io_hook, diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 88e825a0bf21..07b20dc2fd95 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -52,6 +52,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_commit_super(struct btrfs_root *root); +int btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, diff --git 
a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 055b837eab19..bcf303204f7f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8642,3 +8642,14 @@ out: btrfs_free_path(path); return ret; } + +int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) +{ + return unpin_extent_range(root, start, end); +} + +int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, + u64 num_bytes) +{ + return btrfs_discard_extent(root, bytenr, num_bytes); +} diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 05df688c96f4..f903433f5bdf 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -892,6 +892,17 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, if (err) goto out; + /* + * If BTRFS flips readonly due to some impossible error + * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), + * although we have opened a file as writable, we have + * to stop this write operation to ensure FS consistency. + */ + if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + err = -EROFS; + goto out; + } + file_update_time(file); BTRFS_I(inode)->sequence++; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2963376e77f4..52e903b0a293 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -54,6 +54,90 @@ static const struct super_operations btrfs_super_ops; +static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno, + char nbuf[16]) +{ + char *errstr = NULL; + + switch (errno) { + case -EIO: + errstr = "IO failure"; + break; + case -ENOMEM: + errstr = "Out of memory"; + break; + case -EROFS: + errstr = "Readonly filesystem"; + break; + default: + if (nbuf) { + if (snprintf(nbuf, 16, "error %d", -errno) >= 0) + errstr = nbuf; + } + break; + } + + return errstr; +} + +static void __save_error_info(struct btrfs_fs_info *fs_info) +{ + /* + * today we only save the error info into ram. Long term we'll + * also send it down to the disk + */ + fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR; +} + +/* NOTE: + * We move write_super stuff at umount in order to avoid deadlock + * for umount hold all lock. + */ +static void save_error_info(struct btrfs_fs_info *fs_info) +{ + __save_error_info(fs_info); +} + +/* btrfs handle error by forcing the filesystem readonly */ +static void btrfs_handle_error(struct btrfs_fs_info *fs_info) +{ + struct super_block *sb = fs_info->sb; + + if (sb->s_flags & MS_RDONLY) + return; + + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + sb->s_flags |= MS_RDONLY; + printk(KERN_INFO "btrfs is forced readonly\n"); + } +} + +/* + * __btrfs_std_error decodes expected errors from the caller and + * invokes the approciate error response. + */ +void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno) +{ + struct super_block *sb = fs_info->sb; + char nbuf[16]; + const char *errstr; + + /* + * Special case: if the error is EROFS, and we're already + * under MS_RDONLY, then it is safe here. 
+ */ + if (errno == -EROFS && (sb->s_flags & MS_RDONLY)) + return; + + errstr = btrfs_decode_error(fs_info, errno, nbuf); + printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n", + sb->s_id, function, line, errstr); + save_error_info(fs_info); + + btrfs_handle_error(fs_info); +} + static void btrfs_put_super(struct super_block *sb) { struct btrfs_root *root = btrfs_sb(sb); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 29e30d832ec9..bae5c7b8bbe2 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -181,6 +181,9 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, struct btrfs_trans_handle *h; struct btrfs_transaction *cur_trans; int ret; + + if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) + return ERR_PTR(-EROFS); again: h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); if (!h) -- cgit v1.2.2 From 8eb2d829ffea3677c21bd038f19e5d8ca6b43e36 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:48:01 +0800 Subject: btrfs: Fix threshold calculation for block groups smaller than 1GB If a block group is smaller than 1GB, the extent entry threadhold calculation will always set the threshold to 0. So as free space gets fragmented, btrfs will switch to use bitmap to manage free space, but then will never switch back to extents due to this bug. Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 60d684266959..42f4015988ec 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1016,14 +1016,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; + u64 size = block_group->key.offset; /* * The goal is to keep the total amount of memory used per 1gb of space * at or below 32k, so we need to adjust how much memory we allow to be * used by extent based free space tracking */ - max_bytes = MAX_CACHE_BYTES_PER_GIG * - (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); + if (size < 1024 * 1024 * 1024) + max_bytes = MAX_CACHE_BYTES_PER_GIG; + else + max_bytes = MAX_CACHE_BYTES_PER_GIG * + div64_u64(size, 1024 * 1024 * 1024); /* * we want to account for 1 more bitmap than what we have so we can make -- cgit v1.2.2 From edf6e2d1ddbac7f326b34a27adbca71ece53ccce Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:50:07 +0800 Subject: btrfs: Add helper function free_bitmap() Remove some duplicated code. This prepares for the next patch. 
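With the helper in place, each call site that may drain a bitmap collapses to the same two lines, so the unlink/kfree/recalculate sequence can no longer drift out of sync between callers:

	if (!bitmap_info->bytes)
		free_bitmap(block_group, bitmap_info);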
Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 42f4015988ec..850104f05178 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1175,6 +1175,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group, recalculate_thresholds(block_group); } +static void free_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *bitmap_info) +{ + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); +} + static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) @@ -1215,13 +1225,8 @@ again: if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); - if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1254,13 +1259,8 @@ again: return -EAGAIN; goto again; - } else if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + } else if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); return 0; } @@ -1689,13 +1689,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, ret = offset; if (entry->bitmap) { bitmap_clear_bits(block_group, entry, offset, bytes); - if (!entry->bytes) { - unlink_free_space(block_group, entry); - kfree(entry->bitmap); - kfree(entry); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + if (!entry->bytes) + free_bitmap(block_group, entry); } else { unlink_free_space(block_group, entry); entry->offset += bytes; -- cgit v1.2.2 From 70b7da304f9f9bbf1566085155895e32e775a745 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:51:45 +0800 Subject: btrfs: Free fully occupied bitmap in cluster If there's no more free space in a bitmap, we should free it. Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 850104f05178..cb0137e4047f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1788,6 +1788,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, ret = search_start; bitmap_clear_bits(block_group, entry, ret, bytes); + if (entry->bytes == 0) + free_bitmap(block_group, entry); out: spin_unlock(&cluster->lock); spin_unlock(&block_group->tree_lock); -- cgit v1.2.2 From 5e71b5d5ec07e4b3fb4c78c4e4b108ff667f123f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:55:34 +0800 Subject: btrfs: Update stats when allocating from a cluster When allocating extent entry from a cluster, we should update the free_space and free_extents fields of the block group. 
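The wrinkle is lock ordering: the entry is carved up under cluster->lock, but free_space and free_extents are protected by block_group->tree_lock, so the accounting has to happen after the cluster lock is dropped. Condensed from the hunk below:

	spin_lock(&cluster->lock);
	/* ... locate the entry and take 'bytes' from it ... */
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&block_group->tree_lock);
	block_group->free_space -= bytes;
	if (entry->bytes == 0) {
		block_group->free_extents--;
		kfree(entry);	/* freeing the entry now happens under tree_lock */
	}
	spin_unlock(&block_group->tree_lock);

	return ret;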
Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index cb0137e4047f..2974c4744d5c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1843,15 +1843,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, entry->offset += bytes; entry->bytes -= bytes; - if (entry->bytes == 0) { + if (entry->bytes == 0) rb_erase(&entry->offset_index, &cluster->root); - kfree(entry); - } break; } out: spin_unlock(&cluster->lock); + if (!ret) + return 0; + + spin_lock(&block_group->tree_lock); + + block_group->free_space -= bytes; + if (entry->bytes == 0) { + block_group->free_extents--; + kfree(entry); + } + + spin_unlock(&block_group->tree_lock); + return ret; } -- cgit v1.2.2 From 120d66eec0dcb966fbd03f743598b2ff2513436b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:56:50 +0800 Subject: btrfs: Add a helper try_merge_free_space() When adding a new extent, we'll firstly see if we can merge this extent to the left or/and right extent. Extract this as a helper try_merge_free_space(). As a side effect, we fix a small bug that if the new extent has non-bitmap left entry but is unmergeble, we'll directly link the extent without trying to drop it into bitmap. This also prepares for the next patch. Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 75 ++++++++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 32 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2974c4744d5c..cf67dc3b7bf8 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1363,22 +1363,14 @@ out: return ret; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +bool try_merge_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) { - struct btrfs_free_space *right_info = NULL; - struct btrfs_free_space *left_info = NULL; - struct btrfs_free_space *info = NULL; - int ret = 0; - - info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); - if (!info) - return -ENOMEM; - - info->offset = offset; - info->bytes = bytes; - - spin_lock(&block_group->tree_lock); + struct btrfs_free_space *left_info; + struct btrfs_free_space *right_info; + bool merged = false; + u64 offset = info->offset; + u64 bytes = info->bytes; /* * first we want to see if there is free space adjacent to the range we @@ -1392,27 +1384,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, else left_info = tree_search_offset(block_group, offset - 1, 0, 0); - /* - * If there was no extent directly to the left or right of this new - * extent then we know we're going to have to allocate a new extent, so - * before we do that see if we need to drop this into a bitmap - */ - if ((!left_info || left_info->bitmap) && - (!right_info || right_info->bitmap)) { - ret = insert_into_bitmap(block_group, info); - - if (ret < 0) { - goto out; - } else if (ret) { - ret = 0; - goto out; - } - } - if (right_info && !right_info->bitmap) { unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); + merged = true; } if (left_info && !left_info->bitmap && @@ -1421,8 +1397,43 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, info->offset = 
left_info->offset; info->bytes += left_info->bytes; kfree(left_info); + merged = true; } + return merged; +} + +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + struct btrfs_free_space *info; + int ret = 0; + + info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + if (!info) + return -ENOMEM; + + info->offset = offset; + info->bytes = bytes; + + spin_lock(&block_group->tree_lock); + + if (try_merge_free_space(block_group, info)) + goto link; + + /* + * There was no extent directly to the left or right of this new + * extent then we know we're going to have to allocate a new extent, so + * before we do that see if we need to drop this into a bitmap + */ + ret = insert_into_bitmap(block_group, info); + if (ret < 0) { + goto out; + } else if (ret) { + ret = 0; + goto out; + } +link: ret = link_free_space(block_group, info); if (ret) kfree(info); -- cgit v1.2.2 From f333adb5d64bc1c4d6099072fc341c3c8f84e0cf Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:57:39 +0800 Subject: btrfs: Check mergeable free space when removing a cluster After returing extents from a cluster to the block group, some extents in the block group may be mergeable. Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index cf67dc3b7bf8..a5501edc3c9f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, return entry; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static inline void +__unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) { rb_erase(&info->offset_index, &block_group->free_space_offset); block_group->free_extents--; +} + +static void unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) +{ + __unlink_free_space(block_group, info); block_group->free_space -= info->bytes; } @@ -1364,7 +1371,7 @@ out: } bool try_merge_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) + struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; struct btrfs_free_space *right_info; @@ -1385,7 +1392,10 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, left_info = tree_search_offset(block_group, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { - unlink_free_space(block_group, right_info); + if (update_stat) + unlink_free_space(block_group, right_info); + else + __unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); merged = true; @@ -1393,7 +1403,10 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { - unlink_free_space(block_group, left_info); + if (update_stat) + unlink_free_space(block_group, left_info); + else + __unlink_free_space(block_group, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kfree(left_info); @@ -1418,7 +1431,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, spin_lock(&block_group->tree_lock); - if (try_merge_free_space(block_group, info)) + if (try_merge_free_space(block_group, 
info, true)) goto link; /* @@ -1636,6 +1649,7 @@ __btrfs_return_cluster_to_free_space( node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); BUG_ON(entry->bitmap); + try_merge_free_space(block_group, entry, false); tree_insert_offset(&block_group->free_space_offset, entry->offset, &entry->offset_index, 0); } -- cgit v1.2.2 From 83a4d54840c88a4a45c49670f044b8c7ddeaa8c7 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 27 Dec 2010 16:19:53 +0800 Subject: Btrfs: Fix memory leak at umount fs_info, which is allocated in open_ctree(), should be freed in close_ctree(). Signed-off-by: Li Zefan --- fs/btrfs/disk-io.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a5d2249e6da5..089871e5cd5a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2513,6 +2513,8 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->chunk_root); kfree(fs_info->dev_root); kfree(fs_info->csum_root); + kfree(fs_info); + return 0; } -- cgit v1.2.2 From bdc924bb4cdac92b945945c3149ab8191c92d75d Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Mon, 27 Dec 2010 16:33:15 +0800 Subject: Btrfs: Fix memory leak on finding existing super We missed a memory deallocation in commit 450ba0ea. If an existing super block is found at mount and there is no error condition then the pre-allocated tree_root and fs_info are no not used and are not freeded. Signed-off-by: Ian Kent Signed-off-by: Li Zefan --- fs/btrfs/super.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 61bd79abb805..f50253c2279d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -654,6 +654,8 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, } btrfs_close_devices(fs_devices); + kfree(fs_info); + kfree(tree_root); } else { char b[BDEVNAME_SIZE]; -- cgit v1.2.2 From 3f3d0bc0df041236fad4ffa82188a6e4ef9af75e Mon Sep 17 00:00:00 2001 From: Tero Roponen Date: Mon, 27 Dec 2010 16:43:13 +0800 Subject: Btrfs: Free correct pointer after using strsep We must save and free the original kstrdup()'ed pointer because strsep() modifies its first argument. Signed-off-by: Tero Roponen Signed-off-by: Li Zefan --- fs/btrfs/super.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f50253c2279d..78ee681465af 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -277,7 +277,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; - char *opts, *p; + char *opts, *orig, *p; int error = 0; int intarg; @@ -291,6 +291,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, opts = kstrdup(options, GFP_KERNEL); if (!opts) return -ENOMEM; + orig = opts; while ((p = strsep(&opts, ",")) != NULL) { int token; @@ -326,7 +327,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, } out_free_opts: - kfree(opts); + kfree(orig); out: /* * If no subvolume name is specified we use the default one. 
Allocate -- cgit v1.2.2 From d0f69686c2ae775529aadc7a8acc6f13ad41de66 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Tue, 25 Jan 2011 15:46:17 +0800 Subject: Btrfs: Don't return acl info when mounting with noacl option Steps to reproduce: # mkfs.btrfs /dev/sda2 # mount /dev/sda2 /mnt # touch /mnt/file0 # setfacl -m 'u:root:x,g::x,o::x' /mnt/file0 # umount /mnt # mount /dev/sda2 -o noacl /mnt # getfacl /mnt/file0 ... user::rw- user:root:--x group::--x mask::--x other::--x The output should be: user::rw- group::--x other::--x Signed-off-by: Miao Xie Signed-off-by: Li Zefan --- fs/btrfs/acl.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 2222d161c7b6..3c52fc8afe29 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) char *value = NULL; struct posix_acl *acl; + if (!IS_POSIXACL(inode)) + return NULL; + acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; @@ -82,6 +85,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name, struct posix_acl *acl; int ret = 0; + if (!IS_POSIXACL(dentry->d_inode)) + return -EOPNOTSUPP; + acl = btrfs_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) -- cgit v1.2.2 From b897abec032deb7cc3ce67392a1f544ac965ddea Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 26 Jan 2011 16:19:22 +0800 Subject: Btrfs: Fix memory leak in writepage fixup work fixup, which is allocated when starting page write to fix up the extent without ORDERED bit set, should be freed after this work is done. Signed-off-by: Miao Xie Signed-off-by: Li Zefan --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f9194438f7c..3a6edc4c5642 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1544,6 +1544,7 @@ out: out_page: unlock_page(page); page_cache_release(page); + kfree(fixup); } /* -- cgit v1.2.2 From 4d728ec7aefdca5419d2ebfb28c147e81a4b59f4 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 26 Jan 2011 14:10:43 +0800 Subject: Btrfs: Fix file clone when source offset is not 0 Suppose: - the source extent is: [0, 100] - the src offset is 10 - the clone length is 90 - the dest offset is 0 This statement: new_key.offset = key.offset + destoff - off will produce such an extent for the dest file: [ino, BTRFS_EXTENT_DATA_KEY, -10] , which is obviously wrong. Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f87552a1d7ea..1b61dab64062 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1788,7 +1788,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, memcpy(&new_key, &key, sizeof(new_key)); new_key.objectid = inode->i_ino; - new_key.offset = key.offset + destoff - off; + if (off <= key.offset) + new_key.offset = key.offset + destoff - off; + else + new_key.offset = destoff; trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { -- cgit v1.2.2 From 6b82ce8d824bd46053e46a895876cde39d9026e4 Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 26 Jan 2011 06:21:39 +0000 Subject: btrfs: fix uncheck memory allocation in btrfs_submit_compressed_read btrfs_submit_compressed_read() is lack of memory allocation checks and corresponding error route. 
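The repair follows the kernel's staged-goto unwind idiom: each successful allocation extends the cleanup ladder by one label, so a failure at any depth frees exactly what has been taken so far. In skeleton form (condensed from the patch below; the per-page allocation loop is elided):

	int ret = -ENOMEM;

	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;

	/* ... build and submit the bio ... */
	return 0;

fail2:
	for (page_index = 0; page_index < nr_pages; page_index++)
		free_page((unsigned long)cb->compressed_pages[page_index]);
	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;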
After this fix, if it comes to "no memory" case, errno will be returned to userland step by step, and tell users this operation cannot go on. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 25 +++++++++++++++++++++++-- fs/btrfs/extent_io.c | 4 ++-- 2 files changed, 25 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f745287fbf2e..3a932f183da1 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, u64 em_len; u64 em_start; struct extent_map *em; - int ret; + int ret = -ENOMEM; u32 *sums; tree = &BTRFS_I(inode)->io_tree; @@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, compressed_len = em->block_len; cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); + if (!cb) + goto out; + atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; @@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; - cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages, + cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + if (!cb->compressed_pages) + goto fail1; + bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; for (page_index = 0; page_index < nr_pages; page_index++) { cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!cb->compressed_pages[page_index]) + goto fail2; } cb->nr_pages = nr_pages; @@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); + if (!comp_bio) + goto fail2; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); @@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bio_put(comp_bio); return 0; + +fail2: + for (page_index = 0; page_index < nr_pages; page_index++) + free_page((unsigned long)cb->compressed_pages[page_index]); + + kfree(cb->compressed_pages); +fail1: + kfree(cb); +out: + free_extent_map(em); + return ret; } static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8b8d3d99ae68..6411ed6ca449 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, bio_get(bio); if (tree->ops && tree->ops->submit_bio_hook) - tree->ops->submit_bio_hook(page->mapping->host, rw, bio, + ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, mirror_num, bio_flags, start); else submit_bio(rw, bio); @@ -2126,7 +2126,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); if (bio) - submit_one_bio(READ, bio, 0, bio_flags); + ret = submit_one_bio(READ, bio, 0, bio_flags); return ret; } -- cgit v1.2.2 From 2a29edc6b60a5248ccab588e7ba7dad38cef0235 Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 26 Jan 2011 06:22:08 +0000 Subject: btrfs: fix several uncheck memory allocations To make btrfs more stable, add several missing necessary memory allocation checks, and when no memory, return proper errno. 
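Most of the additions instantiate one pattern: test the allocator's result at the point of allocation and turn failure into -ENOMEM before the pointer can be touched. The canonical form, repeated across these hunks:

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;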
We've checked that some of those -ENOMEM errors will be returned to userspace, others will be caught by BUG_ON() in the upper callers, and none will be ignored silently. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/export.c | 2 ++ fs/btrfs/file-item.c | 2 ++ fs/btrfs/file.c | 4 ++++ fs/btrfs/tree-log.c | 25 +++++++++++++++++++++++++ 4 files changed, 33 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 6f0444473594..3220ad1aafc8 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -176,6 +176,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child) int ret; path = btrfs_alloc_path(); + if (!path) + return ERR_PTR(-ENOMEM); if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a562a250ae77..d0bc72657cd7 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, root = root->fs_info->csum_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f903433f5bdf..65b2424a4116 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -945,6 +945,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / (sizeof(struct page *))); pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + goto out; + } /* generic_write_checks can change our pos */ start_pos = pos; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 054744ac5719..c25a41d86118 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); + if (!dst_copy || !src_copy) { + btrfs_release_path(root, path); + kfree(dst_copy); + kfree(src_copy); + return -ENOMEM; + } read_extent_buffer(eb, src_copy, src_ptr, item_size); @@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, btrfs_dir_item_key_to_cpu(leaf, di, &location); name_len = btrfs_dir_name_len(leaf, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); btrfs_release_path(root, path); @@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log, int match = 0; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + ret = btrfs_search_slot(NULL, log, key, path, 0, 0); if (ret != 0) goto out; @@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, key.offset = (u64)-1; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + log_type = btrfs_dir_type(eb, di); read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len); @@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, root_owner = btrfs_header_owner(parent); next = btrfs_find_create_tree_block(root, bytenr, blocksize); + if (!next) + return -ENOMEM; if (*level == 1) { wc->process_func(root, next, wc, ptr_gen); @@ -2194,6 +2213,9 @@ int
btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, name, name_len, -1); if (IS_ERR(di)) { @@ -2594,6 +2616,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); + if (!ins_data) + return -ENOMEM; + ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); -- cgit v1.2.2 From 333e8105445d4f51101fc3d23199a919d66730b3 Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 26 Jan 2011 06:22:33 +0000 Subject: btrfs: fix missing break in switch statement A break is missing from a switch statement; add it. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/print-tree.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 0d126be22b63..fb2605d998e9 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) #else BUG(); #endif + break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, struct btrfs_block_group_item); -- cgit v1.2.2 From 34d19bada00f4825588b338a8ee193820f9ceeb0 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 24 Jan 2011 19:55:19 +0000 Subject: fs/btrfs/inode.c: Add missing IS_ERR test After the conditional that precedes the following code, inode may be an ERR_PTR value. This can, e.g., result from a memory allocation failure via the call to btrfs_iget, and thus does not imply that root is different from sub_root. Thus, an IS_ERR check is added to ensure that there is no dereference of inode in this case. The semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @r@ identifier f; @@ f(...) { ... return ERR_PTR(...); } @@ identifier r.f, fld; expression x; statement S1,S2; @@ x = f(...) ... when != IS_ERR(x) ( if (IS_ERR(x) ||...) S1 else S2 | *x->fld ) // Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2c9a2f7d5631..2b7d251d6ad1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4137,7 +4137,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) } srcu_read_unlock(&root->fs_info->subvol_srcu, index); - if (root != sub_root) { + if (!IS_ERR(inode) && root != sub_root) { down_read(&root->fs_info->cleanup_work_sem); if (!(inode->i_sb->s_flags & MS_RDONLY)) btrfs_orphan_cleanup(sub_root); -- cgit v1.2.2 From 3612b49598c303cfb22a4b609427f829828e2427 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Tue, 25 Jan 2011 02:51:38 +0000 Subject: btrfs: fix return value check of btrfs_join_transaction() Add the missing error checks for btrfs_join_transaction()/btrfs_join_transaction_nolock(), and correct the mistaken error checks in several places. To make Btrfs more stable, I think we should reduce the use of BUG_ON(), but that will take a long time, so I propose this patch as a short-term solution. With this patch: - The places that should be corrected to make Btrfs more stable are clarified. - We no longer panic on a NULL pointer dereference etc. (even if the number of BUG_ON() calls increases temporarily.) - The error code is returned in the places where the error can easily be returned.
As a long-term plan: - BUG_ON() is reduced by using the forced-readonly framework, etc. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 5 +++++ fs/btrfs/extent-tree.c | 2 +- fs/btrfs/inode.c | 24 ++++++++++++++++-------- fs/btrfs/ioctl.c | 2 +- fs/btrfs/relocation.c | 26 +++++++++++++++++++++++--- fs/btrfs/transaction.c | 5 +++++ 6 files changed, 51 insertions(+), 13 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2887b8be6fdd..b36eeef19194 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1550,6 +1550,7 @@ static int transaction_kthread(void *arg) spin_unlock(&root->fs_info->new_trans_lock); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); @@ -2464,10 +2465,14 @@ int btrfs_commit_super(struct btrfs_root *root) up_write(&root->fs_info->cleanup_work_sem); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index bcf303204f7f..98ee139885cc 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7478,7 +7478,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) BUG_ON(reloc_root->commit_root != NULL); while (1) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, reloc_root); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2b7d251d6ad1..40fee137dd11 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -416,7 +416,7 @@ again: } if (start == 0) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -612,6 +612,7 @@ retry: GFP_NOFS); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -771,7 +772,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1049,7 +1050,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, } else { trans = btrfs_join_transaction(root, 1); } - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; cur_offset = start; @@ -1704,7 +1705,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); @@ -1721,6 +1722,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); 
trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -2382,6 +2384,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) if (root->orphan_block_rsv || root->orphan_item_inserted) { trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_end_transaction(trans, root); } @@ -4350,6 +4353,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); @@ -4375,6 +4380,7 @@ void btrfs_dirty_inode(struct inode *inode) return; trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); @@ -5179,6 +5185,8 @@ again: em = NULL; btrfs_release_path(root, path); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return ERR_CAST(trans); goto again; } map = kmap(page); @@ -5283,8 +5291,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); trans = btrfs_join_transaction(root, 0); - if (!trans) - return ERR_PTR(-ENOMEM); + if (IS_ERR(trans)) + return ERR_CAST(trans); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -5508,7 +5516,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * while we look for nocow cross refs */ trans = btrfs_join_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto must_cow; if (can_nocow_odirect(trans, inode, start, len) == 1) { @@ -5643,7 +5651,7 @@ again: BUG_ON(!ordered); trans = btrfs_join_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { err = -ENOMEM; goto out; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index edd82becbb9e..04b4fb9144a9 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 045c9c2b2d7e..ea9965430241 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2147,6 +2147,12 @@ again: } trans = btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + if (!err) + btrfs_block_rsv_release(rc->extent_root, + rc->block_rsv, num_bytes); + return PTR_ERR(trans); + } if (!err) { if (num_bytes != rc->merging_rsv_size) { @@ -3222,6 +3228,7 @@ truncate: trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); + ret = PTR_ERR(trans); goto out; } @@ -3628,6 +3635,7 @@ int prepare_to_relocate(struct reloc_control *rc) set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); + BUG_ON(IS_ERR(trans)); btrfs_commit_transaction(trans, rc->extent_root); return 0; } @@ -3804,7 +3812,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) /* get rid of pinned extents */ trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); out_free: btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); btrfs_free_path(path); @@ -4125,6 +4136,11 @@ int btrfs_recover_relocation(struct btrfs_root *root) set_reloc_control(rc); trans = 
btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + unset_reloc_control(rc); + err = PTR_ERR(trans); + goto out_free; + } rc->merge_reloc_tree = 1; @@ -4154,9 +4170,13 @@ int btrfs_recover_relocation(struct btrfs_root *root) unset_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); -out: + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); +out_free: kfree(rc); +out: while (!list_empty(&reloc_roots)) { reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index bae5c7b8bbe2..3d73c8d93bbb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; ac->newtrans = btrfs_join_transaction(root, 0); + if (IS_ERR(ac->newtrans)) { + int err = PTR_ERR(ac->newtrans); + kfree(ac); + return err; + } /* take transaction reference */ mutex_lock(&root->fs_info->trans_mutex); -- cgit v1.2.2 From abd30bb0af9d4671506502278e8631bed9e3c35c Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 24 Jan 2011 00:57:10 +0000 Subject: btrfs: check return value of btrfs_start_ioctl_transaction() properly btrfs_start_ioctl_transaction() returns ERR_PTR(), not NULL. So, it is necessary to use IS_ERR() to check the return value. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 04b4fb9144a9..12dabe28cf54 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2085,7 +2085,7 @@ static long btrfs_ioctl_trans_start(struct file *file) ret = -ENOMEM; trans = btrfs_start_ioctl_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto out_drop; file->private_data = trans; -- cgit v1.2.2 From dedefd7215d3ec451291ca393e5c8e4c1882c8c6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 24 Jan 2011 21:43:18 +0000 Subject: Btrfs: fix check_path_shared so it returns the right value When running xfstests 224 I kept getting ENOSPC when trying to remove the files, and this is because we were returning ret from check_path_shared while it was uninitialized, which isn't right. Fix this to return 0 properly, and now xfstests 224 doesn't freak out when it tries to clean itself up. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 40fee137dd11..5621818921f8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2718,9 +2718,10 @@ static int check_path_shared(struct btrfs_root *root, struct extent_buffer *eb; int level; u64 refs = 1; - int uninitialized_var(ret); for (level = 0; level < BTRFS_MAX_LEVEL; level++) { + int ret; + if (!path->nodes[level]) break; eb = path->nodes[level]; @@ -2731,7 +2732,7 @@ static int check_path_shared(struct btrfs_root *root, if (refs > 1) return 1; } - return ret; /* XXX callers?
*/ + return 0; } /* -- cgit v1.2.2 From e9e22899de661af94cb9995885fd04e4c738838b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 24 Jan 2011 21:43:19 +0000 Subject: Btrfs: do not release more reserved bytes to the global_block_rsv than we need When we do btrfs_block_rsv_release, if global_block_rsv is not full we will release all the extra bytes to global_block_rsv, even if it's only a little short of the amount of space that we need to reserve. This causes us to starve ourselves of reservable space during the transaction, which will force us to shrink delalloc bytes and commit the transaction more often than we should. So instead just add the amount of bytes we need to add to the global reserve so reserved == size, and then add the rest back into the space_info for general use. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 98ee139885cc..7af618dcf2c0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3589,8 +3589,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, if (num_bytes > 0) { if (dest) { - block_rsv_add_bytes(dest, num_bytes, 0); - } else { + spin_lock(&dest->lock); + if (!dest->full) { + u64 bytes_to_add; + + bytes_to_add = dest->size - dest->reserved; + bytes_to_add = min(num_bytes, bytes_to_add); + dest->reserved += bytes_to_add; + if (dest->reserved >= dest->size) + dest->full = 1; + num_bytes -= bytes_to_add; + } + spin_unlock(&dest->lock); + } + if (num_bytes) { spin_lock(&space_info->lock); space_info->bytes_reserved -= num_bytes; spin_unlock(&space_info->lock); -- cgit v1.2.2 From 68a82277b8619e6d0f2738b1d9b160b627e81e92 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 24 Jan 2011 21:43:20 +0000 Subject: Btrfs: use the global block reserve if we cannot reserve space We call use_block_rsv right before we make an allocation in order to make sure we have enough space. Now normally people have called btrfs_start_transaction() with the appropriate amount of space that we need, so we just use some of that pre-reserved space and move along happily. The problem is where people use btrfs_join_transaction(), which doesn't actually reserve any space. So we try and reserve space here, but we cannot flush delalloc, so this forces us to return -ENOSPC when in reality we have plenty of space. The most common symptom is seeing a bunch of "couldn't dirty inode" messages in syslog. With xfstests 224 we end up falling back to start_transaction and then doing all the flush delalloc stuff, which causes us to hang for a very long time. So instead steal from the global reserve, which is what this is meant for anyway. With this patch and the other 2 I have sent, xfstests 224 now passes successfully.
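The idea, as a simplified sketch (condensed from the diff below, with the retry branch trimmed for illustration): if the transaction's own reserve cannot be refilled, borrow from the global reserve instead of failing:

	block_rsv = get_block_rsv(trans, root);
	ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, 0);
	if (ret && block_rsv != global_rsv) {
		/* we cannot flush delalloc from here, so steal from the
		 * global reserve rather than returning -ENOSPC */
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	if (ret)
		return ERR_PTR(ret);
	return block_rsv;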
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7af618dcf2c0..ff6bbfd75cf7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5646,6 +5646,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize) { struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; int ret; block_rsv = get_block_rsv(trans, root); @@ -5653,14 +5654,39 @@ use_block_rsv(struct btrfs_trans_handle *trans, if (block_rsv->size == 0) { ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, 0); - if (ret) + /* + * If we couldn't reserve metadata bytes try and use some from + * the global reserve. + */ + if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + return ERR_PTR(ret); + } else if (ret) { return ERR_PTR(ret); + } return block_rsv; } ret = block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; + if (ret) { + WARN_ON(1); + ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, + 0); + if (!ret) { + spin_lock(&block_rsv->lock); + block_rsv->size += blocksize; + spin_unlock(&block_rsv->lock); + return block_rsv; + } else if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + } + } return ERR_PTR(-ENOSPC); } -- cgit v1.2.2 From ad0397a7a97f55fd7f70998ec208c5d8b90310ff Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 28 Jan 2011 18:44:44 +0000 Subject: Btrfs: do error checking in btrfs_del_csums Got a report of a box panicking because we got a NULL eb in read_extent_buffer. His fs was borked and btrfs_search_path returned EIO, but we don't check for errors, so the box panicked. Yes I know this will just make something higher up the stack panic, but that's a problem for future Josef. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/file-item.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index d0bc72657cd7..4f19a3e1bf32 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -550,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, if (path->slots[0] == 0) goto out; path->slots[0]--; + } else if (ret < 0) { + goto out; } + leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); -- cgit v1.2.2 From 7adf5dfbb3af65a00e20b3ead224c3a1b40e4ec4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 25 Jan 2011 22:11:54 +0000 Subject: Btrfs: handle no memory properly in prepare_pages Instead of doing a BUG_ON(1) in prepare_pages if grab_cache_page() fails, just loop through the pages we've already grabbed, unlock and release them, then return -ENOMEM like we should.
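The general shape of the fix is the usual unwind-on-partial-failure pattern; a minimal sketch (my_grab_pages() is a hypothetical helper written for illustration, not a btrfs function):

	static int my_grab_pages(struct address_space *mapping,
				 struct page **pages, pgoff_t index,
				 int num_pages)
	{
		int i;

		for (i = 0; i < num_pages; i++) {
			pages[i] = grab_cache_page(mapping, index + i);
			if (!pages[i]) {
				/* release everything grabbed so far */
				while (--i >= 0) {
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
				}
				return -ENOMEM;
			}
		}
		return 0;
	}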
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/file.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 65b2424a4116..9e097fbfc78d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -792,8 +792,12 @@ again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { - err = -ENOMEM; - BUG_ON(1); + int c; + for (c = i - 1; c >= 0; c--) { + unlock_page(pages[c]); + page_cache_release(pages[c]); + } + return -ENOMEM; } wait_on_page_writeback(pages[i]); } -- cgit v1.2.2 From b1953bcec95c189b1eea690a08e89646d7750bda Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 21 Jan 2011 21:10:01 +0000 Subject: Btrfs: make shrink_delalloc a little friendlier Xfstests 224 will just sit there and spin forever until eventually we give up flushing delalloc and exit. On my box this took several hours. I could not interrupt this process either, even though we use INTERRUPTIBLE. So do two things: 1) keep us from looping over and over again without reclaiming anything, and 2) exit the loop if we get interrupted. I tested this and the test now exits in a reasonable amount of time, and can be interrupted with ctrl+c. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ff6bbfd75cf7..f96641a93fc9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3345,8 +3345,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 reserved; u64 max_reclaim; u64 reclaimed = 0; + long time_left; int pause = 1; int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; + int loops = 0; block_rsv = &root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; @@ -3359,7 +3361,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, max_reclaim = min(reserved, to_reclaim); - while (1) { + while (loops < 1024) { /* have the flusher threads jump in and do some IO */ smp_mb(); nr_pages = min_t(unsigned long, nr_pages, @@ -3367,8 +3369,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); - if (reserved > space_info->bytes_reserved) + if (reserved > space_info->bytes_reserved) { + loops = 0; reclaimed += reserved - space_info->bytes_reserved; + } else { + loops++; + } reserved = space_info->bytes_reserved; spin_unlock(&space_info->lock); @@ -3379,7 +3385,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, return -EAGAIN; __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(pause); + time_left = schedule_timeout(pause); + + /* We were interrupted, exit */ + if (time_left) + break; + pause <<= 1; if (pause > HZ / 10) pause = HZ / 10; -- cgit v1.2.2 From b31eabd86eb68d3c217e6821078249bc045e698a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 31 Jan 2011 16:48:24 -0500 Subject: Btrfs: catch errors from btrfs_sync_log btrfs_sync_log returns -EAGAIN when we need full transaction commits instead of small log commits, but sometimes we were dropping the return value. In practice, we check for this a few different ways, but this is still a bug that can leave off full log commits when we really need them.
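For reference, the way a caller is expected to react, roughly (a hedged sketch of the idea, not the exact fsync code):

	ret = btrfs_sync_log(trans, root);
	if (ret == -EAGAIN) {
		/* the tree log can't safely express this change;
		 * fall back to a full transaction commit */
		ret = btrfs_commit_transaction(trans, root);
	}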
Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c25a41d86118..42dfc3077040 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2051,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, wait_log_commit(trans, log_root_tree, log_root_tree->log_transid); mutex_unlock(&log_root_tree->log_mutex); + ret = 0; goto out; } atomic_set(&log_root_tree->log_commit[index2], 1); @@ -2115,7 +2116,7 @@ out: smp_mb(); if (waitqueue_active(&root->log_commit_wait[index1])) wake_up(&root->log_commit_wait[index1]); - return 0; + return ret; } static void free_log_tree(struct btrfs_trans_handle *trans, -- cgit v1.2.2 From c87fb6fdcaf7560940b31a0c78c3e6370e3433cf Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 31 Jan 2011 19:54:59 -0500 Subject: Btrfs: avoid uninit variable warnings in ordered-data.c This one isn't really an uninit variable, but for pretty obscure reasons. Let's make it clearly correct. Signed-off-by: Chris Mason --- fs/btrfs/ordered-data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 2b61e1ddcd99..083a55477375 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { struct rb_root *root = &tree->tree; - struct rb_node *prev; + struct rb_node *prev = NULL; struct rb_node *ret; struct btrfs_ordered_extent *entry; -- cgit v1.2.2 From 5df67083488ccbad925f583b698ab38f8629a016 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Tue, 1 Feb 2011 09:17:35 +0000 Subject: btrfs: check for NULL returns in some functions Because NULL is returned when a memory allocation fails, check the returned pointer for NULL.
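The pattern added at each call site is the standard one; for example (a sketch mirroring the first hunk below):

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;	/* propagate instead of dereferencing NULL */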
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 ++ fs/btrfs/extent_io.c | 2 ++ fs/btrfs/tree-log.c | 6 ++++++ 3 files changed, 10 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f96641a93fc9..9de4ff03882a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6496,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start, int ret = 0; ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; mutex_lock(&inode->i_mutex); first_index = start >> PAGE_CACHE_SHIFT; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6411ed6ca449..8862dda46ff6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1920,6 +1920,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, nr = bio_get_nr_vecs(bdev); bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); + if (!bio) + return -ENOMEM; bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 42dfc3077040..6d66e5caff97 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2751,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; dst_path = btrfs_alloc_path(); + if (!dst_path) { + btrfs_free_path(path); + return -ENOMEM; + } min_key.objectid = inode->i_ino; min_key.type = BTRFS_INODE_ITEM_KEY; -- cgit v1.2.2 From 98d5dc13e7e74b77ca3b4c3cbded9f48d2dbbbb7 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 20 Jan 2011 06:19:37 +0000 Subject: btrfs: fix return value check of btrfs_start_transaction() Add the missing error check of btrfs_start_transaction(), and correct the mistaken error checks in several places.
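The recurring confusion fixed throughout this series is between the kernel's two error-return conventions; a sketch of both (foo_alloc() is a placeholder name):

	struct foo *f = foo_alloc();	/* returns a pointer or NULL */
	if (!f)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);	/* returns a pointer or ERR_PTR, never NULL */
	if (IS_ERR(trans))
		return PTR_ERR(trans);	/* a '!trans' check here would never trigger */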
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 7 +++++-- fs/btrfs/inode.c | 1 + fs/btrfs/ioctl.c | 10 ++++++++-- fs/btrfs/relocation.c | 3 +++ fs/btrfs/super.c | 2 ++ fs/btrfs/tree-log.c | 1 + fs/btrfs/volumes.c | 19 +++++++++++++++++-- 7 files changed, 37 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9de4ff03882a..f07ba21cbf06 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6271,6 +6271,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, BUG_ON(!wc); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); + if (block_rsv) trans->block_rsv = block_rsv; @@ -6368,6 +6370,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, btrfs_end_transaction_throttle(trans, tree_root); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); if (block_rsv) trans->block_rsv = block_rsv; } @@ -7587,7 +7590,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) if (found) { trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); } @@ -7831,7 +7834,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); if (extent_key->objectid == 0) { ret = del_extent_zero(trans, extent_root, path, extent_key); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5621818921f8..36bc3f49ebf9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2357,6 +2357,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) */ if (is_bad_inode(inode)) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); btrfs_orphan_del(trans, inode); btrfs_end_transaction(trans, root); iput(inode); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 12dabe28cf54..02d224e8c83f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, if (new_size > old_size) { trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_unlock; + } ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans, root); } else { @@ -2141,9 +2145,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) path->leave_spinning = 1; trans = btrfs_start_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { btrfs_free_path(path); - return -ENOMEM; + return PTR_ERR(trans); } dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); @@ -2337,6 +2341,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp u64 transid; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); transid = trans->transid; btrfs_commit_transaction_async(trans, root, 0); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ea9965430241..1f5556acb530 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2028,6 +2028,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, while (1) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); trans->block_rsv = rc->block_rsv; ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, @@ -3665,6 +3666,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) while (1) { trans = btrfs_start_transaction(rc->extent_root, 0); + BUG_ON(IS_ERR(trans)); if 
(update_backref_cache(trans, &rc->backref_cache)) { btrfs_end_transaction(trans, rc->extent_root); @@ -4033,6 +4035,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) int ret; trans = btrfs_start_transaction(root->fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); memset(&root->root_item.drop_progress, 0, sizeof(root->root_item.drop_progress)); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f4e45fdded30..0209b5fc772c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -623,6 +623,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) btrfs_wait_ordered_extents(root, 0, 0); trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); return ret; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 6d66e5caff97..a4bbb854dfd2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3112,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) BUG_ON(!path); trans = btrfs_start_transaction(fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); wc.trans = trans; wc.pin = 1; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f2d2f4ccc738..7cad59353b09 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1212,6 +1212,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root, return -ENOMEM; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + btrfs_free_path(path); + return PTR_ERR(trans); + } key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; @@ -1604,6 +1608,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) } trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + kfree(device); + ret = PTR_ERR(trans); + goto error; + } + lock_chunks(root); device->barriers = 1; @@ -1872,7 +1882,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, return ret; trans = btrfs_start_transaction(root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); lock_chunks(root); @@ -2046,7 +2056,7 @@ int btrfs_balance(struct btrfs_root *dev_root) BUG_ON(ret); trans = btrfs_start_transaction(dev_root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_grow_device(trans, device, old_size); BUG_ON(ret); @@ -2212,6 +2222,11 @@ again: /* Shrinking succeeded, else we would be at "done". */ trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto done; + } + lock_chunks(root); device->disk_total_bytes = new_size; -- cgit v1.2.2 From 2a7dba391e5628ad665ce84ef9a6648da541ebab Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 1 Feb 2011 11:05:39 -0500 Subject: fs/vfs/security: pass last path component to LSM on inode creation SELinux would like to implement a new labeling behavior for newly created inodes. We currently label new inodes based on the parent and the creating process. This new behavior would also take into account the name of the new object when deciding the new label. This is not the (supposed) full path, just the last component of the path. This is very useful because creating /etc/shadow is different from creating /etc/passwd, but the kernel hooks are unable to differentiate these operations. We currently require that userspace realize it is doing some difficult operation like that and then jump through SELinux hoops to get things set up correctly.
This patch does not implement the new behavior (that is contained in a separate SELinux patch), but it does pass the needed name down to the correct LSM hook. If no such name exists, it is fine to pass NULL. Signed-off-by: Eric Paris --- fs/btrfs/inode.c | 13 +++++++------ fs/btrfs/xattr.c | 6 ++++-- fs/btrfs/xattr.h | 3 ++- 3 files changed, 13 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a0ff46a47895..49c04bec6a9d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -90,13 +90,14 @@ static noinline int cow_file_range(struct inode *inode, unsigned long *nr_written, int unlock); static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *dir) + struct inode *inode, struct inode *dir, + const struct qstr *qstr) { int err; err = btrfs_init_acl(trans, inode, dir); if (!err) - err = btrfs_xattr_security_init(trans, inode, dir); + err = btrfs_xattr_security_init(trans, inode, dir, qstr); return err; } @@ -4675,7 +4676,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (IS_ERR(inode)) goto out_unlock; - err = btrfs_init_inode_security(trans, inode, dir); + err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) { drop_inode = 1; goto out_unlock; @@ -4736,7 +4737,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, if (IS_ERR(inode)) goto out_unlock; - err = btrfs_init_inode_security(trans, inode, dir); + err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) { drop_inode = 1; goto out_unlock; @@ -4864,7 +4865,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) drop_on_err = 1; - err = btrfs_init_inode_security(trans, inode, dir); + err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) goto out_fail; @@ -6946,7 +6947,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (IS_ERR(inode)) goto out_unlock; - err = btrfs_init_inode_security(trans, inode, dir); + err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) { drop_inode = 1; goto out_unlock; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 698fdd2c739c..3338a7e61d25 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -352,7 +352,8 @@ int btrfs_removexattr(struct dentry *dentry, const char *name) } int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *dir) + struct inode *inode, struct inode *dir, + const struct qstr *qstr) { int err; size_t len; @@ -360,7 +361,8 @@ int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, char *suffix; char *name; - err = security_inode_init_security(inode, dir, &suffix, &value, &len); + err = security_inode_init_security(inode, dir, qstr, &suffix, &value, + &len); if (err) { if (err == -EOPNOTSUPP) return 0; diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h index 7a43fd640bbb..b3cc8039134b 100644 --- a/fs/btrfs/xattr.h +++ b/fs/btrfs/xattr.h @@ -37,6 +37,7 @@ extern int btrfs_setxattr(struct dentry *dentry, const char *name, extern int btrfs_removexattr(struct dentry *dentry, const char *name); extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *dir); + struct inode *inode, struct inode *dir, + const struct qstr *qstr); #endif /* __XATTR__ */ -- cgit v1.2.2 From 13dbc08987f25d9dba488a34b44b43e3844b027c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 3 Feb 2011 02:39:52 +0000 Subject: Btrfs:
make sure search_bitmap finds something in remove_from_bitmap When we're cleaning up the tree log we need to be able to remove free space from the block group. The problem is that if the free space spans bitmaps, we would not find the space, since we're looking for too many bytes. So make sure the number of bytes we search for is limited to either the number of bytes we want or the number of bytes left in the bitmap. This was tested by a user who was hitting the BUG() after search_bitmap. With this patch he can now mount his fs. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index a5501edc3c9f..a0390657451b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1216,6 +1216,7 @@ again: */ search_start = *offset; search_bytes = *bytes; + search_bytes = min(search_bytes, end - search_start + 1); ret = search_bitmap(block_group, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); -- cgit v1.2.2 From 3c14874acc71180553fb5aba528e3cf57c5b958b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 2 Feb 2011 15:53:47 +0000 Subject: Btrfs: exclude super blocks when we read in block groups This has been resulting in a BUG_ON(ret) after btrfs_reserve_extent in btrfs_cow_file_range. The reason is we don't actually calculate the bytes_super for a block group until we go to cache it, which means that the space_info can hand out reservations for space that it doesn't actually have, and we can run out of data space. This is also a problem if you are using space caching, since we don't ever calculate bytes_super for the block groups. So instead, every time we read a block group, call exclude_super_stripes, which calculates the bytes_super for the block group so it can be left out of the space_info. Then whenever caching completes we just call free_excluded_extents so that the super excluded extents are freed up. Also if we are unmounting and we hit any block groups that haven't been cached, we still need to call free_excluded_extents to make sure things are cleaned up properly.
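The accounting at stake, roughly (a simplified sketch of the idea, not the exact btrfs code): the space a block group can hand out is its size minus what is used and minus the superblock stripes, so if bytes_super is only computed at caching time, the last term is missing from early reservations:

	/* usable = block group size - used - superblock copies */
	usable = cache->key.offset - btrfs_block_group_used(&cache->item)
		 - cache->bytes_super;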
Thanks, Reported-by: Arne Jansen Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f07ba21cbf06..565e22d77b1b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -320,11 +320,6 @@ static int caching_kthread(void *data) if (!path) return -ENOMEM; - exclude_super_stripes(extent_root, block_group); - spin_lock(&block_group->space_info->lock); - block_group->space_info->bytes_readonly += block_group->bytes_super; - spin_unlock(&block_group->space_info->lock); - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); /* @@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, cache->cached = BTRFS_CACHE_NO; } spin_unlock(&cache->lock); - if (ret == 1) + if (ret == 1) { + free_excluded_extents(fs_info->extent_root, cache); return 0; + } } if (load_cache_only) @@ -4036,6 +4033,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); atomic_dec(&BTRFS_I(inode)->outstanding_extents); + WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); spin_lock(&BTRFS_I(inode)->accounting_lock); nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); @@ -8325,6 +8323,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) if (block_group->cached == BTRFS_CACHE_STARTED) wait_block_group_cache_done(block_group); + /* + * We haven't cached this block group, which means we could + * possibly have excluded extents on this block group. + */ + if (block_group->cached == BTRFS_CACHE_NO) + free_excluded_extents(info->extent_root, block_group); + btrfs_remove_free_space_cache(block_group); btrfs_put_block_group(block_group); @@ -8439,6 +8444,13 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; + /* + * We need to exclude the super stripes now so that the space + * info has super bytes accounted for, otherwise we'll think + * we have more space than we actually do. + */ + exclude_super_stripes(root, cache); + /* * check for two cases, either we are full, and therefore * don't need to bother with the caching work since we won't * time, particularly in the full case. */ if (found_key.offset == btrfs_block_group_used(&cache->item)) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; free_excluded_extents(root, cache); } else if (btrfs_block_group_used(&cache->item) == 0) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, root->fs_info, -- cgit v1.2.2 From 554233a6e0e8557e8e81e54cc70628d101291122 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 3 Feb 2011 03:16:25 +0000 Subject: btrfs: cleanup error handling in btrfs_unlink_inode() When btrfs_alloc_path() fails, btrfs_free_path() need not be called. Therefore, change the failure branch to jump past it.
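In other words, a cleanup label must only undo what has already been set up; a condensed sketch of the rule (not the full function):

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;	/* not 'err': there is no path to free yet */
	}
	...
	err:
		btrfs_free_path(path);
	out:
		return ret;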
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 36bc3f49ebf9..c9bc0afdbfc6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2646,7 +2646,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto err; + goto out; } path->leave_spinning = 1; -- cgit v1.2.2 From 8e4eef7a60eeca0fe7503e5cbd3b24ff4941c732 Mon Sep 17 00:00:00 2001 From: Alexey Charkov Date: Wed, 2 Feb 2011 21:15:35 +0000 Subject: btrfs: Drop __exit attribute on btrfs_exit_compress As this function is called in some error paths while not removing the module, the __exit attribute prevents the kernel image from linking when btrfs is compiled in statically. Signed-off-by: Alexey Charkov Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 3a932f183da1..4d2110eafe29 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -921,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, return ret; } -void __exit btrfs_exit_compress(void) +void btrfs_exit_compress(void) { free_workspaces(); } -- cgit v1.2.2 From 3a90983dbdcb2f4f48c0d771d8e5b4d88f27fae6 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 18 Jan 2011 13:34:40 +0800 Subject: Btrfs: Fix page count calculation Take the offset of the start position into account when calculating the page count. Signed-off-by: Yan, Zheng Signed-off-by: Chris Mason --- fs/btrfs/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 9e097fbfc78d..b0ff34b96607 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -991,8 +991,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, size_t write_bytes = min(iov_iter_count(&i), nrptrs * (size_t)PAGE_CACHE_SIZE - offset); - size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + size_t num_pages = (write_bytes + offset + + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); @@ -1022,8 +1022,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, copied = btrfs_copy_from_user(pos, num_pages, write_bytes, pages, &i); - dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; if (num_pages > dirty_pages) { if (copied > 0) -- cgit v1.2.2 From eb14ab8ed24a0405fd056068b28c33a1cd846024 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 10 Feb 2011 12:35:00 -0500 Subject: Btrfs: fix page->private races There is a race where btrfs_releasepage can drop the page->private contents just as alloc_extent_buffer is setting up pages for metadata. Because of how the Btrfs page flags work, this results in us skipping the crc on the page during IO. This patch solves the race by waiting until after the extent buffer is inserted into the radix tree before it sets page private.
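The ordering after the fix, in outline (a simplified sketch of the diff below):

	spin_lock(&tree->buffer_lock);
	/* insert eb into the radix tree first, so releasepage can
	 * find it and will not clear page->private under us */
	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
	spin_unlock(&tree->buffer_lock);

	/* only now publish page->private */
	set_page_extent_mapped(eb->first_page);
	set_page_extent_head(eb->first_page, eb->len);
	unlock_page(eb->first_page);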
Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 8 ++++++-- fs/btrfs/extent_io.c | 38 +++++++++++++++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b36eeef19194..3e1ea3e0477e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) tree = &BTRFS_I(page->mapping->host)->io_tree; - if (page->private == EXTENT_PAGE_PRIVATE) + if (page->private == EXTENT_PAGE_PRIVATE) { + WARN_ON(1); goto out; - if (!page->private) + } + if (!page->private) { + WARN_ON(1); goto out; + } len = page->private >> 2; WARN_ON(len == 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8862dda46ff6..0418bf2c9757 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1946,6 +1946,7 @@ void set_page_extent_mapped(struct page *page) static void set_page_extent_head(struct page *page, unsigned long len) { + WARN_ON(!PagePrivate(page)); set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); } @@ -3195,7 +3196,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, } if (!PageUptodate(p)) uptodate = 0; - unlock_page(p); + + /* + * see below about how we avoid a nasty race with release page + * and why we unlock later + */ + if (i != 0) + unlock_page(p); } if (uptodate) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); @@ -3219,9 +3226,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, atomic_inc(&eb->refs); spin_unlock(&tree->buffer_lock); radix_tree_preload_end(); + + /* + * there is a race where release page may have + * tried to find this extent buffer in the radix + * but failed. It will tell the VM it is safe to + * reclaim the, and it will clear the page private bit. + * We must make sure to set the page private bit properly + * after the extent buffer is in the radix tree so + * it doesn't get lost + */ + set_page_extent_mapped(eb->first_page); + set_page_extent_head(eb->first_page, eb->len); + if (!page0) + unlock_page(eb->first_page); return eb; free_eb: + if (eb->first_page && !page0) + unlock_page(eb->first_page); + if (!atomic_dec_and_test(&eb->refs)) return exists; btrfs_release_extent_buffer(eb); @@ -3272,10 +3296,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, continue; lock_page(page); + WARN_ON(!PagePrivate(page)); + + set_page_extent_mapped(page); if (i == 0) set_page_extent_head(page, eb->len); - else - set_page_private(page, EXTENT_PAGE_PRIVATE); clear_page_dirty_for_io(page); spin_lock_irq(&page->mapping->tree_lock); @@ -3465,6 +3490,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); + + WARN_ON(!PagePrivate(page)); + + set_page_extent_mapped(page); + if (i == 0) + set_page_extent_head(page, eb->len); + if (inc_all_pages) page_cache_get(page); if (!PageUptodate(page)) { -- cgit v1.2.2 From e3f24cc521cb7ba60ac137abd1939e4e03435e80 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 14 Feb 2011 12:52:08 -0500 Subject: Btrfs: don't release pages when we can't clear the uptodate bits Btrfs tracks uptodate state in an rbtree as well as in the page bits. This is supposed to enable us to use block sizes other than the page size, but there are a few parts still missing before that completely works. 
But our readpage routine trusts this additional range-based tracking of uptodateness, much in the same way the buffer head up-to-date bits are trusted for the other filesystems. The problem is that sometimes we need to allocate memory in order to split records in the rbtree, even when we are just clearing bits. This can be difficult when our clearing function is called with GFP_ATOMIC, which can happen in the releasepage path. So, what happens today looks like this: releasepage called with GFP_ATOMIC; btrfs_releasepage calls clear_extent_bit; clear_extent_bit fails to allocate RAM, leaving the up-to-date bit set; btrfs_releasepage returns success. The end result is the page being gone, but btrfs thinking the range is up to date. Later on if someone tries to read that same page, the btrfs readpage code will return immediately, thinking the page is already up to date. This commit fixes things to fail the releasepage when we can't clear the extent state bits. It covers both data pages and metadata tree blocks. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 0418bf2c9757..e7aeba242701 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2822,9 +2822,17 @@ int try_release_extent_state(struct extent_map_tree *map, * at this point we can safely clear everything except the * locked bit and the nodatasum bit */ - clear_extent_bit(tree, start, end, + ret = clear_extent_bit(tree, start, end, ~(EXTENT_LOCKED | EXTENT_NODATASUM), 0, 0, NULL, mask); + + /* if clear_extent_bit failed for enomem reasons, + * we can't allow the release to continue. + */ + if (ret < 0) + ret = 0; + else + ret = 1; } return ret; } -- cgit v1.2.2 From 6848ad6461e551849ba3c32d945d4f45e96453a6 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 14 Feb 2011 16:00:03 -0500 Subject: Btrfs: Fix balance panic Mark the cloned backref_node as checked in clone_backref_node() Signed-off-by: Yan, Zheng Signed-off-by: Chris Mason --- fs/btrfs/relocation.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1f5556acb530..0825e4ed9447 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, new_node->bytenr = dest->node->start; new_node->level = node->level; new_node->lowest = node->lowest; + new_node->checked = 1; new_node->root = dest; if (!node->lowest) { -- cgit v1.2.2 From 51788b1bdd0d68345bab0af4301e7fa429277228 Mon Sep 17 00:00:00 2001 From: Dan Rosenberg Date: Mon, 14 Feb 2011 16:04:23 -0500 Subject: btrfs: prevent heap corruption in btrfs_ioctl_space_info() Commit bf5fc093c5b625e4259203f1cee7ca73488a5620 refactored btrfs_ioctl_space_info() and introduced several security issues. space_args.space_slots is an unsigned 64-bit type controlled by a possibly unprivileged caller. The comparison as a signed int type allows providing values that are treated as negative and cause the subsequent allocation size calculation to wrap, or be truncated to 0. By providing a size that's truncated to 0, kmalloc() will return ZERO_SIZE_PTR. It's also possible to provide a value smaller than the slot count. The subsequent loop ignores the allocation size when copying data in, resulting in a heap overflow or write to ZERO_SIZE_PTR.
The fix changes the slot count type and comparison typecast to u64, which prevents truncation or signedness errors, and also ensures that we don't copy more data than we've allocated in the subsequent loop. Note that zero-size allocations are no longer possible since there is already an explicit check for space_args.space_slots being 0, and truncation of this value is no longer an issue. Signed-off-by: Dan Rosenberg Signed-off-by: Josef Bacik Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 02d224e8c83f..be2d4f6aaa5e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2208,7 +2208,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) int num_types = 4; int alloc_size; int ret = 0; - int slot_count = 0; + u64 slot_count = 0; int i, c; if (copy_from_user(&space_args, @@ -2247,7 +2247,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) goto out; } - slot_count = min_t(int, space_args.space_slots, slot_count); + slot_count = min_t(u64, space_args.space_slots, slot_count); alloc_size = sizeof(*dest) * slot_count; @@ -2267,6 +2267,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; + if (!slot_count) + break; + info = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp, &root->fs_info->space_info, @@ -2288,7 +2291,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) memcpy(dest, &space, sizeof(space)); dest++; space_args.total_spaces++; + slot_count--; } + if (!slot_count) + break; } up_read(&info->groups_sem); } -- cgit v1.2.2 From 67100f255dba284bcbb5ce795355dad1cff35658 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sun, 6 Feb 2011 19:58:21 +0000 Subject: Btrfs - Fix memory leak in btrfs_init_new_device() Memory allocated by calling kstrdup() should be freed. Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7cad59353b09..dadaaa8005c8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1603,12 +1603,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) ret = find_next_devid(root, &device->devid); if (ret) { + kfree(device->name); kfree(device); goto error; } trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { + kfree(device->name); kfree(device); ret = PTR_ERR(trans); goto error; -- cgit v1.2.2 From c26a920373a983b52223eed5a13b97404d8b4158 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 14 Feb 2011 00:45:29 +0000 Subject: Btrfs: check return value of alloc_extent_map() Add checks on the return value of alloc_extent_map() in several places. In addition, alloc_extent_map() returns either a valid address or NULL, so checking with IS_ERR() is unnecessary; remove the IS_ERR() checks.
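So the correct call-site pattern is simply (sketch):

	em = alloc_extent_map(GFP_NOFS);
	if (!em)		/* NULL on failure; IS_ERR(em) is never true */
		return -ENOMEM;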
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/extent_map.c | 4 ++-- fs/btrfs/file.c | 1 + fs/btrfs/inode.c | 3 +++ 4 files changed, 7 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 565e22d77b1b..a7aaa10c5302 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6584,7 +6584,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, u64 end = start + extent_key->offset - 1; em = alloc_extent_map(GFP_NOFS); - BUG_ON(!em || IS_ERR(em)); + BUG_ON(!em); em->start = start; em->len = extent_key->offset; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index b0e1fce12530..2b6c12e983b3 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask) { struct extent_map *em; em = kmem_cache_alloc(extent_map_cache, mask); - if (!em || IS_ERR(em)) - return em; + if (!em) + return NULL; em->in_tree = 0; em->flags = 0; em->compress_type = BTRFS_COMPRESS_NONE; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b0ff34b96607..65338a1d14ad 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -185,6 +185,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split = alloc_extent_map(GFP_NOFS); if (!split2) split2 = alloc_extent_map(GFP_NOFS); + BUG_ON(!split || !split2); write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c9bc0afdbfc6..8d392ed73d57 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -644,6 +644,7 @@ retry: async_extent->ram_size - 1, 0); em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = async_extent->start; em->len = async_extent->ram_size; em->orig_start = em->start; @@ -820,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(ret); em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = start; em->orig_start = em->start; ram_size = ins.offset; @@ -1169,6 +1171,7 @@ out_check: struct extent_map_tree *em_tree; em_tree = &BTRFS_I(inode)->extent_tree; em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = cur_offset; em->orig_start = em->start; em->len = num_bytes; -- cgit v1.2.2 From 91435650c233b93e0da389db74f4b2c11c5ad2d4 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 16 Feb 2011 13:10:41 -0500 Subject: Btrfs: put ENOSPC debugging under a mount option ENOSPC in btrfs is getting to the point where the extra debugging isn't required. I've put it under mount -o enospc_debug just in case someone is having difficult problems. 
Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/extent-tree.c | 2 +- fs/btrfs/super.c | 7 ++++++- 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 72195378bef9..6297701bc19c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1254,6 +1254,7 @@ struct btrfs_root { #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) +#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a7aaa10c5302..d375fc04a065 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5377,7 +5377,7 @@ again: num_bytes, data, 1); goto again; } - if (ret == -ENOSPC) { + if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { struct btrfs_space_info *sinfo; sinfo = __find_space_info(root->fs_info, data); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0209b5fc772c..db0a827252bd 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -155,7 +155,8 @@ enum { Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, - Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, + Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, + Opt_enospc_debug, Opt_err, }; static match_table_t tokens = { @@ -184,6 +185,7 @@ static match_table_t tokens = { {Opt_space_cache, "space_cache"}, {Opt_clear_cache, "clear_cache"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, + {Opt_enospc_debug, "enospc_debug"}, {Opt_err, NULL}, }; @@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_user_subvol_rm_allowed: btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); break; + case Opt_enospc_debug: + btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); -- cgit v1.2.2 From c87f08ca44e83b2c8d28f63f9c33f3a270a04bbe Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 16 Feb 2011 13:57:04 -0500 Subject: Btrfs: allow balance to explicitly allocate chunks as it relocates Btrfs device shrinking and balancing ends up reallocating all the blocks in order to allow COW to move them to new destinations. It is somewhat awkward in terms of ENOSPC because most of the enospc code is built around the idea that some operation on a reference counted tree triggers allocations in the non-reference counted trees. This commit changes the balancing code to deal with enospc by trying to allocate a new chunk. If that allocation succeeds, we go ahead and retry whatever failed due to enospc. 
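The shape of the retry, condensed from the diff below (a sketch only; do_relocation_pass is a hypothetical stand-in for the relocation loop body):

	err = do_relocation_pass(rc);		/* hypothetical stand-in */
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
					      rc->block_group->flags);
		if (ret == 0) {
			err = 0;
			progress = 0;
			goto restart;	/* retry with the new chunk */
		}
	}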
Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/extent-tree.c | 7 +++++++ fs/btrfs/relocation.c | 13 ++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6297701bc19c..28188a786da0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2219,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end); int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, u64 num_bytes); +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d375fc04a065..100e409e9053 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8066,6 +8066,13 @@ out: return ret; } +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type) +{ + u64 alloc_flags = get_alloc_profile(root, type); + return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); +} + /* * helper to account the unused space of all the readonly block group in the * list. takes mirrors into account. diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0825e4ed9447..31ade5802ae8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) u32 item_size; int ret; int err = 0; + int progress = 0; path = btrfs_alloc_path(); if (!path) @@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } while (1) { + progress++; trans = btrfs_start_transaction(rc->extent_root, 0); BUG_ON(IS_ERR(trans)); - +restart: if (update_backref_cache(trans, &rc->backref_cache)) { btrfs_end_transaction(trans, rc->extent_root); continue; @@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } } } + if (trans && progress && err == -ENOSPC) { + ret = btrfs_force_chunk_alloc(trans, rc->extent_root, + rc->block_group->flags); + if (ret == 0) { + err = 0; + progress = 0; + goto restart; + } + } btrfs_release_path(rc->extent_root, path); clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, -- cgit v1.2.2 From b4dc2b8c694ead005b828f5fb7fa1134db5b6275 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 16 Feb 2011 06:06:34 +0000 Subject: Btrfs: Fix BTRFS_IOC_SUBVOL_SETFLAGS ioctl - Check user-specified flags correctly - Check the inode owership - Search root item in root tree but not fs tree Reported-by: Dan Rosenberg Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index be2d4f6aaa5e..5fdb2abc4fa7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; - if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) + if (flags & BTRFS_SUBVOL_CREATE_ASYNC) return -EINVAL; if (flags & ~BTRFS_SUBVOL_RDONLY) return -EOPNOTSUPP; + if (!is_owner_or_cap(inode)) + return -EACCES; + down_write(&root->fs_info->subvol_sem); /* nothing to do */ @@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, goto out_reset; } - ret = btrfs_update_root(trans, root, + ret = 
btrfs_update_root(trans, root->fs_info->tree_root, &root->root_key, &root->root_item); btrfs_commit_transaction(trans, root); -- cgit v1.2.2 From ca9b688c1c9a21635cfc8af8b68565b154185196 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 16 Feb 2011 06:06:41 +0000 Subject: Btrfs: Avoid accessing unmapped kernel address When decompressing a chunk of data, we'll copy the data out to a working buffer if the data is stored in more than one page, otherwise we'll use the mapped page directly to avoid memory copy. In the latter case, we'll end up accessing the kernel address after we've unmapped the page in a corner case. Reported-by: Juan Francisco Cantero Hurtado Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/lzo.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index cc9b450399df..a178f5ebea78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws, unsigned long tot_out; unsigned long tot_len; char *buf; + bool may_late_unmap, need_unmap; data_in = kmap(pages_in[0]); tot_len = read_compress_length(data_in); @@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws, tot_in += in_len; working_bytes = in_len; + may_late_unmap = need_unmap = false; /* fast path: avoid using the working buffer */ if (in_page_bytes_left >= in_len) { buf = data_in + in_offset; bytes = in_len; + may_late_unmap = true; goto cont; } @@ -329,14 +332,17 @@ cont: if (working_bytes == 0 && tot_in >= tot_len) break; - kunmap(pages_in[page_in_index]); - page_in_index++; - if (page_in_index >= total_pages_in) { + if (page_in_index + 1 >= total_pages_in) { ret = -1; - data_in = NULL; goto done; } - data_in = kmap(pages_in[page_in_index]); + + if (may_late_unmap) + need_unmap = true; + else + kunmap(pages_in[page_in_index]); + + data_in = kmap(pages_in[++page_in_index]); in_page_bytes_left = PAGE_CACHE_SIZE; in_offset = 0; @@ -346,6 +352,8 @@ cont: out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, &out_len); + if (need_unmap) + kunmap(pages_in[page_in_index - 1]); if (ret != LZO_E_OK) { printk(KERN_WARNING "btrfs decompress failed\n"); ret = -1; @@ -363,8 +371,7 @@ cont: break; } done: - if (data_in) - kunmap(pages_in[page_in_index]); + kunmap(pages_in[page_in_index]); return ret; } -- cgit v1.2.2 From 9b3517e9136824346227b7b04f8f7ea1f3a726cc Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 15 Feb 2011 18:14:25 +0000 Subject: Btrfs: make btrfs_rm_device() fail gracefully If shrinking done as part of the online device removal fails add that device back to the allocation list and increment the rw_devices counter. This fixes two bugs: 1) we could have a perfectly good device out of alloc list for no good reason; 2) in the btrfs consisting of two devices, failure in btrfs_rm_device() could lead to a situation where it was impossible to remove any of the devices because of the "unable to remove the only writeable device" error. 
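Condensed, the unwind added by the diff below looks like this (a sketch; the intervening removal steps are elided):

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;
	/* ... rm_dev_item and the rest of the removal path ... */

error_undo:
	if (device->writeable) {
		/* put the device back on the allocation list */
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;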
Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index dadaaa8005c8..f31c33119bb6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1337,11 +1337,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) ret = btrfs_shrink_device(device, 0); if (ret) - goto error_brelse; + goto error_undo; ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); if (ret) - goto error_brelse; + goto error_undo; device->in_fs_metadata = 0; @@ -1415,6 +1415,13 @@ out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); return ret; +error_undo: + if (device->writeable) { + list_add(&device->dev_alloc_list, + &root->fs_info->fs_devices->alloc_list); + root->fs_info->fs_devices->rw_devices++; + } + goto error_brelse; } /* -- cgit v1.2.2 From fb01aa85b8b29c1a4e1f4a28ea54175de6bf7559 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 15 Feb 2011 18:12:57 +0000 Subject: Btrfs: set FMODE_EXCL in btrfs_device->mode This fixes a bug introduced in d4d77629, where the device added online (and therefore initialized via btrfs_init_new_device()) would be left with the positive bdev->bd_holders after unmount. Since d4d77629 we no longer OR FMODE_EXCL explicitly on blkdev_put(), set it in btrfs_device->mode. Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f31c33119bb6..94334d952280 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1639,7 +1639,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; - device->mode = 0; + device->mode = FMODE_EXCL; set_blocksize(device->bdev, 4096); if (seeding_dev) { -- cgit v1.2.2 From ec29ed5b407d618a8128f5942aade9e1758aa14b Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 23 Feb 2011 16:23:20 -0500 Subject: Btrfs: fix fiemap bugs with delalloc The Btrfs fiemap code wasn't properly returning delalloc extents, so applications that trust fiemap to decide if there are holes in the file see holes instead of delalloc. This reworks the btrfs fiemap code, adding a get_extent helper that searches for delalloc ranges and also adding a helper for extent_fiemap that skips past holes in the file. 
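For reference, this is roughly what a user-space consumer such as cp sees (an illustrative sketch of a FIEMAP query; the file path is hypothetical). Without FIEMAP_FLAG_SYNC, a dirty-but-unwritten range should now come back flagged as delalloc instead of silently appearing as a hole:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	unsigned int i;
	struct fiemap *fm;
	int fd = open("/mnt/testfile", O_RDONLY);	/* hypothetical path */

	if (fd < 0)
		return 1;
	/* room for the header plus 32 extent records */
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* whole file */
	fm->fm_extent_count = 32;		/* note: no FIEMAP_FLAG_SYNC */
	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
		return 1;
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu len %llu%s\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       (fm->fm_extents[i].fe_flags & FIEMAP_EXTENT_DELALLOC) ?
		       " (delalloc)" : "");
	free(fm);
	close(fd);
	return 0;
}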
Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 138 ++++++++++++++++++++++++++++++++++++--------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 126 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 224 insertions(+), 42 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e7aeba242701..ff45b80d90f0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode, */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, - unsigned long bits) + unsigned long bits, int contig) { struct rb_node *node; struct extent_state *state; u64 cur_start = *start; u64 total_bytes = 0; + u64 last = 0; int found = 0; if (search_end <= cur_start) { @@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree, state = rb_entry(node, struct extent_state, rb_node); if (state->start > search_end) break; - if (state->end >= cur_start && (state->state & bits)) { + if (contig && found && state->start > last + 1) + break; + if (state->end >= cur_start && (state->state & bits) == bits) { total_bytes += min(search_end, state->end) + 1 - max(cur_start, state->start); if (total_bytes >= max_bytes) @@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree, *start = state->start; found = 1; } + last = state->end; + } else if (contig && found) { + break; } node = rb_next(node); if (!node) @@ -2912,6 +2918,46 @@ out: return sector; } +/* + * helper function for fiemap, which doesn't want to see any holes. + * This maps until we find something past 'last' + */ +static struct extent_map *get_extent_skip_holes(struct inode *inode, + u64 offset, + u64 last, + get_extent_t *get_extent) +{ + u64 sectorsize = BTRFS_I(inode)->root->sectorsize; + struct extent_map *em; + u64 len; + + if (offset >= last) + return NULL; + + while(1) { + len = last - offset; + if (len == 0) + break; + len = (len + sectorsize - 1) & ~(sectorsize - 1); + em = get_extent(inode, NULL, 0, offset, len, 0); + if (!em || IS_ERR(em)) + return em; + + /* if this isn't a hole return it */ + if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && + em->block_start != EXTENT_MAP_HOLE) { + return em; + } + + /* this is a hole, advance to the next extent */ + offset = extent_map_end(em); + free_extent_map(em); + if (offset >= last) + break; + } + return NULL; +} + int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent) { @@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u32 flags = 0; u32 found_type; u64 last; + u64 last_for_get_extent = 0; u64 disko = 0; + u64 isize = i_size_read(inode); struct btrfs_key found_key; struct extent_map *em = NULL; struct extent_state *cached_state = NULL; struct btrfs_path *path; struct btrfs_file_extent_item *item; int end = 0; - u64 em_start = 0, em_len = 0; + u64 em_start = 0; + u64 em_len = 0; + u64 em_end = 0; unsigned long emflags; - int hole = 0; if (len == 0) return -EINVAL; @@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return -ENOMEM; path->leave_spinning = 1; + /* + * lookup the last file extent. 
We're not using i_size here + * because there might be preallocation past i_size + */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, path, inode->i_ino, -1, 0); if (ret < 0) { @@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); - /* No extents, just return */ + /* No extents, but there might be delalloc bits */ if (found_key.objectid != inode->i_ino || found_type != BTRFS_EXTENT_DATA_KEY) { - btrfs_free_path(path); - return 0; + /* have to trust i_size as the end */ + last = (u64)-1; + last_for_get_extent = isize; + } else { + /* + * remember the start of the last extent. There are a + * bunch of different factors that go into the length of the + * extent, so its much less complex to remember where it started + */ + last = found_key.offset; + last_for_get_extent = last + 1; } - last = found_key.offset; btrfs_free_path(path); + /* + * we might have some extents allocated but more delalloc past those + * extents. so, we trust isize unless the start of the last extent is + * beyond isize + */ + if (last < isize) { + last = (u64)-1; + last_for_get_extent = isize; + } + lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state, GFP_NOFS); - em = get_extent(inode, NULL, 0, off, max - off, 0); + + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); if (!em) goto out; if (IS_ERR(em)) { @@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } while (!end) { - hole = 0; - off = em->start + em->len; + off = extent_map_end(em); if (off >= max) end = 1; - if (em->block_start == EXTENT_MAP_HOLE) { - hole = 1; - goto next; - } - em_start = em->start; em_len = em->len; - + em_end = extent_map_end(em); + emflags = em->flags; disko = 0; flags = 0; @@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) flags |= FIEMAP_EXTENT_ENCODED; -next: - emflags = em->flags; free_extent_map(em); em = NULL; - if (!end) { - em = get_extent(inode, NULL, 0, off, max - off, 0); - if (!em) - goto out; - if (IS_ERR(em)) { - ret = PTR_ERR(em); - goto out; - } - emflags = em->flags; - } - - if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { + if ((em_start >= last) || em_len == (u64)-1 || + (last == (u64)-1 && isize <= em_end)) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - if (em_start == last) { + /* now scan forward to see if this is really the last extent. 
*/ + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto out; + } + if (!em) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - - if (!hole) { - ret = fiemap_fill_next_extent(fieinfo, em_start, disko, - em_len, flags); - if (ret) - goto out_free; - } + ret = fiemap_fill_next_extent(fieinfo, em_start, disko, + em_len, flags); + if (ret) + goto out_free; } out_free: free_extent_map(em); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7083cfafd061..9318dfefd59c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -191,7 +191,7 @@ void extent_io_exit(void); u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, - u64 max_bytes, unsigned long bits); + u64 max_bytes, unsigned long bits, int contig); void free_extent_state(struct extent_state *state); int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8d392ed73d57..44b926646e33 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start) private = 0; if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, - (u64)-1, 1, EXTENT_DIRTY)) { + (u64)-1, 1, EXTENT_DIRTY, 0)) { ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, &private_failure); if (ret == 0) { @@ -5282,6 +5282,128 @@ out: return em; } +struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) +{ + struct extent_map *em; + struct extent_map *hole_em = NULL; + u64 range_start = start; + u64 end; + u64 found; + u64 found_end; + int err = 0; + + em = btrfs_get_extent(inode, page, pg_offset, start, len, create); + if (IS_ERR(em)) + return em; + if (em) { + /* + * if our em maps to a hole, there might + * actually be delalloc bytes behind it + */ + if (em->block_start != EXTENT_MAP_HOLE) + return em; + else + hole_em = em; + } + + /* check to see if we've wrapped (len == -1 or similar) */ + end = start + len; + if (end < start) + end = (u64)-1; + else + end -= 1; + + em = NULL; + + /* ok, we didn't find anything, lets look for delalloc */ + found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, + end, len, EXTENT_DELALLOC, 1); + found_end = range_start + found; + if (found_end < range_start) + found_end = (u64)-1; + + /* + * we didn't find anything useful, return + * the original results from get_extent() + */ + if (range_start > end || found_end <= start) { + em = hole_em; + hole_em = NULL; + goto out; + } + + /* adjust the range_start to make sure it doesn't + * go backwards from the start they passed in + */ + range_start = max(start,range_start); + found = found_end - range_start; + + if (found > 0) { + u64 hole_start = start; + u64 hole_len = len; + + em = alloc_extent_map(GFP_NOFS); + if (!em) { + err = -ENOMEM; + goto out; + } + /* + * when btrfs_get_extent can't find anything it + * returns one huge hole + * + * make sure what it found really fits our range, and + * adjust to make sure it is based on the start from + * the caller + */ + if (hole_em) { + u64 calc_end = extent_map_end(hole_em); + + if (calc_end <= start || (hole_em->start > end)) { + free_extent_map(hole_em); + hole_em = NULL; + } else { + hole_start = max(hole_em->start, start); + hole_len = calc_end - hole_start; + } + } + em->bdev = NULL; + if (hole_em && range_start > hole_start) { + /* our hole starts before our delalloc, so we + * have to 
return just the parts of the hole + * that go until the delalloc starts + */ + em->len = min(hole_len, + range_start - hole_start); + em->start = hole_start; + em->orig_start = hole_start; + /* + * don't adjust block start at all, + * it is fixed at EXTENT_MAP_HOLE + */ + em->block_start = hole_em->block_start; + em->block_len = hole_len; + } else { + em->start = range_start; + em->len = found; + em->orig_start = range_start; + em->block_start = EXTENT_MAP_DELALLOC; + em->block_len = found; + } + } else if (hole_em) { + return hole_em; + } +out: + + free_extent_map(hole_em); + if (err) { + free_extent_map(em); + return ERR_PTR(err); + } + return em; +} + static struct extent_map *btrfs_new_extent_direct(struct inode *inode, u64 start, u64 len) { @@ -6104,7 +6226,7 @@ out: static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { - return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); + return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); } int btrfs_readpage(struct file *file, struct page *page) -- cgit v1.2.2 From ae0e47f02aaedbfdd5e4bec73f79b714d758223d Mon Sep 17 00:00:00 2001 From: "Justin P. Mattock" Date: Tue, 1 Mar 2011 15:06:02 +0100 Subject: Remove one to many n's in a word Signed-off-by: Justin P. Mattock Signed-off-by: Jiri Kosina --- fs/btrfs/disk-io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fdce8799b98d..7b658d2107b4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2489,7 +2489,7 @@ int close_ctree(struct btrfs_root *root) * ERROR state on disk. * * 2. when btrfs flips readonly just in btrfs_commit_super, - * and in such case, btrfs cannnot write sb via btrfs_commit_super, + * and in such case, btrfs cannot write sb via btrfs_commit_super, * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, * btrfs will cleanup all FS resources first and write sb then. */ -- cgit v1.2.2 From b1bf862e9dad431175a1174379476299dbfdc017 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 28 Feb 2011 09:52:08 -0500 Subject: Btrfs: fix regressions in copy_from_user handling Commit 914ee295af418e936ec20a08c1663eaabe4cd07a fixed deadlocks in btrfs_file_write where we would catch page faults on pages we had locked. But, there were a few problems: 1) The x86-32 iov_iter_copy_from_user_atomic code always fails to copy data when the amount to copy is more than 4K and the offset to start copying from is not page aligned. The result was btrfs_file_write looping forever retrying the iov_iter_copy_from_user_atomic We deal with this by changing btrfs_file_write to drop down to single page copies when iov_iter_copy_from_user_atomic starts returning failure. 2) The btrfs_file_write code was leaking delalloc reservations when iov_iter_copy_from_user_atomic returned zero. The looping above would result in the entire filesystem running out of delalloc reservations and constantly trying to flush things to disk. 3) btrfs_file_write will lock down page cache pages, make sure any writeback is finished, do the copy_from_user and then release them. Before the loop runs we check the first and last pages in the write to see if they are only being partially modified. If the start or end of the write isn't aligned, we make sure the corresponding pages are up to date so that we don't introduce garbage into the file. 
With the copy_from_user changes, we're allowing the VM to reclaim the pages after a partial update from copy_from_user, but we're not making sure the page cache page is up to date when we loop around to resume the write. We deal with this by pushing the up to date checks down into the page prep code. This fits better with how the rest of file_write works. Signed-off-by: Chris Mason Reported-by: Mitch Harder cc: stable@kernel.org --- fs/btrfs/file.c | 101 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 59 insertions(+), 42 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 65338a1d14ad..13664b315fe2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -761,6 +761,27 @@ out: return 0; } +/* + * on error we return an unlocked page and the error value + * on success we return a locked page and 0 + */ +static int prepare_uptodate_page(struct page *page, u64 pos) +{ + int ret = 0; + + if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { + ret = btrfs_readpage(NULL, page); + if (ret) + return ret; + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + return -EIO; + } + } + return 0; +} + /* * this gets pages into the page cache and locks them down, it also properly * waits for data=ordered extents to finish before allowing the pages to be @@ -776,6 +797,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, unsigned long index = pos >> PAGE_CACHE_SHIFT; struct inode *inode = fdentry(file)->d_inode; int err = 0; + int faili = 0; u64 start_pos; u64 last_pos; @@ -793,15 +815,24 @@ again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { - int c; - for (c = i - 1; c >= 0; c--) { - unlock_page(pages[c]); - page_cache_release(pages[c]); - } - return -ENOMEM; + faili = i - 1; + err = -ENOMEM; + goto fail; + } + + if (i == 0) + err = prepare_uptodate_page(pages[i], pos); + if (i == num_pages - 1) + err = prepare_uptodate_page(pages[i], + pos + write_bytes); + if (err) { + page_cache_release(pages[i]); + faili = i - 1; + goto fail; } wait_on_page_writeback(pages[i]); } + err = 0; if (start_pos < inode->i_size) { struct btrfs_ordered_extent *ordered; lock_extent_bits(&BTRFS_I(inode)->io_tree, @@ -841,6 +872,14 @@ again: WARN_ON(!PageLocked(pages[i])); } return 0; +fail: + while (faili >= 0) { + unlock_page(pages[faili]); + page_cache_release(pages[faili]); + faili--; + } + return err; + } static ssize_t btrfs_file_aio_write(struct kiocb *iocb, @@ -850,7 +889,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, struct file *file = iocb->ki_filp; struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; - struct page *pinned[2]; struct page **pages = NULL; struct iov_iter i; loff_t *ppos = &iocb->ki_pos; @@ -871,9 +909,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || (file->f_flags & O_DIRECT)); - pinned[0] = NULL; - pinned[1] = NULL; - start_pos = pos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); @@ -961,32 +996,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, first_index = pos >> PAGE_CACHE_SHIFT; last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; - /* - * there are lots of better ways to do this, but this code - * makes sure the first and last page in the file range are - * up to date and ready for cow - */ - if ((pos & (PAGE_CACHE_SIZE - 1))) { - pinned[0] = grab_cache_page(inode->i_mapping, 
first_index); - if (!PageUptodate(pinned[0])) { - ret = btrfs_readpage(NULL, pinned[0]); - BUG_ON(ret); - wait_on_page_locked(pinned[0]); - } else { - unlock_page(pinned[0]); - } - } - if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) { - pinned[1] = grab_cache_page(inode->i_mapping, last_index); - if (!PageUptodate(pinned[1])) { - ret = btrfs_readpage(NULL, pinned[1]); - BUG_ON(ret); - wait_on_page_locked(pinned[1]); - } else { - unlock_page(pinned[1]); - } - } - while (iov_iter_count(&i) > 0) { size_t offset = pos & (PAGE_CACHE_SIZE - 1); size_t write_bytes = min(iov_iter_count(&i), @@ -1023,8 +1032,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, copied = btrfs_copy_from_user(pos, num_pages, write_bytes, pages, &i); - dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + + /* + * if we have trouble faulting in the pages, fall + * back to one page at a time + */ + if (copied < write_bytes) + nrptrs = 1; + + if (copied == 0) + dirty_pages = 0; + else + dirty_pages = (copied + offset + + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; if (num_pages > dirty_pages) { if (copied > 0) @@ -1068,10 +1089,6 @@ out: err = ret; kfree(pages); - if (pinned[0]) - page_cache_release(pinned[0]); - if (pinned[1]) - page_cache_release(pinned[1]); *ppos = pos; /* -- cgit v1.2.2 From 31339acd07b4ba687906702085127895a56eb920 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 7 Mar 2011 11:10:24 -0500 Subject: Btrfs: deal with short returns from copy_from_user When copy_from_user is only able to copy some of the bytes we requested, we may end up creating a partially up to date page. To avoid garbage in the page, we need to treat a partial copy as a zero length copy. This makes the rest of the file_write code drop the page and retry the whole copy instead of marking the partially up to date page as dirty. Signed-off-by: Chris Mason cc: stable@kernel.org --- fs/btrfs/file.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 13664b315fe2..ab22ca4f237f 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -69,6 +69,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, /* Flush processor's dcache for this page */ flush_dcache_page(page); + + /* + * if we get a partial write, we can end up with + * partially up to date pages. These add + * a lot of complexity, so make sure they don't + * happen by forcing this copy to be retried. + * + * The rest of the btrfs_file_write code will fall + * back to page at a time copies after we return 0. + */ + if (!PageUptodate(page) && copied < count) + copied = 0; + iov_iter_advance(i, copied); write_bytes -= copied; total_copied += copied; -- cgit v1.2.2 From ea8efc74bd0402b4d5f663d007b4e25fa29ea778 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 8 Mar 2011 11:54:40 -0500 Subject: Btrfs: make sure not to return overlapping extents to fiemap The btrfs fiemap code was incorrectly returning duplicate or overlapping extents in some cases. cp was blindly trusting this result and we would end up with a destination file that was bigger than the original because some bytes were copied twice. The fix here adjusts our offsets to make sure we're always moving forward in the fiemap results. 
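Condensed, the adjustment clamps every extent to the running cursor (a sketch of the logic in the diff below):

	/* never report anything before 'off', the running cursor */
	em_start = max(em->start, off);
	offset_in_extent = em_start - em->start;
	em_len = extent_map_end(em) - em_start;

	/* keep the disk offset in step with the clamped logical start */
	disko = em->block_start + offset_in_extent;

	/* bump the cursor so the next lookup moves strictly forward */
	off = extent_map_end(em);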
Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ff45b80d90f0..9fcb5ede6b72 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3046,17 +3046,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } while (!end) { - off = extent_map_end(em); - if (off >= max) - end = 1; + u64 offset_in_extent; + + /* break if the extent we found is outside the range */ + if (em->start >= max || extent_map_end(em) < off) + break; + + /* + * get_extent may return an extent that starts before our + * requested range. We have to make sure the ranges + * we return to fiemap always move forward and don't + * overlap, so adjust the offsets here + */ + em_start = max(em->start, off); - em_start = em->start; - em_len = em->len; + /* + * record the offset from the start of the extent + * for adjusting the disk offset below + */ + offset_in_extent = em_start - em->start; em_end = extent_map_end(em); + em_len = em_end - em_start; emflags = em->flags; disko = 0; flags = 0; + /* + * bump off for our next call to get_extent + */ + off = extent_map_end(em); + if (off >= max) + end = 1; + if (em->block_start == EXTENT_MAP_LAST_BYTE) { end = 1; flags |= FIEMAP_EXTENT_LAST; @@ -3067,7 +3088,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, flags |= (FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN); } else { - disko = em->block_start; + disko = em->block_start + offset_in_extent; } if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) flags |= FIEMAP_EXTENT_ENCODED; -- cgit v1.2.2 From 7eaceaccab5f40bbfda044629a6298616aeaed50 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 10 Mar 2011 08:52:07 +0100 Subject: block: remove per-queue plugging Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So lets kill off the old plugging along with aops->sync_page(). 
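For reference, the explicit replacement looks roughly like this at a submitter (a sketch of the on-stack plugging API; the bio batch is hypothetical):

	struct blk_plug plug;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)		/* hypothetical bio batch */
		submit_bio(WRITE, bios[i]);	/* queued behind the plug */
	blk_finish_plug(&plug);			/* submits the whole batch */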
Signed-off-by: Jens Axboe --- fs/btrfs/disk-io.c | 79 ----------------------------------------------- fs/btrfs/inode.c | 1 - fs/btrfs/volumes.c | 91 +++++++----------------------------------------------- 3 files changed, 11 insertions(+), 160 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e1aa8d607bc7..ada1f6bd0a57 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = { .writepages = btree_writepages, .releasepage = btree_releasepage, .invalidatepage = btree_invalidatepage, - .sync_page = block_sync_page, #ifdef CONFIG_MIGRATION .migratepage = btree_migratepage, #endif @@ -1330,82 +1329,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) return ret; } -/* - * this unplugs every device on the box, and it is only used when page - * is null - */ -static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) -{ - struct btrfs_device *device; - struct btrfs_fs_info *info; - - info = (struct btrfs_fs_info *)bdi->unplug_io_data; - list_for_each_entry(device, &info->fs_devices->devices, dev_list) { - if (!device->bdev) - continue; - - bdi = blk_get_backing_dev_info(device->bdev); - if (bdi->unplug_io_fn) - bdi->unplug_io_fn(bdi, page); - } -} - -static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) -{ - struct inode *inode; - struct extent_map_tree *em_tree; - struct extent_map *em; - struct address_space *mapping; - u64 offset; - - /* the generic O_DIRECT read code does this */ - if (1 || !page) { - __unplug_io_fn(bdi, page); - return; - } - - /* - * page->mapping may change at any time. Get a consistent copy - * and use that for everything below - */ - smp_mb(); - mapping = page->mapping; - if (!mapping) - return; - - inode = mapping->host; - - /* - * don't do the expensive searching for a small number of - * devices - */ - if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) { - __unplug_io_fn(bdi, page); - return; - } - - offset = page_offset(page); - - em_tree = &BTRFS_I(inode)->extent_tree; - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); - read_unlock(&em_tree->lock); - if (!em) { - __unplug_io_fn(bdi, page); - return; - } - - if (em->block_start >= EXTENT_MAP_LAST_BYTE) { - free_extent_map(em); - __unplug_io_fn(bdi, page); - return; - } - offset = offset - em->start; - btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree, - em->block_start + offset, page); - free_extent_map(em); -} - /* * If this fails, caller must call bdi_destroy() to get rid of the * bdi again. 
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) return err; bdi->ra_pages = default_backing_dev_info.ra_pages; - bdi->unplug_io_fn = btrfs_unplug_io_fn; - bdi->unplug_io_data = info; bdi->congested_fn = btrfs_congested_fn; bdi->congested_data = info; return 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fb9bd7832b6d..462e08e724b0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7218,7 +7218,6 @@ static const struct address_space_operations btrfs_aops = { .writepage = btrfs_writepage, .writepages = btrfs_writepages, .readpages = btrfs_readpages, - .sync_page = block_sync_page, .direct_IO = btrfs_direct_IO, .invalidatepage = btrfs_invalidatepage, .releasepage = btrfs_releasepage, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index af7dbca15276..6e0e82a1b188 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) struct bio *cur; int again = 0; unsigned long num_run; - unsigned long num_sync_run; unsigned long batch_run = 0; unsigned long limit; unsigned long last_waited = 0; @@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; - /* we want to make sure that every time we switch from the sync - * list to the normal list, we unplug - */ - num_sync_run = 0; - loop: spin_lock(&device->io_lock); @@ -223,15 +217,6 @@ loop_lock: spin_unlock(&device->io_lock); - /* - * if we're doing the regular priority list, make sure we unplug - * for any high prio bios we've sent down - */ - if (pending_bios == &device->pending_bios && num_sync_run > 0) { - num_sync_run = 0; - blk_run_backing_dev(bdi, NULL); - } - while (pending) { rmb(); @@ -259,19 +244,11 @@ loop_lock: BUG_ON(atomic_read(&cur->bi_cnt) == 0); - if (cur->bi_rw & REQ_SYNC) - num_sync_run++; - submit_bio(cur->bi_rw, cur); num_run++; batch_run++; - if (need_resched()) { - if (num_sync_run) { - blk_run_backing_dev(bdi, NULL); - num_sync_run = 0; - } + if (need_resched()) cond_resched(); - } /* * we made progress, there is more work to do and the bdi @@ -304,13 +281,8 @@ loop_lock: * against it before looping */ last_waited = ioc->last_waited; - if (need_resched()) { - if (num_sync_run) { - blk_run_backing_dev(bdi, NULL); - num_sync_run = 0; - } + if (need_resched()) cond_resched(); - } continue; } spin_lock(&device->io_lock); @@ -323,22 +295,6 @@ loop_lock: } } - if (num_sync_run) { - num_sync_run = 0; - blk_run_backing_dev(bdi, NULL); - } - /* - * IO has already been through a long path to get here. Checksumming, - * async helper threads, perhaps compression. We've done a pretty - * good job of collecting a batch of IO and should just unplug - * the device right away. - * - * This will help anyone who is waiting on the IO, they might have - * already unplugged, but managed to do so before the bio they - * cared about found its way down here. 
- */ - blk_run_backing_dev(bdi, NULL); - cond_resched(); if (again) goto loop; @@ -2948,7 +2904,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num, static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, u64 logical, u64 *length, struct btrfs_multi_bio **multi_ret, - int mirror_num, struct page *unplug_page) + int mirror_num) { struct extent_map *em; struct map_lookup *map; @@ -2980,11 +2936,6 @@ again: em = lookup_extent_mapping(em_tree, logical, *length); read_unlock(&em_tree->lock); - if (!em && unplug_page) { - kfree(multi); - return 0; - } - if (!em) { printk(KERN_CRIT "unable to find logical %llu len %llu\n", (unsigned long long)logical, @@ -3040,13 +2991,13 @@ again: *length = em->len - offset; } - if (!multi_ret && !unplug_page) + if (!multi_ret) goto out; num_stripes = 1; stripe_index = 0; if (map->type & BTRFS_BLOCK_GROUP_RAID1) { - if (unplug_page || (rw & REQ_WRITE)) + if (rw & REQ_WRITE) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; @@ -3068,7 +3019,7 @@ again: stripe_index = do_div(stripe_nr, factor); stripe_index *= map->sub_stripes; - if (unplug_page || (rw & REQ_WRITE)) + if (rw & REQ_WRITE) num_stripes = map->sub_stripes; else if (mirror_num) stripe_index += mirror_num - 1; @@ -3088,22 +3039,10 @@ again: BUG_ON(stripe_index >= map->num_stripes); for (i = 0; i < num_stripes; i++) { - if (unplug_page) { - struct btrfs_device *device; - struct backing_dev_info *bdi; - - device = map->stripes[stripe_index].dev; - if (device->bdev) { - bdi = blk_get_backing_dev_info(device->bdev); - if (bdi->unplug_io_fn) - bdi->unplug_io_fn(bdi, unplug_page); - } - } else { - multi->stripes[i].physical = - map->stripes[stripe_index].physical + - stripe_offset + stripe_nr * map->stripe_len; - multi->stripes[i].dev = map->stripes[stripe_index].dev; - } + multi->stripes[i].physical = + map->stripes[stripe_index].physical + + stripe_offset + stripe_nr * map->stripe_len; + multi->stripes[i].dev = map->stripes[stripe_index].dev; stripe_index++; } if (multi_ret) { @@ -3121,7 +3060,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, struct btrfs_multi_bio **multi_ret, int mirror_num) { return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, - mirror_num, NULL); + mirror_num); } int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, @@ -3189,14 +3128,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, return 0; } -int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, - u64 logical, struct page *page) -{ - u64 length = PAGE_CACHE_SIZE; - return __btrfs_map_block(map_tree, READ, logical, &length, - NULL, 0, page); -} - static void end_bio_multi_stripe(struct bio *bio, int err) { struct btrfs_multi_bio *multi = bio->bi_private; -- cgit v1.2.2 From 721a9602e6607417c6bc15b18e97a2f35266c690 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 9 Mar 2011 11:56:30 +0100 Subject: block: kill off REQ_UNPLUG With the plugging now being explicitly controlled by the submitter, callers need not pass down unplugging hints to the block layer. If they want to unplug, it's because they manually plugged on their own - in which case, they should just unplug at will. 
Signed-off-by: Jens Axboe --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 92ac5192c518..b76f7cd47401 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2182,7 +2182,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unsigned long nr_written = 0; if (wbc->sync_mode == WB_SYNC_ALL) - write_flags = WRITE_SYNC_PLUG; + write_flags = WRITE_SYNC; else write_flags = WRITE; -- cgit v1.2.2 From b4966b7770349deb05e3dd2bd2c65d2d044abbbb Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Wed, 9 Mar 2011 16:46:42 +0000 Subject: btrfs: fix dip leak The btrfs DIO code leaks dip structs when dip->csums allocation fails; bio->bi_end_io isn't set at the point where the free_ordered branch is consequently taken, thus bio_endio doesn't call the function which would free it in the normal case. Fix. Signed-off-by: Daniel J Blueman Acked-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 44b926646e33..e7a8303328b2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6058,6 +6058,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, if (!skip_sum) { dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); if (!dip->csums) { + kfree(dip); ret = -ENOMEM; goto free_ordered; } -- cgit v1.2.2 From 7e6b6465e6efbca3985258996be9c189da96c8bf Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Fri, 18 Feb 2011 09:21:17 +0000 Subject: btrfs: fix not enough reserved space btrfs_link() will insert 3 items(inode ref, dir name item and dir index item) into the b+ tree and update 2 items(its inode, and parent's inode) in the b+ tree. So we should reserve space for these 5 items, not 3 items. Reported-by: Tsutomu Itoh Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e7a8303328b2..db67821ccac2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4823,10 +4823,11 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, goto fail; /* - * 1 item for inode ref + * 2 items for inode and inode ref * 2 items for dir items + * 1 item for parent inode */ - trans = btrfs_start_transaction(root, 3); + trans = btrfs_start_transaction(root, 5); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto fail; -- cgit v1.2.2 From 36e39c40b3facc9b489a13f1d301fc53ff6960a3 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sat, 12 Mar 2011 07:08:42 -0500 Subject: Btrfs: break out of shrink_delalloc earlier Josef had changed shrink_delalloc to exit after three shrink attempts, which wasn't quite enough because new writers could race in and steal free space. But it also fixed deadlocks and stalls as we tried to recover delalloc reservations. The code was tweaked to loop 1024 times, and would reset the counter any time a small amount of progress was made. This was too drastic, and with a lot of writers we can end up stuck in shrink_delalloc forever. The shrink_delalloc loop is fairly complex because the caller is looping too, and the caller will go ahead and force a transaction commit to make sure we reclaim space. This reworks things to exit shrink_delalloc when we've forced some writeback and the delalloc reservations have gone down. 
This means the writeback has not just started but has also finished at least some of the metadata changes required to reclaim delalloc space. If we've got this wrong, we're returning ENOSPC too early, which is a big improvement over the current behavior of hanging the machine. Test 224 in xfstests hammers on this nicely, and with 1000 writers trying to fill a 1GB drive we get our first ENOSPC at 93% full. The other writers are able to continue until we get 100%. This is a worst case test for btrfs because the 1000 writers are doing small IO, and the small FS size means we don't have a lot of room for metadata chunks. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 9 +++++++++ fs/btrfs/extent-tree.c | 35 +++++++++++++++++++++++------------ 2 files changed, 32 insertions(+), 12 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 28188a786da0..8b4b9d158a0a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -729,6 +729,15 @@ struct btrfs_space_info { u64 disk_total; /* total bytes on disk, takes mirrors into account */ + /* + * we bump reservation progress every time we decrement + * bytes_reserved. This way people waiting for reservations + * know something good has happened and they can check + * for progress. The number here isn't to be trusted, it + * just shows reclaim activity + */ + unsigned long reservation_progress; + int full; /* indicates that we cannot allocate any more chunks for this space */ int force_alloc; /* set if we need to force a chunk alloc for diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 100e409e9053..f1db57d4a016 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3343,15 +3343,16 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 max_reclaim; u64 reclaimed = 0; long time_left; - int pause = 1; int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; int loops = 0; + unsigned long progress; block_rsv = &root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; smp_mb(); reserved = space_info->bytes_reserved; + progress = space_info->reservation_progress; if (reserved == 0) return 0; @@ -3366,31 +3367,36 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); - if (reserved > space_info->bytes_reserved) { - loops = 0; + if (reserved > space_info->bytes_reserved) reclaimed += reserved - space_info->bytes_reserved; - } else { - loops++; - } reserved = space_info->bytes_reserved; spin_unlock(&space_info->lock); + loops++; + if (reserved == 0 || reclaimed >= max_reclaim) break; if (trans && trans->transaction->blocked) return -EAGAIN; - __set_current_state(TASK_INTERRUPTIBLE); - time_left = schedule_timeout(pause); + time_left = schedule_timeout_interruptible(1); /* We were interrupted, exit */ if (time_left) break; - pause <<= 1; - if (pause > HZ / 10) - pause = HZ / 10; + /* we've kicked the IO a few times, if anything has been freed, + * exit. 
There is no sense in looping here for a long time + * when we really need to commit the transaction, or there are + * just too many writers without enough free space + */ + + if (loops > 3) { + smp_mb(); + if (progress != space_info->reservation_progress) + break; + } } return reclaimed >= to_reclaim; @@ -3613,6 +3619,7 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, if (num_bytes) { spin_lock(&space_info->lock); space_info->bytes_reserved -= num_bytes; + space_info->reservation_progress++; spin_unlock(&space_info->lock); } } @@ -3845,6 +3852,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) if (block_rsv->reserved >= block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; sinfo->bytes_reserved -= num_bytes; + sinfo->reservation_progress++; block_rsv->reserved = block_rsv->size; block_rsv->full = 1; } @@ -4006,7 +4014,6 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) to_reserve = 0; } spin_unlock(&BTRFS_I(inode)->accounting_lock); - to_reserve += calc_csum_metadata_size(inode, num_bytes); ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); if (ret) @@ -4134,6 +4141,7 @@ static int update_block_group(struct btrfs_trans_handle *trans, btrfs_set_block_group_used(&cache->item, old_val); cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes; + cache->space_info->reservation_progress++; cache->space_info->bytes_used += num_bytes; cache->space_info->disk_used += num_bytes * factor; spin_unlock(&cache->lock); @@ -4185,6 +4193,7 @@ static int pin_down_extent(struct btrfs_root *root, if (reserved) { cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes; + cache->space_info->reservation_progress++; } spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); @@ -4235,6 +4244,7 @@ static int update_reserved_bytes(struct btrfs_block_group_cache *cache, space_info->bytes_readonly += num_bytes; cache->reserved -= num_bytes; space_info->bytes_reserved -= num_bytes; + space_info->reservation_progress++; } spin_unlock(&cache->lock); spin_unlock(&space_info->lock); @@ -4713,6 +4723,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, if (ret) { spin_lock(&cache->space_info->lock); cache->space_info->bytes_reserved -= buf->len; + cache->space_info->reservation_progress++; spin_unlock(&cache->space_info->lock); } goto out; -- cgit v1.2.2 From 5fe0c2378884e68beb532f5890cc0e3539ac747b Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sat, 29 Jan 2011 18:43:25 +0530 Subject: exportfs: Return the minimum required handle size The exportfs encode handle function should return the minimum required handle size. This helps userspace find the required handle size: pass a handle size of 0 on the first call, then redo the call with the returned handle size value.
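From user space, the same two-step sizing pattern is visible through name_to_handle_at(2); a sketch (the path is hypothetical):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>

int main(void)
{
	struct file_handle probe = { .handle_bytes = 0 };
	struct file_handle *fh;
	int mount_id;

	/* step 1: a zero-sized handle fails with EOVERFLOW, but the
	 * kernel reports the minimum required size in handle_bytes */
	if (name_to_handle_at(AT_FDCWD, "/mnt/file", &probe,
			      &mount_id, 0) == 0 || errno != EOVERFLOW)
		return 1;

	/* step 2: redo the call with the reported size */
	fh = malloc(sizeof(*fh) + probe.handle_bytes);
	if (!fh)
		return 1;
	fh->handle_bytes = probe.handle_bytes;
	if (name_to_handle_at(AT_FDCWD, "/mnt/file", fh, &mount_id, 0) < 0)
		return 1;
	printf("handle: %u bytes, type %d\n", fh->handle_bytes,
	       fh->handle_type);
	free(fh);
	return 0;
}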
Acked-by: Serge Hallyn Signed-off-by: Aneesh Kumar K.V Signed-off-by: Al Viro --- fs/btrfs/export.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ff27d7a477b2..b4ffad859adb 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -21,9 +21,13 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, int len = *max_len; int type; - if ((len < BTRFS_FID_SIZE_NON_CONNECTABLE) || - (connectable && len < BTRFS_FID_SIZE_CONNECTABLE)) + if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) { + *max_len = BTRFS_FID_SIZE_CONNECTABLE; return 255; + } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) { + *max_len = BTRFS_FID_SIZE_NON_CONNECTABLE; + return 255; + } len = BTRFS_FID_SIZE_NON_CONNECTABLE; type = FILEID_BTRFS_WITHOUT_PARENT; -- cgit v1.2.2 From f17b6042073e7000a90063f7edbca59a5bd1caa2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sat, 29 Jan 2011 18:43:30 +0530 Subject: fs: Remove i_nlink check from file system link callback Now that the VFS checks for inode->i_nlink == 0 and returns a proper error, remove the similar check from the file system. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Al Viro --- fs/btrfs/inode.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0efdb65953c5..c23f050f47c2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4806,9 +4806,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, int err; int drop_inode = 0; - if (inode->i_nlink == 0) - return -ENOENT; - /* do not allow sys_link's with other subvols of the same device */ if (root->objectid != BTRFS_I(inode)->root->objectid) return -EPERM; -- cgit v1.2.2 From 9f570b8d48b6677b5557d86fb3ca148215e295f2 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 25 Jan 2011 12:42:37 -0500 Subject: Btrfs: fix formatting in file.c Sorry, but these were bugging me. Just cleanup some of the formatting in file.c. Signed-off-by: Josef Bacik --- fs/btrfs/file.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f447b783bb84..4d4975592668 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -88,9 +88,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, total_copied += copied; /* Return to btrfs_file_aio_write to fault page */ - if (unlikely(copied == 0)) { + if (unlikely(copied == 0)) break; - } if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { offset += copied; @@ -162,13 +161,14 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, ClearPageChecked(p); set_page_dirty(p); } - if (end_pos > isize) { + + /* + * we've only changed i_size in ram, and we haven't updated + * the disk i_size. There is no need to log the inode + * at this time. + */ + if (end_pos > isize) i_size_write(inode, end_pos); - /* we've only changed i_size in ram, and we haven't updated * the disk i_size. There is no need to log the inode * at this time. */ - } return 0; } -- cgit v1.2.2 From d0215f3e5ebb5803cd6ec067b10c5e00a3ad7cfc Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 25 Jan 2011 14:57:24 -0500 Subject: Btrfs: simplify our write path Our aio_write function is huge and kind of hard to follow at times. This patch fixes that by breaking the buffered and direct write paths out into separate functions so it's a little clearer what's going on.
I've also fixed some wrong typing that we had and added the ability to handle getting an error back from btrfs_set_extent_delalloc. Tested this with xfstests and everything came out fine. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/file.c | 355 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 180 insertions(+), 175 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 4d4975592668..f2a80e570a6c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -45,14 +45,14 @@ * and be replaced with calls into generic code. */ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, - int write_bytes, + size_t write_bytes, struct page **prepared_pages, struct iov_iter *i) { size_t copied = 0; + size_t total_copied = 0; int pg = 0; int offset = pos & (PAGE_CACHE_SIZE - 1); - int total_copied = 0; while (write_bytes > 0) { size_t count = min_t(size_t, @@ -129,13 +129,12 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) * this also makes the decision about creating an inline extent vs * doing real data extents, marking pages dirty and delalloc as required. */ -static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct file *file, - struct page **pages, - size_t num_pages, - loff_t pos, - size_t write_bytes) +static noinline int dirty_and_release_pages(struct btrfs_root *root, + struct file *file, + struct page **pages, + size_t num_pages, + loff_t pos, + size_t write_bytes) { int err = 0; int i; @@ -153,7 +152,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, end_of_last_block = start_pos + num_bytes - 1; err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, NULL); - BUG_ON(err); + if (err) + return err; for (i = 0; i < num_pages; i++) { struct page *p = pages[i]; @@ -896,127 +896,38 @@ fail: } -static ssize_t btrfs_file_aio_write(struct kiocb *iocb, - const struct iovec *iov, - unsigned long nr_segs, loff_t pos) +static noinline ssize_t __btrfs_buffered_write(struct file *file, + struct iov_iter *i, + loff_t pos) { - struct file *file = iocb->ki_filp; struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; struct page **pages = NULL; - struct iov_iter i; - loff_t *ppos = &iocb->ki_pos; - loff_t start_pos; - ssize_t num_written = 0; - ssize_t err = 0; - size_t count; - size_t ocount; - int ret = 0; - int nrptrs; unsigned long first_index; unsigned long last_index; - int will_write; - int buffered = 0; - int copied = 0; - int dirty_pages = 0; - - will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || - (file->f_flags & O_DIRECT)); - - start_pos = pos; - - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - - mutex_lock(&inode->i_mutex); - - err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); - if (err) - goto out; - count = ocount; - - current->backing_dev_info = inode->i_mapping->backing_dev_info; - err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); - if (err) - goto out; - - if (count == 0) - goto out; - - err = file_remove_suid(file); - if (err) - goto out; - - /* - * If BTRFS flips readonly due to some impossible error - * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), - * although we have opened a file as writable, we have - * to stop this write operation to ensure FS consistency. 
- */ - if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { - err = -EROFS; - goto out; - } - - file_update_time(file); - BTRFS_I(inode)->sequence++; - - if (unlikely(file->f_flags & O_DIRECT)) { - num_written = generic_file_direct_write(iocb, iov, &nr_segs, - pos, ppos, count, - ocount); - /* - * the generic O_DIRECT will update in-memory i_size after the - * DIOs are done. But our endio handlers that update the on - * disk i_size never update past the in memory i_size. So we - * need one more update here to catch any additions to the - * file - */ - if (inode->i_size != BTRFS_I(inode)->disk_i_size) { - btrfs_ordered_update_i_size(inode, inode->i_size, NULL); - mark_inode_dirty(inode); - } - - if (num_written < 0) { - ret = num_written; - num_written = 0; - goto out; - } else if (num_written == count) { - /* pick up pos changes done by the generic code */ - pos = *ppos; - goto out; - } - /* - * We are going to do buffered for the rest of the range, so we - * need to make sure to invalidate the buffered pages when we're - * done. - */ - buffered = 1; - pos += num_written; - } + size_t num_written = 0; + int nrptrs; + int ret; - iov_iter_init(&i, iov, nr_segs, count, num_written); - nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) / + nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / (sizeof(struct page *))); pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); - if (!pages) { - ret = -ENOMEM; - goto out; - } - - /* generic_write_checks can change our pos */ - start_pos = pos; + if (!pages) + return -ENOMEM; first_index = pos >> PAGE_CACHE_SHIFT; - last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; + last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT; - while (iov_iter_count(&i) > 0) { + while (iov_iter_count(i) > 0) { size_t offset = pos & (PAGE_CACHE_SIZE - 1); - size_t write_bytes = min(iov_iter_count(&i), + size_t write_bytes = min(iov_iter_count(i), nrptrs * (size_t)PAGE_CACHE_SIZE - offset); size_t num_pages = (write_bytes + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + size_t dirty_pages; + size_t copied; WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); @@ -1025,15 +936,15 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, * Fault pages before locking them in prepare_pages * to avoid recursive lock */ - if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) { + if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) { ret = -EFAULT; - goto out; + break; } ret = btrfs_delalloc_reserve_space(inode, num_pages << PAGE_CACHE_SHIFT); if (ret) - goto out; + break; ret = prepare_pages(root, file, pages, num_pages, pos, first_index, last_index, @@ -1041,11 +952,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, if (ret) { btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); - goto out; + break; } copied = btrfs_copy_from_user(pos, num_pages, - write_bytes, pages, &i); + write_bytes, pages, i); /* * if we have trouble faulting in the pages, fall @@ -1061,6 +972,13 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + /* + * If we had a short copy we need to release the excess delaloc + * bytes we reserved. We need to increment outstanding_extents + * because btrfs_delalloc_release_space will decrement it, but + * we still have an outstanding extent for the chunk we actually + * managed to copy. 
+ */ if (num_pages > dirty_pages) { if (copied > 0) atomic_inc( @@ -1071,39 +989,157 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, } if (copied > 0) { - dirty_and_release_pages(NULL, root, file, pages, - dirty_pages, pos, copied); + ret = dirty_and_release_pages(root, file, pages, + dirty_pages, pos, + copied); + if (ret) { + btrfs_delalloc_release_space(inode, + dirty_pages << PAGE_CACHE_SHIFT); + btrfs_drop_pages(pages, num_pages); + break; + } } btrfs_drop_pages(pages, num_pages); - if (copied > 0) { - if (will_write) { - filemap_fdatawrite_range(inode->i_mapping, pos, - pos + copied - 1); - } else { - balance_dirty_pages_ratelimited_nr( - inode->i_mapping, - dirty_pages); - if (dirty_pages < - (root->leafsize >> PAGE_CACHE_SHIFT) + 1) - btrfs_btree_balance_dirty(root, 1); - btrfs_throttle(root); - } - } + cond_resched(); + + balance_dirty_pages_ratelimited_nr(inode->i_mapping, + dirty_pages); + if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1) + btrfs_btree_balance_dirty(root, 1); + btrfs_throttle(root); pos += copied; num_written += copied; + } - cond_resched(); + kfree(pages); + + return num_written ? num_written : ret; +} + +static ssize_t __btrfs_direct_write(struct kiocb *iocb, + const struct iovec *iov, + unsigned long nr_segs, loff_t pos, + loff_t *ppos, size_t count, size_t ocount) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = fdentry(file)->d_inode; + struct iov_iter i; + ssize_t written; + ssize_t written_buffered; + loff_t endbyte; + int err; + + written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos, + count, ocount); + + /* + * the generic O_DIRECT will update in-memory i_size after the + * DIOs are done. But our endio handlers that update the on + * disk i_size never update past the in memory i_size. So we + * need one more update here to catch any additions to the + * file + */ + if (inode->i_size != BTRFS_I(inode)->disk_i_size) { + btrfs_ordered_update_i_size(inode, inode->i_size, NULL); + mark_inode_dirty(inode); } + + if (written < 0 || written == count) + return written; + + pos += written; + count -= written; + iov_iter_init(&i, iov, nr_segs, count, written); + written_buffered = __btrfs_buffered_write(file, &i, pos); + if (written_buffered < 0) { + err = written_buffered; + goto out; + } + endbyte = pos + written_buffered - 1; + err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); + if (err) + goto out; + written += written_buffered; + *ppos = pos + written_buffered; + invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, + endbyte >> PAGE_CACHE_SHIFT); out: - mutex_unlock(&inode->i_mutex); - if (ret) - err = ret; + return written ? 
written : err; +} - kfree(pages); - *ppos = pos; +static ssize_t btrfs_file_aio_write(struct kiocb *iocb, + const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + loff_t *ppos = &iocb->ki_pos; + ssize_t num_written = 0; + ssize_t err = 0; + size_t count, ocount; + + vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + + mutex_lock(&inode->i_mutex); + + err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); + if (err) { + mutex_unlock(&inode->i_mutex); + goto out; + } + count = ocount; + + current->backing_dev_info = inode->i_mapping->backing_dev_info; + err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); + if (err) { + mutex_unlock(&inode->i_mutex); + goto out; + } + + if (count == 0) { + mutex_unlock(&inode->i_mutex); + goto out; + } + + err = file_remove_suid(file); + if (err) { + mutex_unlock(&inode->i_mutex); + goto out; + } + + /* + * If BTRFS flips readonly due to some impossible error + * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), + * although we have opened a file as writable, we have + * to stop this write operation to ensure FS consistency. + */ + if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + mutex_unlock(&inode->i_mutex); + err = -EROFS; + goto out; + } + + file_update_time(file); + BTRFS_I(inode)->sequence++; + + if (unlikely(file->f_flags & O_DIRECT)) { + num_written = __btrfs_direct_write(iocb, iov, nr_segs, + pos, ppos, count, ocount); + } else { + struct iov_iter i; + + iov_iter_init(&i, iov, nr_segs, count, num_written); + + num_written = __btrfs_buffered_write(file, &i, pos); + if (num_written > 0) + *ppos = pos + num_written; + } + + mutex_unlock(&inode->i_mutex); /* * we want to make sure fsync finds this change @@ -1118,43 +1154,12 @@ out: * one running right now. */ BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; - - if (num_written > 0 && will_write) { - struct btrfs_trans_handle *trans; - - err = btrfs_wait_ordered_range(inode, start_pos, num_written); - if (err) + if (num_written > 0 || num_written == -EIOCBQUEUED) { + err = generic_write_sync(file, pos, num_written); + if (err < 0 && num_written > 0) num_written = err; - - if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) { - num_written = PTR_ERR(trans); - goto done; - } - mutex_lock(&inode->i_mutex); - ret = btrfs_log_dentry_safe(trans, root, - file->f_dentry); - mutex_unlock(&inode->i_mutex); - if (ret == 0) { - ret = btrfs_sync_log(trans, root); - if (ret == 0) - btrfs_end_transaction(trans, root); - else - btrfs_commit_transaction(trans, root); - } else if (ret != BTRFS_NO_LOG_SYNC) { - btrfs_commit_transaction(trans, root); - } else { - btrfs_end_transaction(trans, root); - } - } - if (file->f_flags & O_DIRECT && buffered) { - invalidate_mapping_pages(inode->i_mapping, - start_pos >> PAGE_CACHE_SHIFT, - (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); - } } -done: +out: current->backing_dev_info = NULL; return num_written ? num_written : err; } -- cgit v1.2.2 From 4a64001f0047956e283f7ada9843dfc3f3b5d8c8 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 25 Jan 2011 15:10:08 -0500 Subject: Btrfs: fix how we deal with the pages array in the write path Really we don't need to memset the pages array at all, since we know how many pages we're going to use in the array and pass that around. 
So don't memset, just trust we're not idiots and we pass num_pages around properly. Signed-off-by: Josef Bacik --- fs/btrfs/file.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f2a80e570a6c..24a19c2743ca 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -108,8 +108,6 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { - if (!pages[i]) - break; /* page checked is some magic around finding pages that * have been modified without going through btrfs_set_page_dirty * clear it here @@ -824,7 +822,6 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, return err; } - memset(pages, 0, num_pages * sizeof(struct page *)); again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); @@ -930,7 +927,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, size_t copied; WARN_ON(num_pages > nrptrs); - memset(pages, 0, sizeof(struct page *) * nrptrs); /* * Fault pages before locking them in prepare_pages @@ -946,6 +942,11 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, if (ret) break; + /* + * This is going to setup the pages array with the number of + * pages we want, so we don't really need to worry about the + * contents of pages from loop to loop + */ ret = prepare_pages(root, file, pages, num_pages, pos, first_index, last_index, write_bytes); -- cgit v1.2.2 From 57a45ced94fe48a701361d64230fc16eefa189dd Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 25 Jan 2011 16:30:38 -0500 Subject: Btrfs: change reserved_extents to an atomic_t We track delayed allocation per inode via two counters: outstanding_extents and reserved_extents. Outstanding_extents is already an atomic_t, but reserved_extents is not and is protected by a spinlock. So convert this to an atomic_t and instead of using a spinlock, use atomic_cmpxchg when releasing delalloc bytes. This makes our inode 72 bytes smaller, and reduces locking overhead (albeit it was minimal to begin with). Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/btrfs_inode.h | 3 +-- fs/btrfs/extent-tree.c | 42 ++++++++++++++++++++++++++---------------- fs/btrfs/inode.c | 5 ++--- 3 files changed, 29 insertions(+), 21 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index ccc991c542df..57c3bb2884ce 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -136,9 +136,8 @@ struct btrfs_inode { * items we think we'll end up using, and reserved_extents is the number * of extent items we've reserved metadata for.
*/ - spinlock_t accounting_lock; atomic_t outstanding_extents; - int reserved_extents; + atomic_t reserved_extents; /* * ordered_data_close is set by truncate when a file that used diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7b3089b5c2df..27376c97d85f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3996,6 +3996,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; u64 to_reserve; int nr_extents; + int reserved_extents; int ret; if (btrfs_transaction_in_commit(root->fs_info)) @@ -4003,25 +4004,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); - spin_lock(&BTRFS_I(inode)->accounting_lock); nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; - if (nr_extents > BTRFS_I(inode)->reserved_extents) { - nr_extents -= BTRFS_I(inode)->reserved_extents; + reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents); + + if (nr_extents > reserved_extents) { + nr_extents -= reserved_extents; to_reserve = calc_trans_metadata_size(root, nr_extents); } else { nr_extents = 0; to_reserve = 0; } - spin_unlock(&BTRFS_I(inode)->accounting_lock); + to_reserve += calc_csum_metadata_size(inode, num_bytes); ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); if (ret) return ret; - spin_lock(&BTRFS_I(inode)->accounting_lock); - BTRFS_I(inode)->reserved_extents += nr_extents; + atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents); atomic_inc(&BTRFS_I(inode)->outstanding_extents); - spin_unlock(&BTRFS_I(inode)->accounting_lock); block_rsv_add_bytes(block_rsv, to_reserve, 1); @@ -4036,20 +4036,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) struct btrfs_root *root = BTRFS_I(inode)->root; u64 to_free; int nr_extents; + int reserved_extents; num_bytes = ALIGN(num_bytes, root->sectorsize); atomic_dec(&BTRFS_I(inode)->outstanding_extents); WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); - spin_lock(&BTRFS_I(inode)->accounting_lock); - nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); - if (nr_extents < BTRFS_I(inode)->reserved_extents) { - nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents; - BTRFS_I(inode)->reserved_extents -= nr_extents; - } else { - nr_extents = 0; - } - spin_unlock(&BTRFS_I(inode)->accounting_lock); + reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents); + do { + int old, new; + + nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); + if (nr_extents >= reserved_extents) { + nr_extents = 0; + break; + } + old = reserved_extents; + nr_extents = reserved_extents - nr_extents; + new = reserved_extents - nr_extents; + old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents, + reserved_extents, new); + if (likely(old == reserved_extents)) + break; + reserved_extents = old; + } while (1); to_free = calc_csum_metadata_size(inode, num_bytes); if (nr_extents > 0) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9007bbd01dbf..d97b69afbbfb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6632,9 +6632,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->index_cnt = (u64)-1; ei->last_unlink_trans = 0; - spin_lock_init(&ei->accounting_lock); atomic_set(&ei->outstanding_extents, 0); - ei->reserved_extents = 0; + atomic_set(&ei->reserved_extents, 0); ei->ordered_data_close = 0; ei->orphan_meta_reserved = 0; @@ -6670,7 +6669,7 @@ void 
btrfs_destroy_inode(struct inode *inode) WARN_ON(!list_empty(&inode->i_dentry)); WARN_ON(inode->i_data.nrpages); WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents)); - WARN_ON(BTRFS_I(inode)->reserved_extents); + WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents)); /* * This can happen where we create an inode, but somebody else also -- cgit v1.2.2 From dc89e9824464e91fa0b06267864ceabe3186fd8b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 28 Jan 2011 17:05:48 -0500 Subject: Btrfs: use a slab for the free space entries Since we alloc/free free space entries a whole lot, lets use a slab to keep track of them. This makes some of my tests slightly faster. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 1 + fs/btrfs/free-space-cache.c | 34 ++++++++++++++++++---------------- fs/btrfs/inode.c | 10 ++++++++++ 3 files changed, 29 insertions(+), 16 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 7f78cc78fdd0..2c98d209e6ac 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -40,6 +40,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_transaction_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; +extern struct kmem_cache *btrfs_free_space_cachep; struct btrfs_ordered_sum; #define BTRFS_MAGIC "_BHRfS_M" diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index a0390657451b..0282033041e1 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -393,7 +393,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, break; need_loop = 1; - e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + e = kmem_cache_zalloc(btrfs_free_space_cachep, + GFP_NOFS); if (!e) { kunmap(page); unlock_page(page); @@ -405,7 +406,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, e->bytes = le64_to_cpu(entry->bytes); if (!e->bytes) { kunmap(page); - kfree(e); + kmem_cache_free(btrfs_free_space_cachep, e); unlock_page(page); page_cache_release(page); goto free_cache; @@ -420,7 +421,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); if (!e->bitmap) { kunmap(page); - kfree(e); + kmem_cache_free( + btrfs_free_space_cachep, e); unlock_page(page); page_cache_release(page); goto free_cache; @@ -1187,7 +1189,7 @@ static void free_bitmap(struct btrfs_block_group_cache *block_group, { unlink_free_space(block_group, bitmap_info); kfree(bitmap_info->bitmap); - kfree(bitmap_info); + kmem_cache_free(btrfs_free_space_cachep, bitmap_info); block_group->total_bitmaps--; recalculate_thresholds(block_group); } @@ -1342,8 +1344,8 @@ new_bitmap: /* no pre-allocated info, allocate a new one */ if (!info) { - info = kzalloc(sizeof(struct btrfs_free_space), - GFP_NOFS); + info = kmem_cache_zalloc(btrfs_free_space_cachep, + GFP_NOFS); if (!info) { spin_lock(&block_group->tree_lock); ret = -ENOMEM; @@ -1365,7 +1367,7 @@ out: if (info) { if (info->bitmap) kfree(info->bitmap); - kfree(info); + kmem_cache_free(btrfs_free_space_cachep, info); } return ret; @@ -1398,7 +1400,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, else __unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; - kfree(right_info); + kmem_cache_free(btrfs_free_space_cachep, right_info); merged = true; } @@ -1410,7 +1412,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, __unlink_free_space(block_group, left_info); info->offset = 
left_info->offset; info->bytes += left_info->bytes; - kfree(left_info); + kmem_cache_free(btrfs_free_space_cachep, left_info); merged = true; } @@ -1423,7 +1425,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info; int ret = 0; - info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); if (!info) return -ENOMEM; @@ -1450,7 +1452,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, link: ret = link_free_space(block_group, info); if (ret) - kfree(info); + kmem_cache_free(btrfs_free_space_cachep, info); out: spin_unlock(&block_group->tree_lock); @@ -1520,7 +1522,7 @@ again: kfree(info->bitmap); block_group->total_bitmaps--; } - kfree(info); + kmem_cache_free(btrfs_free_space_cachep, info); goto out_lock; } @@ -1556,7 +1558,7 @@ again: /* the hole we're creating ends at the end * of the info struct, just free the info */ - kfree(info); + kmem_cache_free(btrfs_free_space_cachep, info); } spin_unlock(&block_group->tree_lock); @@ -1689,7 +1691,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) unlink_free_space(block_group, info); if (info->bitmap) kfree(info->bitmap); - kfree(info); + kmem_cache_free(btrfs_free_space_cachep, info); if (need_resched()) { spin_unlock(&block_group->tree_lock); cond_resched(); @@ -1722,7 +1724,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, entry->offset += bytes; entry->bytes -= bytes; if (!entry->bytes) - kfree(entry); + kmem_cache_free(btrfs_free_space_cachep, entry); else link_free_space(block_group, entry); } @@ -1884,7 +1886,7 @@ out: block_group->free_space -= bytes; if (entry->bytes == 0) { block_group->free_extents--; - kfree(entry); + kmem_cache_free(btrfs_free_space_cachep, entry); } spin_unlock(&block_group->tree_lock); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d97b69afbbfb..2d2e079713d7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -50,6 +50,7 @@ #include "tree-log.h" #include "compression.h" #include "locking.h" +#include "free-space-cache.h" struct btrfs_iget_args { u64 ino; @@ -70,6 +71,7 @@ static struct kmem_cache *btrfs_inode_cachep; struct kmem_cache *btrfs_trans_handle_cachep; struct kmem_cache *btrfs_transaction_cachep; struct kmem_cache *btrfs_path_cachep; +struct kmem_cache *btrfs_free_space_cachep; #define S_SHIFT 12 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { @@ -6761,6 +6763,8 @@ void btrfs_destroy_cachep(void) kmem_cache_destroy(btrfs_transaction_cachep); if (btrfs_path_cachep) kmem_cache_destroy(btrfs_path_cachep); + if (btrfs_free_space_cachep) + kmem_cache_destroy(btrfs_free_space_cachep); } int btrfs_init_cachep(void) @@ -6789,6 +6793,12 @@ int btrfs_init_cachep(void) if (!btrfs_path_cachep) goto fail; + btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache", + sizeof(struct btrfs_free_space), 0, + SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); + if (!btrfs_free_space_cachep) + goto fail; + return 0; fail: btrfs_destroy_cachep(); -- cgit v1.2.2 From a41ad394a03b802497958d7c98a9dcf607266645 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 31 Jan 2011 15:30:16 -0500 Subject: Btrfs: convert to the new truncate sequence ->truncate() is going away, instead all of the work needs to be done in ->setattr(). So this converts us over to do this. It's fairly straightforward, just get rid of our .truncate inode operation and call btrfs_truncate() directly from btrfs_setsize. 
This works out better for us since truncate can technically return ENOSPC, and before we had no way of letting anybody know. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 2 +- fs/btrfs/file.c | 5 ++-- fs/btrfs/inode.c | 80 +++++++++++++++++++++++++------------------------------- 3 files changed, 40 insertions(+), 47 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2c98d209e6ac..34142d5647df 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2537,7 +2537,7 @@ void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending); void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); -int btrfs_cont_expand(struct inode *inode, loff_t size); +int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); int btrfs_invalidate_inodes(struct btrfs_root *root); void btrfs_add_delayed_iput(struct inode *inode); void btrfs_run_delayed_iputs(struct btrfs_root *root); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 24a19c2743ca..3786eca2a905 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -817,7 +817,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; if (start_pos > inode->i_size) { - err = btrfs_cont_expand(inode, start_pos); + err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); if (err) return err; } @@ -1330,7 +1330,8 @@ static long btrfs_fallocate(struct file *file, int mode, goto out; if (alloc_start > inode->i_size) { - ret = btrfs_cont_expand(inode, alloc_start); + ret = btrfs_cont_expand(inode, i_size_read(inode), + alloc_start); if (ret) goto out; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2d2e079713d7..3662ffec17d9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -84,7 +84,8 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, }; -static void btrfs_truncate(struct inode *inode); +static int btrfs_setsize(struct inode *inode, loff_t newsize); +static int btrfs_truncate(struct inode *inode); static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); static noinline int cow_file_range(struct inode *inode, struct page *locked_page, @@ -2371,6 +2372,11 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) /* if we have links, this was a truncate, lets do that */ if (inode->i_nlink) { + if (!S_ISREG(inode->i_mode)) { + WARN_ON(1); + iput(inode); + continue; + } nr_truncate++; btrfs_truncate(inode); } else { @@ -3538,7 +3544,7 @@ out: return ret; } -int btrfs_cont_expand(struct inode *inode, loff_t size) +int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -3546,7 +3552,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) struct extent_map *em = NULL; struct extent_state *cached_state = NULL; u64 mask = root->sectorsize - 1; - u64 hole_start = (inode->i_size + mask) & ~mask; + u64 hole_start = (oldsize + mask) & ~mask; u64 block_end = (size + mask) & ~mask; u64 last_byte; u64 cur_offset; @@ -3617,27 +3623,17 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) return err; } -static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) +static int btrfs_setsize(struct inode *inode, loff_t newsize) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; + loff_t oldsize = i_size_read(inode); unsigned 
long nr; int ret; - if (attr->ia_size == inode->i_size) + if (newsize == oldsize) return 0; - if (attr->ia_size > inode->i_size) { - unsigned long limit; - limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; - if (attr->ia_size > inode->i_sb->s_maxbytes) - return -EFBIG; - if (limit != RLIM_INFINITY && attr->ia_size > limit) { - send_sig(SIGXFSZ, current, 0); - return -EFBIG; - } - } - trans = btrfs_start_transaction(root, 5); if (IS_ERR(trans)) return PTR_ERR(trans); @@ -3651,16 +3647,16 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); - if (attr->ia_size > inode->i_size) { - ret = btrfs_cont_expand(inode, attr->ia_size); + if (newsize > oldsize) { + i_size_write(inode, newsize); + btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); + truncate_pagecache(inode, oldsize, newsize); + ret = btrfs_cont_expand(inode, oldsize, newsize); if (ret) { - btrfs_truncate(inode); + btrfs_setsize(inode, oldsize); return ret; } - i_size_write(inode, attr->ia_size); - btrfs_ordered_update_i_size(inode, inode->i_size, NULL); - trans = btrfs_start_transaction(root, 0); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); @@ -3676,22 +3672,22 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); - return 0; - } + } else { - /* - * We're truncating a file that used to have good data down to - * zero. Make sure it gets into the ordered flush list so that - * any new writes get down to disk quickly. - */ - if (attr->ia_size == 0) - BTRFS_I(inode)->ordered_data_close = 1; + /* + * We're truncating a file that used to have good data down to + * zero. Make sure it gets into the ordered flush list so that + * any new writes get down to disk quickly. 
+ */ + if (newsize == 0) + BTRFS_I(inode)->ordered_data_close = 1; - /* we don't support swapfiles, so vmtruncate shouldn't fail */ - ret = vmtruncate(inode, attr->ia_size); - BUG_ON(ret); + /* we don't support swapfiles, so vmtruncate shouldn't fail */ + truncate_setsize(inode, newsize); + ret = btrfs_truncate(inode); + } - return 0; + return ret; } static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) @@ -3708,7 +3704,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) return err; if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { - err = btrfs_setattr_size(inode, attr); + err = btrfs_setsize(inode, attr->ia_size); if (err) return err; } @@ -6478,7 +6474,7 @@ out: return ret; } -static void btrfs_truncate(struct inode *inode) +static int btrfs_truncate(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; @@ -6486,14 +6482,9 @@ static void btrfs_truncate(struct inode *inode) unsigned long nr; u64 mask = root->sectorsize - 1; - if (!S_ISREG(inode->i_mode)) { - WARN_ON(1); - return; - } - ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); if (ret) - return; + return ret; btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_ordered_update_i_size(inode, inode->i_size, NULL); @@ -6568,6 +6559,8 @@ static void btrfs_truncate(struct inode *inode) ret = btrfs_end_transaction_throttle(trans, root); BUG_ON(ret); btrfs_btree_balance_dirty(root, nr); + + return ret; } /* @@ -7367,7 +7360,6 @@ static const struct address_space_operations btrfs_symlink_aops = { }; static const struct inode_operations btrfs_file_inode_operations = { - .truncate = btrfs_truncate, .getattr = btrfs_getattr, .setattr = btrfs_setattr, .setxattr = btrfs_setxattr, -- cgit v1.2.2 From 3893e33b0bebee2f67d96b6c15259dc884523c20 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 31 Jan 2011 16:03:11 -0500 Subject: Btrfs: cleanup error handling in the truncate path Now that we can handle having errors in the truncate path let's make sure we return errors instead of doing BUG_ON() and such.
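As an illustration of the pattern the hunks below adopt, here is a self-contained sketch: record the first failure, keep running the remaining cleanup steps, and return the saved error instead of calling BUG_ON(). The demo_*() helpers are hypothetical stand-ins for the truncate sub-steps, not btrfs functions.

/* Hypothetical stand-ins for the truncate sub-steps. */
static int demo_truncate_items(void) { return 0; }
static int demo_end_transaction(void) { return -5; /* say, an I/O error */ }

static int demo_truncate_like(void)
{
	int ret;
	int err = 0;

	ret = demo_truncate_items();
	if (ret)
		err = ret;		/* remember the first error */

	ret = demo_end_transaction();
	if (ret && !err)
		err = ret;		/* don't let cleanup overwrite it */

	return err;			/* 0 only if every step succeeded */
}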
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 64 +++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 19 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3662ffec17d9..d83025063ee7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3597,13 +3597,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) err = btrfs_drop_extents(trans, inode, cur_offset, cur_offset + hole_size, &hint_byte, 1); - BUG_ON(err); + if (err) + break; err = btrfs_insert_file_extent(trans, root, inode->i_ino, cur_offset, 0, 0, hole_size, 0, hole_size, 0, 0, 0); - BUG_ON(err); + if (err) + break; btrfs_drop_extent_cache(inode, hole_start, last_byte - 1, 0); @@ -3641,7 +3643,10 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize) btrfs_set_trans_block_group(trans, inode); ret = btrfs_orphan_add(trans, inode); - BUG_ON(ret); + if (ret) { + btrfs_end_transaction(trans, root); + return ret; + } nr = trans->blocks_used; btrfs_end_transaction(trans, root); @@ -3658,17 +3663,24 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize) } trans = btrfs_start_transaction(root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) + return PTR_ERR(trans); + btrfs_set_trans_block_group(trans, inode); trans->block_rsv = root->orphan_block_rsv; BUG_ON(!trans->block_rsv); + /* + * If this fails just leave the orphan item so that it can get + * cleaned up next time we mount. + */ ret = btrfs_update_inode(trans, root, inode); - BUG_ON(ret); - if (inode->i_nlink > 0) { - ret = btrfs_orphan_del(trans, inode); - BUG_ON(ret); + if (ret) { + btrfs_end_transaction(trans, root); + return ret; } + if (inode->i_nlink > 0) + ret = btrfs_orphan_del(trans, inode); nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); @@ -6478,6 +6490,7 @@ static int btrfs_truncate(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; + int err = 0; struct btrfs_trans_handle *trans; unsigned long nr; u64 mask = root->sectorsize - 1; @@ -6490,7 +6503,8 @@ static int btrfs_truncate(struct inode *inode) btrfs_ordered_update_i_size(inode, inode->i_size, NULL); trans = btrfs_start_transaction(root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = root->orphan_block_rsv; @@ -6517,29 +6531,38 @@ static int btrfs_truncate(struct inode *inode) while (1) { if (!trans) { trans = btrfs_start_transaction(root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = root->orphan_block_rsv; } ret = btrfs_block_rsv_check(trans, root, root->orphan_block_rsv, 0, 5); - if (ret) { - BUG_ON(ret != -EAGAIN); + if (ret == -EAGAIN) { ret = btrfs_commit_transaction(trans, root); - BUG_ON(ret); + if (ret) + return ret; trans = NULL; continue; + } else if (ret) { + err = ret; + break; } ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, BTRFS_EXTENT_DATA_KEY); - if (ret != -EAGAIN) + if (ret != -EAGAIN) { + err = ret; break; + } ret = btrfs_update_inode(trans, root, inode); - BUG_ON(ret); + if (ret) { + err = ret; + break; + } nr = trans->blocks_used; btrfs_end_transaction(trans, root); @@ -6549,18 +6572,21 @@ static int btrfs_truncate(struct inode *inode) if (ret == 0 && inode->i_nlink > 0) { ret = btrfs_orphan_del(trans, inode); - BUG_ON(ret); + if (ret) + err = ret; } ret = btrfs_update_inode(trans, 
root, inode); - BUG_ON(ret); + if (ret && !err) + err = ret; nr = trans->blocks_used; ret = btrfs_end_transaction_throttle(trans, root); - BUG_ON(ret); + if (ret && !err) + err = ret; btrfs_btree_balance_dirty(root, nr); - return ret; + return err; } /* -- cgit v1.2.2 From 66b4ffd110f9b48b8d8c1319ee446b53b8d073bf Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 31 Jan 2011 16:22:42 -0500 Subject: Btrfs: handle errors in btrfs_orphan_cleanup If we cannot truncate an inode for some reason we will never delete the orphan item associated with that inode, which means that we will loop forever in btrfs_orphan_cleanup. Instead of doing this just return error so we fail to mount. It sucks, but hey it's better than hanging. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 15 ++++++++++++--- fs/btrfs/extent-tree.c | 3 ++- fs/btrfs/inode.c | 47 +++++++++++++++++++++++++++++++---------------- fs/btrfs/ioctl.c | 4 +++- fs/btrfs/relocation.c | 2 +- 6 files changed, 50 insertions(+), 23 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 34142d5647df..841330f3d68d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2529,7 +2529,7 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans, struct inode *inode); int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); -void btrfs_orphan_cleanup(struct btrfs_root *root); +int btrfs_orphan_cleanup(struct btrfs_root *root); void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending, u64 *bytes_to_reserve); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e1aa8d607bc7..495b1ac45f8c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2058,9 +2058,14 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { down_read(&fs_info->cleanup_work_sem); - btrfs_orphan_cleanup(fs_info->fs_root); - btrfs_orphan_cleanup(fs_info->tree_root); + err = btrfs_orphan_cleanup(fs_info->fs_root); + if (!err) + err = btrfs_orphan_cleanup(fs_info->tree_root); up_read(&fs_info->cleanup_work_sem); + if (err) { + close_ctree(tree_root); + return ERR_PTR(err); + } } return tree_root; @@ -2435,8 +2440,12 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) root_objectid = gang[ret - 1]->root_key.objectid + 1; for (i = 0; i < ret; i++) { + int err; + root_objectid = gang[i]->root_key.objectid; - btrfs_orphan_cleanup(gang[i]); + err = btrfs_orphan_cleanup(gang[i]); + if (err) + return err; } root_objectid++; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 27376c97d85f..a8f4e8d2ba60 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7619,7 +7619,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); BUG_ON(!reloc_root); - btrfs_orphan_cleanup(reloc_root); + ret = btrfs_orphan_cleanup(reloc_root); + BUG_ON(ret); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d83025063ee7..0600265cb9b0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2284,7 +2284,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) * this cleans up any orphans that may be left on the list from the last use * of this root. 
*/ -void btrfs_orphan_cleanup(struct btrfs_root *root) +int btrfs_orphan_cleanup(struct btrfs_root *root) { struct btrfs_path *path; struct extent_buffer *leaf; @@ -2294,10 +2294,13 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) int ret = 0, nr_unlink = 0, nr_truncate = 0; if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) - return; + return 0; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) { + ret = -ENOMEM; + goto out; + } path->reada = -1; key.objectid = BTRFS_ORPHAN_OBJECTID; @@ -2306,11 +2309,8 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) { - printk(KERN_ERR "Error searching slot for orphan: %d" - "\n", ret); - break; - } + if (ret < 0) + goto out; /* * if ret == 0 means we found what we were searching for, which @@ -2318,6 +2318,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) * find the key and see if we have stuff that matches */ if (ret > 0) { + ret = 0; if (path->slots[0] == 0) break; path->slots[0]--; @@ -2345,7 +2346,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) found_key.type = BTRFS_INODE_ITEM_KEY; found_key.offset = 0; inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); - BUG_ON(IS_ERR(inode)); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); + goto out; + } /* * add this inode to the orphan list so btrfs_orphan_del does @@ -2363,7 +2367,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) */ if (is_bad_inode(inode)) { trans = btrfs_start_transaction(root, 0); - BUG_ON(IS_ERR(trans)); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out; + } btrfs_orphan_del(trans, inode); btrfs_end_transaction(trans, root); iput(inode); @@ -2378,16 +2385,16 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) continue; } nr_truncate++; - btrfs_truncate(inode); + ret = btrfs_truncate(inode); } else { nr_unlink++; } /* this will do delete_inode and everything for us */ iput(inode); + if (ret) + goto out; } - btrfs_free_path(path); - root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; if (root->orphan_block_rsv) @@ -2396,14 +2403,20 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) if (root->orphan_block_rsv || root->orphan_item_inserted) { trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - btrfs_end_transaction(trans, root); + if (!IS_ERR(trans)) + btrfs_end_transaction(trans, root); } if (nr_unlink) printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); if (nr_truncate) printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); + +out: + if (ret) + printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret); + btrfs_free_path(path); + return ret; } /* @@ -4156,8 +4169,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) if (!IS_ERR(inode) && root != sub_root) { down_read(&root->fs_info->cleanup_work_sem); if (!(inode->i_sb->s_flags & MS_RDONLY)) - btrfs_orphan_cleanup(sub_root); + ret = btrfs_orphan_cleanup(sub_root); up_read(&root->fs_info->cleanup_work_sem); + if (ret) + inode = ERR_PTR(ret); } return inode; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5fdb2abc4fa7..ad9b8c0e930b 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -409,7 +409,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, if (ret) goto fail; - btrfs_orphan_cleanup(pending_snapshot->snap); + ret = btrfs_orphan_cleanup(pending_snapshot->snap); + if (ret) + goto fail; parent = dget_parent(dentry); inode = btrfs_lookup_dentry(parent->d_inode, dentry); diff 
--git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 31ade5802ae8..c863c8447015 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4209,7 +4209,7 @@ out: if (IS_ERR(fs_root)) err = PTR_ERR(fs_root); else - btrfs_orphan_cleanup(fs_root); + err = btrfs_orphan_cleanup(fs_root); } return err; } -- cgit v1.2.2 From ded5db9de78f963979e1605f859de67626f54693 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 4 Mar 2011 14:09:46 -0500 Subject: Btrfs: make sure to remove the orphan item from the in-memory list This fixes a problem where if truncate fails the inode will still be on the in memory orphan list. This will make us complain when the inode gets destroyed because it's still on the orphan list. So if we fail just remove us from the in memory list and carry on. Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0600265cb9b0..3bd0ff63bf30 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6589,6 +6589,12 @@ static int btrfs_truncate(struct inode *inode) ret = btrfs_orphan_del(trans, inode); if (ret) err = ret; + } else if (ret && inode->i_nlink > 0) { + /* + * Failed to do the truncate, remove us from the in memory + * orphan list. + */ + ret = btrfs_orphan_del(NULL, inode); } ret = btrfs_update_inode(trans, root, inode); -- cgit v1.2.2 From f0cd846e9221811d87047f1428cf5226e7236efe Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 4 Mar 2011 14:37:08 -0500 Subject: Btrfs: only add orphan items when truncating We don't need an orphan item when expanding files, we just need them for truncating them, so only add the orphan item in btrfs_truncate instead of in btrfs_setsize. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 45 ++++++++++++++++++--------------------------- 1 file changed, 18 insertions(+), 27 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3bd0ff63bf30..206b60362cec 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3649,22 +3649,6 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize) if (newsize == oldsize) return 0; - trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) - return PTR_ERR(trans); - - btrfs_set_trans_block_group(trans, inode); - - ret = btrfs_orphan_add(trans, inode); - if (ret) { - btrfs_end_transaction(trans, root); - return ret; - } - - nr = trans->blocks_used; - btrfs_end_transaction(trans, root); - btrfs_btree_balance_dirty(root, nr); - if (newsize > oldsize) { i_size_write(inode, newsize); btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); @@ -3675,25 +3659,15 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize) return ret; } - trans = btrfs_start_transaction(root, 0); + trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, inode); - trans->block_rsv = root->orphan_block_rsv; - BUG_ON(!trans->block_rsv); - - /* - * If this fails just leave the orphan item so that it can get - * cleaned up next time we mount.
- */ ret = btrfs_update_inode(trans, root, inode); if (ret) { btrfs_end_transaction(trans, root); return ret; } - if (inode->i_nlink > 0) - ret = btrfs_orphan_del(trans, inode); nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); @@ -6517,6 +6491,23 @@ static int btrfs_truncate(struct inode *inode) btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_ordered_update_i_size(inode, inode->i_size, NULL); + trans = btrfs_start_transaction(root, 5); + if (IS_ERR(trans)) + return PTR_ERR(trans); + + btrfs_set_trans_block_group(trans, inode); + + ret = btrfs_orphan_add(trans, inode); + if (ret) { + btrfs_end_transaction(trans, root); + return ret; + } + + nr = trans->blocks_used; + btrfs_end_transaction(trans, root); + btrfs_btree_balance_dirty(root, nr); + + /* Now start a transaction for the truncate */ trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) return PTR_ERR(trans); -- cgit v1.2.2 From 930f028abe39dfd0849b53131d19c4b67aacbe67 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 4 Mar 2011 14:41:41 -0500 Subject: Btrfs: use mark_inode_dirty when expanding the file Mark_inode_dirty will call btrfs_dirty_inode which will take care of updating the inode. This makes setsize a little cleaner since we don't have to start a transaction and update the inode in there, we can just call mark_inode_dirty. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 206b60362cec..64d57e032b4e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3640,10 +3640,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) static int btrfs_setsize(struct inode *inode, loff_t newsize) { - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_trans_handle *trans; loff_t oldsize = i_size_read(inode); - unsigned long nr; int ret; if (newsize == oldsize) @@ -3659,18 +3656,7 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize) return ret; } - trans = btrfs_start_transaction(root, 1); - if (IS_ERR(trans)) - return PTR_ERR(trans); - - ret = btrfs_update_inode(trans, root, inode); - if (ret) { - btrfs_end_transaction(trans, root); - return ret; - } - nr = trans->blocks_used; - btrfs_end_transaction(trans, root); - btrfs_btree_balance_dirty(root, nr); + mark_inode_dirty(inode); } else { /* -- cgit v1.2.2 From 695a0d0da09e75c4475bbb303def159023ef72ca Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 4 Mar 2011 15:46:53 -0500 Subject: Btrfs: add a comment explaining what btrfs_cont_expand does Every time I have to deal with btrfs_cont_expand I stare at it for 20 minutes trying to remember what exactly it does and why the hell we need it. So add a comment to save future-Josef some time. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 64d57e032b4e..888dbdb3b128 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3557,6 +3557,12 @@ out: return ret; } +/* + * This function puts in dummy file extents for the area we're creating a hole + * for.
So if we are truncating this file to a larger size we need to insert + * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for + * the range between oldsize and size + */ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) { struct btrfs_trans_handle *trans; -- cgit v1.2.2 From 850265335f792f5d39ab24e5fb7160bac28d77e5 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 15 Mar 2011 14:52:12 -0400 Subject: Btrfs: return error if the range we want to map is bogus Currently if we have corrupt metadata map_extent_buffer will complain about it, but not return an error so the caller has no idea a problem was hit. Fix this. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent_io.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 714adc4ac4c2..1bbd26b4fc5c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3690,6 +3690,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, "wanted %lu %lu\n", (unsigned long long)eb->start, eb->len, start, min_len); WARN_ON(1); + return -EINVAL; } p = extent_buffer_page(eb, i); -- cgit v1.2.2 From a826d6dcb32d811b4c81df57a5ef1367516586b0 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 16 Mar 2011 13:42:43 -0400 Subject: Btrfs: check items for correctness as we search Currently if we have corrupted items things will blow up in spectacular ways. So as we read in blocks and they are leaves, check the entire leaf to make sure all of the items are correct and point to valid parts in the leaf for the item data they are responsible for. If the item is corrupt we will kick back EIO and not read any of the copies since they are likely to not be correct either. This will catch generic corruptions, it will be up to the individual callers of btrfs_search_slot to make sure their items are right.
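The leaf layout being validated is easier to see in a toy model. The sketch below is not kernel code (the real (objectid, type, offset) key triple is collapsed to one integer and the structs are invented), but the three invariants mirror the ones check_leaf() enforces in the diff that follows: item data packs at the tail of the block, grows toward the front, and each item must start exactly where the next item's data ends.

struct toy_item {
	unsigned long long key;		/* stand-in for the real key triple */
	unsigned int offset;		/* start of this item's data */
	unsigned int size;		/* length of this item's data */
};

static int toy_check_leaf(const struct toy_item *it, int nritems,
			  unsigned int leaf_data_size)
{
	int slot;

	if (nritems == 0)
		return 0;
	/* item 0's data must butt up against the end of the block */
	if (it[0].offset + it[0].size != leaf_data_size)
		return -1;
	for (slot = 0; slot < nritems - 1; slot++) {
		if (it[slot].key >= it[slot + 1].key)
			return -1;	/* keys must strictly ascend */
		if (it[slot].offset != it[slot + 1].offset + it[slot + 1].size)
			return -1;	/* data regions must be contiguous */
		if (it[slot].offset + it[slot].size > leaf_data_size)
			return -1;	/* data must stay inside the block */
	}
	return 0;
}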
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 123 ------------------------------------------------- fs/btrfs/disk-io.c | 90 +++++++++++++++++++++++++++++++++++- fs/btrfs/extent-tree.c | 5 ++ fs/btrfs/extent_io.h | 1 + 4 files changed, 95 insertions(+), 124 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index b5baff0dccfe..73e53009e126 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -732,122 +732,6 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root, return btrfs_item_offset_nr(leaf, nr - 1); } -/* - * extra debugging checks to make sure all the items in a key are - * well formed and in the proper order - */ -static int check_node(struct btrfs_root *root, struct btrfs_path *path, - int level) -{ - struct extent_buffer *parent = NULL; - struct extent_buffer *node = path->nodes[level]; - struct btrfs_disk_key parent_key; - struct btrfs_disk_key node_key; - int parent_slot; - int slot; - struct btrfs_key cpukey; - u32 nritems = btrfs_header_nritems(node); - - if (path->nodes[level + 1]) - parent = path->nodes[level + 1]; - - slot = path->slots[level]; - BUG_ON(nritems == 0); - if (parent) { - parent_slot = path->slots[level + 1]; - btrfs_node_key(parent, &parent_key, parent_slot); - btrfs_node_key(node, &node_key, 0); - BUG_ON(memcmp(&parent_key, &node_key, - sizeof(struct btrfs_disk_key))); - BUG_ON(btrfs_node_blockptr(parent, parent_slot) != - btrfs_header_bytenr(node)); - } - BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root)); - if (slot != 0) { - btrfs_node_key_to_cpu(node, &cpukey, slot - 1); - btrfs_node_key(node, &node_key, slot); - BUG_ON(comp_keys(&node_key, &cpukey) <= 0); - } - if (slot < nritems - 1) { - btrfs_node_key_to_cpu(node, &cpukey, slot + 1); - btrfs_node_key(node, &node_key, slot); - BUG_ON(comp_keys(&node_key, &cpukey) >= 0); - } - return 0; -} - -/* - * extra checking to make sure all the items in a leaf are - * well formed and in the proper order - */ -static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, - int level) -{ - struct extent_buffer *leaf = path->nodes[level]; - struct extent_buffer *parent = NULL; - int parent_slot; - struct btrfs_key cpukey; - struct btrfs_disk_key parent_key; - struct btrfs_disk_key leaf_key; - int slot = path->slots[0]; - - u32 nritems = btrfs_header_nritems(leaf); - - if (path->nodes[level + 1]) - parent = path->nodes[level + 1]; - - if (nritems == 0) - return 0; - - if (parent) { - parent_slot = path->slots[level + 1]; - btrfs_node_key(parent, &parent_key, parent_slot); - btrfs_item_key(leaf, &leaf_key, 0); - - BUG_ON(memcmp(&parent_key, &leaf_key, - sizeof(struct btrfs_disk_key))); - BUG_ON(btrfs_node_blockptr(parent, parent_slot) != - btrfs_header_bytenr(leaf)); - } - if (slot != 0 && slot < nritems - 1) { - btrfs_item_key(leaf, &leaf_key, slot); - btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1); - if (comp_keys(&leaf_key, &cpukey) <= 0) { - btrfs_print_leaf(root, leaf); - printk(KERN_CRIT "slot %d offset bad key\n", slot); - BUG_ON(1); - } - if (btrfs_item_offset_nr(leaf, slot - 1) != - btrfs_item_end_nr(leaf, slot)) { - btrfs_print_leaf(root, leaf); - printk(KERN_CRIT "slot %d offset bad\n", slot); - BUG_ON(1); - } - } - if (slot < nritems - 1) { - btrfs_item_key(leaf, &leaf_key, slot); - btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1); - BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0); - if (btrfs_item_offset_nr(leaf, slot) != - btrfs_item_end_nr(leaf, slot + 1)) { - btrfs_print_leaf(root, leaf); - printk(KERN_CRIT "slot %d offset bad\n", slot); - 
BUG_ON(1); - } - } - BUG_ON(btrfs_item_offset_nr(leaf, 0) + - btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root)); - return 0; -} - -static noinline int check_block(struct btrfs_root *root, - struct btrfs_path *path, int level) -{ - return 0; - if (level == 0) - return check_leaf(root, path, level); - return check_node(root, path, level); -} /* * search for key in the extent_buffer. The items start at offset p, @@ -1188,7 +1072,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, } } /* double check we haven't messed things up */ - check_block(root, path, level); if (orig_ptr != btrfs_node_blockptr(path->nodes[level], path->slots[level])) BUG(); @@ -1798,12 +1681,6 @@ cow_done: if (!cow) btrfs_unlock_up_safe(p, level + 1); - ret = check_block(root, p, level); - if (ret) { - ret = -1; - goto done; - } - ret = bin_search(b, key, level, &slot); if (level != 0) { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 495b1ac45f8c..9f31e110b481 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -323,6 +323,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, int num_copies = 0; int mirror_num = 0; + clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; while (1) { ret = read_extent_buffer_pages(io_tree, eb, start, 1, @@ -331,6 +332,14 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, !verify_parent_transid(io_tree, eb, parent_transid)) return ret; + /* + * This buffer's crc is fine, but its contents are corrupted, so + * there is no reason to read the other copies, they won't be + * any less wrong. + */ + if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) + return ret; + num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); if (num_copies == 1) @@ -419,6 +428,73 @@ static int check_tree_block_fsid(struct btrfs_root *root, return ret; } +#define CORRUPT(reason, eb, root, slot) \ + printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \ + "root=%llu, slot=%d\n", reason, \ + (unsigned long long)btrfs_header_bytenr(eb), \ + (unsigned long long)root->objectid, slot) + +static noinline int check_leaf(struct btrfs_root *root, + struct extent_buffer *leaf) +{ + struct btrfs_key key; + struct btrfs_key leaf_key; + u32 nritems = btrfs_header_nritems(leaf); + int slot; + + if (nritems == 0) + return 0; + + /* Check the 0 item */ + if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != + BTRFS_LEAF_DATA_SIZE(root)) { + CORRUPT("invalid item offset size pair", leaf, root, 0); + return -EIO; + } + + /* + * Check to make sure each items keys are in the correct order and their + * offsets make sense. We only have to loop through nritems-1 because + * we check the current slot against the next slot, which verifies the + * next slot's offset+size makes sense and that the current's slot + * offset is correct. + */ + for (slot = 0; slot < nritems - 1; slot++) { + btrfs_item_key_to_cpu(leaf, &leaf_key, slot); + btrfs_item_key_to_cpu(leaf, &key, slot + 1); + + /* Make sure the keys are in the right order */ + if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) { + CORRUPT("bad key order", leaf, root, slot); + return -EIO; + } + + /* + * Make sure the offset and ends are right, remember that the + * item data starts at the end of the leaf and grows towards the + * front. 
+ */ + if (btrfs_item_offset_nr(leaf, slot) != + btrfs_item_end_nr(leaf, slot + 1)) { + CORRUPT("slot offset bad", leaf, root, slot); + return -EIO; + } + + /* + * Check to make sure that we don't point outside of the leaf, + * just incase all the items are consistent to eachother, but + * all point outside of the leaf. + */ + if (btrfs_item_end_nr(leaf, slot) > + BTRFS_LEAF_DATA_SIZE(root)) { + CORRUPT("slot end outside of leaf", leaf, root, slot); + return -EIO; + } + } + + return 0; +} + #ifdef CONFIG_DEBUG_LOCK_ALLOC void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level) { @@ -485,8 +561,20 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, btrfs_set_buffer_lockdep_class(eb, found_level); ret = csum_tree_block(root, eb, 1); - if (ret) + if (ret) { ret = -EIO; + goto err; + } + + /* + * If this is a leaf block and it is corrupt, set the corrupt bit so + * that we don't try and read the other copies of this block, just + * return -EIO. + */ + if (found_level == 0 && check_leaf(root, eb)) { + set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); + ret = -EIO; + } end = min_t(u64, eb->len, PAGE_CACHE_SIZE); end = eb->start + end - 1; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a8f4e8d2ba60..cd794c35a636 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4754,6 +4754,11 @@ pin: } } out: + /* + * Deleting the buffer, clear the corrupt flag since it doesn't matter + * anymore. + */ + clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); btrfs_put_block_group(cache); } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 9318dfefd59c..f62c5442835d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -31,6 +31,7 @@ #define EXTENT_BUFFER_UPTODATE 0 #define EXTENT_BUFFER_BLOCKING 1 #define EXTENT_BUFFER_DIRTY 2 +#define EXTENT_BUFFER_CORRUPT 3 /* these are flags for extent_clear_unlock_delalloc */ #define EXTENT_CLEAR_UNLOCK_PAGE 0x1 -- cgit v1.2.2 From 41415730a1050499fbd63b3f7dd59b3a4c3bb91a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 16 Mar 2011 13:59:32 -0400 Subject: Btrfs: check return value of btrfs_search_slot properly Doing an audit of where we use btrfs_search_slot only showed one place where we don't check the return value of btrfs_search_slot properly. Just fix mark_extent_written to see if btrfs_search_slot failed and act accordingly. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/file.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 3786eca2a905..a85b044cf39e 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -608,6 +608,8 @@ again: key.offset = split; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret < 0) + goto out; if (ret > 0 && path->slots[0] > 0) path->slots[0]--; -- cgit v1.2.2 From 22a94d44bd6876a90630338229da6c0436d46593 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 16 Mar 2011 16:47:17 -0400 Subject: Btrfs: add checks to verify dir items are correct We need to make sure the dir items we get are valid dir items. So any time we try and read one check it with verify_dir_item, which will do various sanity checks to make sure it looks sane. 
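As a rough sketch of the call pattern this introduces (condensed from the hunks below, not a complete function; local names are illustrative):

	/*
	 * Walk the dir items packed into one leaf item, validating each
	 * header before trusting its name/data lengths.
	 */
	leaf = path->nodes[0];
	dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
	total_len = btrfs_item_size_nr(leaf, path->slots[0]);
	cur = 0;
	while (cur < total_len) {
		if (verify_dir_item(root, leaf, dir_item))
			return -EIO;	/* corrupt: stop parsing this item */
		this_len = sizeof(*dir_item) +
			btrfs_dir_name_len(leaf, dir_item) +
			btrfs_dir_data_len(leaf, dir_item);
		dir_item = (struct btrfs_dir_item *)((char *)dir_item + this_len);
		cur += this_len;
	}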
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 3 +++ fs/btrfs/dir-item.c | 35 +++++++++++++++++++++++++++++++++++ fs/btrfs/inode.c | 3 +++ fs/btrfs/tree-log.c | 7 +++++++ fs/btrfs/xattr.c | 2 ++ 5 files changed, 50 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 841330f3d68d..6036fdb88c53 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2393,6 +2393,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 dir, const char *name, u16 name_len, int mod); +int verify_dir_item(struct btrfs_root *root, + struct extent_buffer *leaf, + struct btrfs_dir_item *dir_item); /* orphan.c */ int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index f0cad5ae5be7..02c97ad61b6d 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -377,6 +377,9 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, leaf = path->nodes[0]; dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); + if (verify_dir_item(root, leaf, dir_item)) + return NULL; + total_len = btrfs_item_size_nr(leaf, path->slots[0]); while (cur < total_len) { this_len = sizeof(*dir_item) + @@ -429,3 +432,35 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, } return ret; } + +int verify_dir_item(struct btrfs_root *root, + struct extent_buffer *leaf, + struct btrfs_dir_item *dir_item) +{ + u16 namelen = BTRFS_NAME_LEN; + u8 type = btrfs_dir_type(leaf, dir_item); + + if (type >= BTRFS_FT_MAX) { + printk(KERN_CRIT "btrfs: invalid dir item type: %d\n", + (int)type); + return 1; + } + + if (type == BTRFS_FT_XATTR) + namelen = XATTR_NAME_MAX; + + if (btrfs_dir_name_len(leaf, dir_item) > namelen) { + printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n", + (unsigned)btrfs_dir_data_len(leaf, dir_item)); + return 1; + } + + /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */ + if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) { + printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n", + (unsigned)btrfs_dir_data_len(leaf, dir_item)); + return 1; + } + + return 0; +} diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 888dbdb3b128..e010000d4bc9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4272,6 +4272,9 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, while (di_cur < di_total) { struct btrfs_key location; + if (verify_dir_item(root, leaf, di)) + break; + name_len = btrfs_dir_name_len(leaf, di); if (name_len <= sizeof(tmp_name)) { name_ptr = tmp_name; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a4bbb854dfd2..429cfcfadf90 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1286,6 +1286,8 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, ptr_end = ptr + item_size; while (ptr < ptr_end) { di = (struct btrfs_dir_item *)ptr; + if (verify_dir_item(root, eb, di)) + return -EIO; name_len = btrfs_dir_name_len(eb, di); ret = replay_one_name(trans, root, path, eb, di, key); BUG_ON(ret); @@ -1412,6 +1414,11 @@ again: ptr_end = ptr + item_size; while (ptr < ptr_end) { di = (struct btrfs_dir_item *)ptr; + if (verify_dir_item(root, eb, di)) { + ret = -EIO; + goto out; + } + name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); if (!name) { diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index a5776531dc2b..e5d22f280956 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -242,6 +242,8 @@ ssize_t 
btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) break; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); + if (verify_dir_item(root, leaf, di)) + continue; name_len = btrfs_dir_name_len(leaf, di); total_size += name_len + 1; -- cgit v1.2.2 From 7d0d2e8e6b6f7da221a25238cf490a095c8c4788 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 18 Mar 2011 15:13:42 -0400 Subject: Btrfs: check free space in block group before searching for a cluster The free space cluster stuff is heavy duty, so there is no sense in going through the entire song and dance if there isn't enough space in the block group to begin with. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 0282033041e1..f631df870f64 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1999,6 +1999,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, min_bytes = max(bytes, (bytes + empty_size) >> 2); spin_lock(&block_group->tree_lock); + + /* + * If we know we don't have enough space to make a cluster don't even + * bother doing all the work to try and find one. + */ + if (block_group->free_space < min_bytes) { + spin_unlock(&block_group->tree_lock); + return -ENOSPC; + } + spin_lock(&cluster->lock); /* someone already found a cluster, hooray */ -- cgit v1.2.2 From d0a365e84a886ce6b5b7f7a76be0bb24934ec8f0 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 18 Mar 2011 15:27:43 -0400 Subject: Btrfs: deal with min_bytes appropriately when looking for a cluster We do all this fun stuff with min_bytes, but either don't use it in the case of just normal extents, or use it completely wrong in the case of bitmaps. So fix this for both cases: 1) In the extent case, stop looking for space with window_free >= min_bytes instead of bytes + empty_size. 2) In the bitmap case, we were looking for stretches of free space that were at least min_bytes in size, which was not right at all. So instead search for stretches of free space that are at least bytes in size (this will make a difference when we have > page size blocks) and then only search for min_bytes amount of free space.
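Put differently, the corrected bitmap search takes only stretches of at least bytes and succeeds once min_bytes total has been found. A tiny self-contained model of that rule (illustrative C only, not the kernel code; the kernel works on real bitmaps via search_bitmap()):

	#include <stdbool.h>
	#include <stddef.h>

	/*
	 * Model: map[i] is true when bit i is free.  Collect only free
	 * runs of at least run_bits (derived from 'bytes'), and succeed
	 * once total_bits (derived from 'min_bytes') have been gathered.
	 */
	static bool cluster_fits(const bool *map, size_t nbits,
				 size_t run_bits, size_t total_bits)
	{
		size_t found = 0, run = 0;

		for (size_t i = 0; i <= nbits; i++) {
			if (i < nbits && map[i]) {
				run++;
				continue;
			}
			/* a free run just ended; keep it only if long enough */
			if (run >= run_bits)
				found += run;
			run = 0;
		}
		return found >= total_bits;
	}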
Thanks, Reviewed-by: Li Zefan Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f631df870f64..63776ae72f9e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1910,8 +1910,8 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, i = offset_to_bit(entry->offset, block_group->sectorsize, max_t(u64, offset, entry->offset)); - search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); - total_bits = bytes_to_bits(bytes, block_group->sectorsize); + search_bits = bytes_to_bits(bytes, block_group->sectorsize); + total_bits = bytes_to_bits(min_bytes, block_group->sectorsize); again: found_bits = 0; @@ -2034,8 +2034,7 @@ again: if (entry->bitmap && entry->bytes > bytes + empty_size) { ret = btrfs_bitmap_cluster(block_group, entry, cluster, - offset, bytes + empty_size, - min_bytes); + offset, bytes, min_bytes); if (!ret) goto got_it; } @@ -2065,7 +2064,7 @@ again: while (1) { /* out window is just right, lets fill it */ - if (window_free >= bytes + empty_size) + if (window_free >= min_bytes) break; node = rb_next(&last->offset_index); -- cgit v1.2.2 From 32cb0840ce8e13901fe71a9a8e834a531802ffc4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 18 Mar 2011 16:16:21 -0400 Subject: Btrfs: don't be as aggressive about using bitmaps We have been creating bitmaps for small extents unconditionally forever. This was great when testing to make sure the bitmap stuff was working, but is overkill normally. So instead of always adding small chunks of free space to bitmaps, only start doing it if we go past half of our extent threshold. This will keep us from creating a bitmap for just one small free extent at the front of the block group, and will make the allocator a little faster as a result. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 63776ae72f9e..4ab35ea0443f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1287,9 +1287,22 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, * If we are below the extents threshold then we can add this as an * extent, and don't have to deal with the bitmap */ - if (block_group->free_extents < block_group->extents_thresh && - info->bytes > block_group->sectorsize * 4) - return 0; + if (block_group->free_extents < block_group->extents_thresh) { + /* + * If this block group has some small extents we don't want to + * use up all of our free slots in the cache with them, we want + * to reserve them to larger extents, however if we have plent + * of cache left then go ahead an dadd them, no sense in adding + * the overhead of a bitmap if we don't have to.
+ */ + if (info->bytes <= block_group->sectorsize * 4) { + if (block_group->free_extents * 2 <= + block_group->extents_thresh) + return 0; + } else { + return 0; + } + } /* * some block groups are so tiny they can't be enveloped by a bitmap, so -- cgit v1.2.2 From 565d76cb7d5fd7cb010fd690602280a69ab116ef Mon Sep 17 00:00:00 2001 From: Jim Keniston Date: Tue, 22 Mar 2011 16:35:12 -0700 Subject: zlib: slim down zlib_deflate() workspace when possible Instead of always creating a huge (268K) deflate_workspace with the maximum compression parameters (windowBits=15, memLevel=8), allow the caller to obtain a smaller workspace by specifying smaller parameter values. For example, when capturing oops and panic reports to a medium with limited capacity, such as NVRAM, compression may be the only way to capture the whole report. In this case, a small workspace (24K works fine) is a win, whether you allocate the workspace when you need it (i.e., during an oops or panic) or at boot time. I've verified that this patch works with all accepted values of windowBits (positive and negative), memLevel, and compression level. Signed-off-by: Jim Keniston Cc: Herbert Xu Cc: David Miller Cc: Chris Mason Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/zlib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index f5ec2d44150d..faccd47c6c46 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -57,7 +57,8 @@ static struct list_head *zlib_alloc_workspace(void) if (!workspace) return ERR_PTR(-ENOMEM); - workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); + workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize( + MAX_WBITS, MAX_MEM_LEVEL)); workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); if (!workspace->def_strm.workspace || -- cgit v1.2.2 From 2e1496707560ecf98e9b0604622c0990f94861d3 Mon Sep 17 00:00:00 2001 From: "Serge E. Hallyn" Date: Wed, 23 Mar 2011 16:43:26 -0700 Subject: userns: rename is_owner_or_cap to inode_owner_or_capable And give it a kernel-doc comment. [akpm@linux-foundation.org: btrfs changed in linux-next] Signed-off-by: Serge E. Hallyn Cc: "Eric W. 
Biederman" Cc: Daniel Lezcano Acked-by: David Howells Cc: James Morris Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/acl.c | 2 +- fs/btrfs/ioctl.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 9c949348510b..de34bfad9ec3 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -170,7 +170,7 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, int ret; struct posix_acl *acl = NULL; - if (!is_owner_or_cap(dentry->d_inode)) + if (!inode_owner_or_capable(dentry->d_inode)) return -EPERM; if (!IS_POSIXACL(dentry->d_inode)) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5fdb2abc4fa7..d1bace3df9b6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -158,7 +158,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) FS_SYNC_FL | FS_DIRSYNC_FL)) return -EOPNOTSUPP; - if (!is_owner_or_cap(inode)) + if (!inode_owner_or_capable(inode)) return -EACCES; mutex_lock(&inode->i_mutex); @@ -1077,7 +1077,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (flags & ~BTRFS_SUBVOL_RDONLY) return -EOPNOTSUPP; - if (!is_owner_or_cap(inode)) + if (!inode_owner_or_capable(inode)) return -EACCES; down_write(&root->fs_info->subvol_sem); -- cgit v1.2.2 From 4e69b598f6cfb0940b75abf7e179d6020e94ad1e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 21 Mar 2011 10:11:24 -0400 Subject: Btrfs: cleanup how we setup free space clusters This patch makes the free space cluster refilling code a little easier to understand, and fixes some things with the bitmap part of it. Currently we either want to refill a cluster with 1) All normal extent entries (those without bitmaps) 2) A bitmap entry with enough space The current code has this ugly jump around logic that will first try and fill up the cluster with extent entries and then if it can't do that it will try and find a bitmap to use. So instead split this out into two functions, one that tries to find only normal entries, and one that tries to find bitmaps. This also fixes a suboptimal thing we would do with bitmaps. If we used a bitmap we would just tell the cluster that we were pointing at a bitmap and it would do the tree search in the block group for that entry every time we tried to make an allocation. Instead of doing that now we just add it to the clusters group. I tested this with my ENOSPC tests and xfstests and it survived. 
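The resulting refill flow is a plain two-step fallback; condensed from the hunk below into a sketch:

	/*
	 * Try to build the cluster from normal extent entries first and
	 * only fall back to a bitmap when that fails.
	 */
	ret = setup_cluster_no_bitmap(block_group, cluster, offset,
				      bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, offset,
					   bytes, min_bytes);
	if (!ret)
		cluster->block_group = block_group;	/* wire it up */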
Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 3 - fs/btrfs/free-space-cache.c | 364 ++++++++++++++++++++++---------------------- 2 files changed, 182 insertions(+), 185 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6036fdb88c53..0ee679b6c1b7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -783,9 +783,6 @@ struct btrfs_free_cluster { /* first extent starting offset */ u64 window_start; - /* if this cluster simply points at a bitmap in the block group */ - bool points_to_bitmap; - struct btrfs_block_group_cache *block_group; /* * when a cluster is allocated from a block group, we put the diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 4ab35ea0443f..f03ef97c3b21 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1644,30 +1644,28 @@ __btrfs_return_cluster_to_free_space( { struct btrfs_free_space *entry; struct rb_node *node; - bool bitmap; spin_lock(&cluster->lock); if (cluster->block_group != block_group) goto out; - bitmap = cluster->points_to_bitmap; cluster->block_group = NULL; cluster->window_start = 0; list_del_init(&cluster->block_group_list); - cluster->points_to_bitmap = false; - - if (bitmap) - goto out; node = rb_first(&cluster->root); while (node) { + bool bitmap; + entry = rb_entry(node, struct btrfs_free_space, offset_index); node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); - BUG_ON(entry->bitmap); - try_merge_free_space(block_group, entry, false); + + bitmap = (entry->bitmap != NULL); + if (!bitmap) + try_merge_free_space(block_group, entry, false); tree_insert_offset(&block_group->free_space_offset, - entry->offset, &entry->offset_index, 0); + entry->offset, &entry->offset_index, bitmap); } cluster->root = RB_ROOT; @@ -1790,50 +1788,24 @@ int btrfs_return_cluster_to_free_space( static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, + struct btrfs_free_space *entry, u64 bytes, u64 min_start) { - struct btrfs_free_space *entry; int err; u64 search_start = cluster->window_start; u64 search_bytes = bytes; u64 ret = 0; - spin_lock(&block_group->tree_lock); - spin_lock(&cluster->lock); - - if (!cluster->points_to_bitmap) - goto out; - - if (cluster->block_group != block_group) - goto out; - - /* - * search_start is the beginning of the bitmap, but at some point it may - * be a good idea to point to the actual start of the free area in the - * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only - * to 1 to make sure we get the bitmap entry - */ - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, search_start), - 1, 0); - if (!entry || !entry->bitmap) - goto out; - search_start = min_start; search_bytes = bytes; err = search_bitmap(block_group, entry, &search_start, &search_bytes); if (err) - goto out; + return 0; ret = search_start; bitmap_clear_bits(block_group, entry, ret, bytes); - if (entry->bytes == 0) - free_bitmap(block_group, entry); -out: - spin_unlock(&cluster->lock); - spin_unlock(&block_group->tree_lock); return ret; } @@ -1851,10 +1823,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, struct rb_node *node; u64 ret = 0; - if (cluster->points_to_bitmap) - return btrfs_alloc_from_bitmap(block_group, cluster, bytes, - min_start); - spin_lock(&cluster->lock); if (bytes > cluster->max_size) goto out; @@ -1867,9 +1835,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, goto 
out; entry = rb_entry(node, struct btrfs_free_space, offset_index); - while(1) { - if (entry->bytes < bytes || entry->offset < min_start) { + if (entry->bytes < bytes || + (!entry->bitmap && entry->offset < min_start)) { struct rb_node *node; node = rb_next(&entry->offset_index); @@ -1879,10 +1847,27 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, offset_index); continue; } - ret = entry->offset; - entry->offset += bytes; - entry->bytes -= bytes; + if (entry->bitmap) { + ret = btrfs_alloc_from_bitmap(block_group, + cluster, entry, bytes, + min_start); + if (ret == 0) { + struct rb_node *node; + node = rb_next(&entry->offset_index); + if (!node) + break; + entry = rb_entry(node, struct btrfs_free_space, + offset_index); + continue; + } + } else { + + ret = entry->offset; + + entry->offset += bytes; + entry->bytes -= bytes; + } if (entry->bytes == 0) rb_erase(&entry->offset_index, &cluster->root); @@ -1899,6 +1884,11 @@ out: block_group->free_space -= bytes; if (entry->bytes == 0) { block_group->free_extents--; + if (entry->bitmap) { + kfree(entry->bitmap); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); + } kmem_cache_free(btrfs_free_space_cachep, entry); } @@ -1919,6 +1909,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, unsigned long found_bits; unsigned long start = 0; unsigned long total_found = 0; + int ret; bool found = false; i = offset_to_bit(entry->offset, block_group->sectorsize, @@ -1941,7 +1932,7 @@ again: } if (!found_bits) - return -1; + return -ENOSPC; if (!found) { start = i; @@ -1965,11 +1956,144 @@ again: cluster->window_start = start * block_group->sectorsize + entry->offset; - cluster->points_to_bitmap = true; + rb_erase(&entry->offset_index, &block_group->free_space_offset); + ret = tree_insert_offset(&cluster->root, entry->offset, + &entry->offset_index, 1); + BUG_ON(ret); return 0; } +/* + * This searches the block group for just extents to fill the cluster with. + */ +static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, + u64 offset, u64 bytes, u64 min_bytes) +{ + struct btrfs_free_space *first = NULL; + struct btrfs_free_space *entry = NULL; + struct btrfs_free_space *prev = NULL; + struct btrfs_free_space *last; + struct rb_node *node; + u64 window_start; + u64 window_free; + u64 max_extent; + u64 max_gap = 128 * 1024; + + entry = tree_search_offset(block_group, offset, 0, 1); + if (!entry) + return -ENOSPC; + + /* + * We don't want bitmaps, so just move along until we find a normal + * extent entry. + */ + while (entry->bitmap) { + node = rb_next(&entry->offset_index); + if (!node) + return -ENOSPC; + entry = rb_entry(node, struct btrfs_free_space, offset_index); + } + + window_start = entry->offset; + window_free = entry->bytes; + max_extent = entry->bytes; + first = entry; + last = entry; + prev = entry; + + while (window_free <= min_bytes) { + node = rb_next(&entry->offset_index); + if (!node) + return -ENOSPC; + entry = rb_entry(node, struct btrfs_free_space, offset_index); + + if (entry->bitmap) + continue; + /* + * we haven't filled the empty size and the window is + * very large. 
reset and try again + */ + if (entry->offset - (prev->offset + prev->bytes) > max_gap || + entry->offset - window_start > (min_bytes * 2)) { + first = entry; + window_start = entry->offset; + window_free = entry->bytes; + last = entry; + max_extent = entry->bytes; + } else { + last = entry; + window_free += entry->bytes; + if (entry->bytes > max_extent) + max_extent = entry->bytes; + } + prev = entry; + } + + cluster->window_start = first->offset; + + node = &first->offset_index; + + /* + * now we've found our entries, pull them out of the free space + * cache and put them into the cluster rbtree + */ + do { + int ret; + + entry = rb_entry(node, struct btrfs_free_space, offset_index); + node = rb_next(&entry->offset_index); + if (entry->bitmap) + continue; + + rb_erase(&entry->offset_index, &block_group->free_space_offset); + ret = tree_insert_offset(&cluster->root, entry->offset, + &entry->offset_index, 0); + BUG_ON(ret); + } while (node && entry != last); + + cluster->max_size = max_extent; + + return 0; +} + +/* + * This specifically looks for bitmaps that may work in the cluster, we assume + * that we have already failed to find extents that will work. + */ +static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, + u64 offset, u64 bytes, u64 min_bytes) +{ + struct btrfs_free_space *entry; + struct rb_node *node; + int ret = -ENOSPC; + + if (block_group->total_bitmaps == 0) + return -ENOSPC; + + entry = tree_search_offset(block_group, + offset_to_bitmap(block_group, offset), + 0, 1); + if (!entry) + return -ENOSPC; + + node = &entry->offset_index; + do { + entry = rb_entry(node, struct btrfs_free_space, offset_index); + node = rb_next(&entry->offset_index); + if (!entry->bitmap) + continue; + if (entry->bytes < min_bytes) + continue; + ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, + bytes, min_bytes); + } while (ret && node); + + return ret; +} + /* * here we try to find a cluster of blocks in a block group. The goal * is to find at least bytes free and up to empty_size + bytes free. @@ -1984,15 +2108,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size) { - struct btrfs_free_space *entry = NULL; - struct rb_node *node; - struct btrfs_free_space *next; - struct btrfs_free_space *last = NULL; u64 min_bytes; - u64 window_start; - u64 window_free; - u64 max_extent = 0; - bool found_bitmap = false; int ret; /* for metadata, allow allocates with more holes */ @@ -2029,134 +2145,19 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, ret = 0; goto out; } -again: - entry = tree_search_offset(block_group, offset, found_bitmap, 1); - if (!entry) { - ret = -ENOSPC; - goto out; - } - - /* - * If found_bitmap is true, we exhausted our search for extent entries, - * and we just want to search all of the bitmaps that we can find, and - * ignore any extent entries we find. 
- */ - while (entry->bitmap || found_bitmap || - (!entry->bitmap && entry->bytes < min_bytes)) { - struct rb_node *node = rb_next(&entry->offset_index); - - if (entry->bitmap && entry->bytes > bytes + empty_size) { - ret = btrfs_bitmap_cluster(block_group, entry, cluster, - offset, bytes, min_bytes); - if (!ret) - goto got_it; - } - - if (!node) { - ret = -ENOSPC; - goto out; - } - entry = rb_entry(node, struct btrfs_free_space, offset_index); - } - - /* - * We already searched all the extent entries from the passed in offset - * to the end and didn't find enough space for the cluster, and we also - * didn't find any bitmaps that met our criteria, just go ahead and exit - */ - if (found_bitmap) { - ret = -ENOSPC; - goto out; - } - - cluster->points_to_bitmap = false; - window_start = entry->offset; - window_free = entry->bytes; - last = entry; - max_extent = entry->bytes; - - while (1) { - /* out window is just right, lets fill it */ - if (window_free >= min_bytes) - break; - - node = rb_next(&last->offset_index); - if (!node) { - if (found_bitmap) - goto again; - ret = -ENOSPC; - goto out; - } - next = rb_entry(node, struct btrfs_free_space, offset_index); - - /* - * we found a bitmap, so if this search doesn't result in a - * cluster, we know to go and search again for the bitmaps and - * start looking for space there - */ - if (next->bitmap) { - if (!found_bitmap) - offset = next->offset; - found_bitmap = true; - last = next; - continue; - } - - /* - * we haven't filled the empty size and the window is - * very large. reset and try again - */ - if (next->offset - (last->offset + last->bytes) > 128 * 1024 || - next->offset - window_start > (bytes + empty_size) * 2) { - entry = next; - window_start = entry->offset; - window_free = entry->bytes; - last = entry; - max_extent = entry->bytes; - } else { - last = next; - window_free += next->bytes; - if (entry->bytes > max_extent) - max_extent = entry->bytes; - } - } - - cluster->window_start = entry->offset; - - /* - * now we've found our entries, pull them out of the free space - * cache and put them into the cluster rbtree - * - * The cluster includes an rbtree, but only uses the offset index - * of each free space cache entry. 
- */ - while (1) { - node = rb_next(&entry->offset_index); - if (entry->bitmap && node) { - entry = rb_entry(node, struct btrfs_free_space, - offset_index); - continue; - } else if (entry->bitmap && !node) { - break; - } - - rb_erase(&entry->offset_index, &block_group->free_space_offset); - ret = tree_insert_offset(&cluster->root, entry->offset, - &entry->offset_index, 0); - BUG_ON(ret); - if (!node || entry == last) - break; + ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes, + min_bytes); + if (ret) + ret = setup_cluster_bitmap(block_group, cluster, offset, + bytes, min_bytes); - entry = rb_entry(node, struct btrfs_free_space, offset_index); + if (!ret) { + atomic_inc(&block_group->count); + list_add_tail(&cluster->block_group_list, + &block_group->cluster_list); + cluster->block_group = block_group; } - - cluster->max_size = max_extent; -got_it: - ret = 0; - atomic_inc(&block_group->count); - list_add_tail(&cluster->block_group_list, &block_group->cluster_list); - cluster->block_group = block_group; out: spin_unlock(&cluster->lock); spin_unlock(&block_group->tree_lock); @@ -2173,7 +2174,6 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) spin_lock_init(&cluster->refill_lock); cluster->root = RB_ROOT; cluster->max_size = 0; - cluster->points_to_bitmap = false; INIT_LIST_HEAD(&cluster->block_group_list); cluster->block_group = NULL; } -- cgit v1.2.2 From 98bc3149fad639c8f50c7110b961a2a2fe085eed Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 22 Mar 2011 11:00:46 -0400 Subject: Btrfs: don't allocate dip->csums when doing writes When doing direct writes we store the checksums in the ordered sum stuff in the ordered extent for writing them when the write completes, so we don't even use the dip->csums array. So if we're writing, don't bother allocating dip->csums since we won't use it anyway. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e010000d4bc9..570cd44fe91b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5944,6 +5944,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, int nr_pages = 0; u32 *csums = dip->csums; int ret = 0; + int write = rw & REQ_WRITE; bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); if (!bio) @@ -5980,7 +5981,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, goto out_err; } - if (!skip_sum) + /* Write's use the ordered csums */ + if (!write && !skip_sum) csums = csums + nr_pages; start_sector += submit_len >> 9; file_offset += submit_len; @@ -6048,7 +6050,8 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, } dip->csums = NULL; - if (!skip_sum) { + /* Write's use the ordered csum stuff, so we don't need dip->csums */ + if (!write && !skip_sum) { dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); if (!dip->csums) { kfree(dip); -- cgit v1.2.2 From c0da7aa1a2d8fcafe271a7077599253c8ed94bb2 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 22 Mar 2011 11:05:07 -0400 Subject: Btrfs: mark the bio with an error if we have a failure in dio I noticed that dio_end_io calls the appropriate endio function with an error, but the endio functions don't actually do anything with that error, they assume that if there was an error then the bio will not be uptodate. So if we had checksum failures we would never pass back EIO. 
So if there is an error in our endio functions make sure to clear the uptodate flag on the bio. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 570cd44fe91b..e9813bd7d556 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5744,6 +5744,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) kfree(dip->csums); kfree(dip); + + /* If we had a csum failure make sure to clear the uptodate flag */ + if (err) + clear_bit(BIO_UPTODATE, &bio->bi_flags); dio_end_io(bio, err); } @@ -5845,6 +5849,10 @@ out_done: kfree(dip->csums); kfree(dip); + + /* If we had an error make sure to clear the uptodate flag */ + if (err) + clear_bit(BIO_UPTODATE, &bio->bi_flags); dio_end_io(bio, err); } -- cgit v1.2.2 From 240f62c8756df285da11469259b3900f32883168 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 23 Mar 2011 14:54:42 -0400 Subject: Btrfs: use RCU instead of a spinlock to protect the root node The pointer to the extent buffer for the root of each tree is protected by a spinlock so that we can safely read the pointer and take a reference on the extent buffer. But now that the extent buffers are freed via RCU, we can safely use rcu_read_lock instead. Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 73e53009e126..8680110f0a5a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -147,10 +147,11 @@ noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) struct extent_buffer *btrfs_root_node(struct btrfs_root *root) { struct extent_buffer *eb; - spin_lock(&root->node_lock); - eb = root->node; + + rcu_read_lock(); + eb = rcu_dereference(root->node); extent_buffer_get(eb); - spin_unlock(&root->node_lock); + rcu_read_unlock(); return eb; } @@ -165,14 +166,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) while (1) { eb = btrfs_root_node(root); btrfs_tree_lock(eb); - - spin_lock(&root->node_lock); - if (eb == root->node) { - spin_unlock(&root->node_lock); + if (eb == root->node) break; - } - spin_unlock(&root->node_lock); - btrfs_tree_unlock(eb); free_extent_buffer(eb); } @@ -458,10 +453,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, else parent_start = 0; - spin_lock(&root->node_lock); - root->node = cow; extent_buffer_get(cow); - spin_unlock(&root->node_lock); + rcu_assign_pointer(root->node, cow); btrfs_free_tree_block(trans, root, buf, parent_start, last_ref); @@ -930,9 +923,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, goto enospc; } - spin_lock(&root->node_lock); - root->node = child; - spin_unlock(&root->node_lock); + rcu_assign_pointer(root->node, child); add_root_to_dirty_list(root); btrfs_tree_unlock(child); @@ -2007,10 +1998,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(c); - spin_lock(&root->node_lock); old = root->node; - root->node = c; - spin_unlock(&root->node_lock); + rcu_assign_pointer(root->node, c); /* the super has an extra ref to root->node */ free_extent_buffer(old); -- cgit v1.2.2 From 1abe9b8a138c9988ba8f7bfded6453649a31541f Mon Sep 17 00:00:00 2001 From: liubo Date: Thu, 24 Mar 2011 11:18:59 +0000 Subject: Btrfs: add initial tracepoint support for btrfs Tracepoints can provide insight into why btrfs hits bugs and be greatly 
helpful for debugging, e.g.

dd-7822 [000] 2121.641088: btrfs_inode_request: root = 5(FS_TREE), gen = 4, ino = 256, blocks = 8, disk_i_size = 0, last_trans = 8, logged_trans = 0
dd-7822 [000] 2121.641100: btrfs_inode_new: root = 5(FS_TREE), gen = 8, ino = 257, blocks = 0, disk_i_size = 0, last_trans = 0, logged_trans = 0
btrfs-transacti-7804 [001] 2146.935420: btrfs_cow_block: root = 2(EXTENT_TREE), refs = 2, orig_buf = 29368320 (orig_level = 0), cow_buf = 29388800 (cow_level = 0)
btrfs-transacti-7804 [001] 2146.935473: btrfs_cow_block: root = 1(ROOT_TREE), refs = 2, orig_buf = 29364224 (orig_level = 0), cow_buf = 29392896 (cow_level = 0)
btrfs-transacti-7804 [001] 2146.972221: btrfs_transaction_commit: root = 1(ROOT_TREE), gen = 8
flush-btrfs-2-7821 [001] 2155.824210: btrfs_chunk_alloc: root = 3(CHUNK_TREE), offset = 1103101952, size = 1073741824, num_stripes = 1, sub_stripes = 0, type = DATA
flush-btrfs-2-7821 [001] 2155.824241: btrfs_cow_block: root = 2(EXTENT_TREE), refs = 2, orig_buf = 29388800 (orig_level = 0), cow_buf = 29396992 (cow_level = 0)
flush-btrfs-2-7821 [001] 2155.824255: btrfs_cow_block: root = 4(DEV_TREE), refs = 2, orig_buf = 29372416 (orig_level = 0), cow_buf = 29401088 (cow_level = 0)
flush-btrfs-2-7821 [000] 2155.824329: btrfs_cow_block: root = 3(CHUNK_TREE), refs = 2, orig_buf = 20971520 (orig_level = 0), cow_buf = 20975616 (cow_level = 0)
btrfs-endio-wri-7800 [001] 2155.898019: btrfs_cow_block: root = 5(FS_TREE), refs = 2, orig_buf = 29384704 (orig_level = 0), cow_buf = 29405184 (cow_level = 0)
btrfs-endio-wri-7800 [001] 2155.898043: btrfs_cow_block: root = 7(CSUM_TREE), refs = 2, orig_buf = 29376512 (orig_level = 0), cow_buf = 29409280 (cow_level = 0)

Here is what I have added:

1) ordered_extent:
btrfs_ordered_extent_add
btrfs_ordered_extent_remove
btrfs_ordered_extent_start
btrfs_ordered_extent_put
These provide critical information for understanding how ordered extents are updated.

2) extent_map:
btrfs_get_extent
extent_map is used in both the read and write cases, and it is useful for tracking how btrfs-specific IO is running.

3) writepage:
__extent_writepage
btrfs_writepage_end_io_hook
Pages are critical resources and produce a lot of corner cases during writeback, so it is valuable to know how a page is written to disk.

4) inode:
btrfs_inode_new
btrfs_inode_request
btrfs_inode_evict
These can show where and when an inode is created, and when an inode is evicted.

5) sync:
btrfs_sync_file
btrfs_sync_fs
These show the sync arguments.

6) transaction:
btrfs_transaction_commit
In a transaction-based filesystem, it is useful to know the generation and who performs the commit.

7) back reference and cow:
btrfs_delayed_tree_ref
btrfs_delayed_data_ref
btrfs_delayed_ref_head
btrfs_cow_block
Btrfs natively supports back references; these tracepoints are helpful for understanding btrfs's COW mechanism.

8) chunk:
btrfs_chunk_alloc
btrfs_chunk_free
A chunk is a link between a physical offset and a logical offset, and it represents space information in btrfs; these are helpful for tracing space usage.

9) reserved_extent:
btrfs_reserved_extent_alloc
btrfs_reserved_extent_free
These can show how btrfs uses its space.
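Each of these is declared with the kernel's TRACE_EVENT() macro; the btrfs definitions live in include/trace/events/btrfs.h. As a rough, abridged sketch of the shape of one such definition (field names here are illustrative, not copied from the header):

	TRACE_EVENT(btrfs_transaction_commit,

		TP_PROTO(struct btrfs_root *root),

		TP_ARGS(root),

		TP_STRUCT__entry(
			__field(u64, generation)
			__field(u64, root_objectid)
		),

		TP_fast_assign(
			__entry->generation    = root->fs_info->generation;
			__entry->root_objectid = root->root_key.objectid;
		),

		TP_printk("root = %llu, gen = %llu",
			  __entry->root_objectid, __entry->generation)
	);

Once compiled in, the events can be enabled at runtime through the usual tracing interface, e.g. the files under /sys/kernel/debug/tracing/events/btrfs/.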
Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 3 +++ fs/btrfs/ctree.h | 1 + fs/btrfs/delayed-ref.c | 6 ++++++ fs/btrfs/extent-tree.c | 4 ++++ fs/btrfs/extent_io.c | 2 ++ fs/btrfs/file.c | 1 + fs/btrfs/inode.c | 12 ++++++++++++ fs/btrfs/ordered-data.c | 8 ++++++++ fs/btrfs/super.c | 5 +++++ fs/btrfs/transaction.c | 2 ++ fs/btrfs/volumes.c | 16 +++++----------- fs/btrfs/volumes.h | 11 +++++++++++ 12 files changed, 60 insertions(+), 11 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 8680110f0a5a..465b5d7d6b48 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -535,6 +535,9 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, ret = __btrfs_cow_block(trans, root, buf, parent, parent_slot, cow_ret, search_start, 0); + + trace_btrfs_cow_block(root, buf, *cow_ret); + return ret; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0ee679b6c1b7..9d0f59142afa 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "extent_io.h" #include "extent_map.h" diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index e807b143b857..bce28f653899 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -483,6 +483,8 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans, INIT_LIST_HEAD(&head_ref->cluster); mutex_init(&head_ref->mutex); + trace_btrfs_delayed_ref_head(ref, head_ref, action); + existing = tree_insert(&delayed_refs->root, &ref->rb_node); if (existing) { @@ -537,6 +539,8 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans, } full_ref->level = level; + trace_btrfs_delayed_tree_ref(ref, full_ref, action); + existing = tree_insert(&delayed_refs->root, &ref->rb_node); if (existing) { @@ -591,6 +595,8 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans, full_ref->objectid = owner; full_ref->offset = offset; + trace_btrfs_delayed_data_ref(ref, full_ref, action); + existing = tree_insert(&delayed_refs->root, &ref->rb_node); if (existing) { diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd794c35a636..86ea471d3801 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5412,6 +5412,8 @@ again: dump_space_info(sinfo, num_bytes, 1); } + trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset); + return ret; } @@ -5433,6 +5435,8 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) update_reserved_bytes(cache, len, 0, 1); btrfs_put_block_group(cache); + trace_btrfs_reserved_extent_free(root, start, len); + return ret; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1bbd26b4fc5c..77c65a0bea34 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2192,6 +2192,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, else write_flags = WRITE; + trace___extent_writepage(page, inode, wbc); + WARN_ON(!PageLocked(page)); pg_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index a85b044cf39e..656bc0a892b1 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1205,6 +1205,7 @@ int btrfs_sync_file(struct file *file, int datasync) int ret = 0; struct btrfs_trans_handle *trans; + trace_btrfs_sync_file(file, datasync); /* we wait first, since the writeback may change the inode */ root->log_batch++; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e9813bd7d556..eaa271484199 
100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1787,6 +1787,8 @@ out: static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate) { + trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); + ClearPagePrivate2(page); return btrfs_finish_ordered_io(page->mapping->host, start, end); } @@ -3718,6 +3720,8 @@ void btrfs_evict_inode(struct inode *inode) unsigned long nr; int ret; + trace_btrfs_inode_evict(inode); + truncate_inode_pages(&inode->i_data, 0); if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || root == root->fs_info->tree_root)) @@ -4510,6 +4514,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, return ERR_PTR(-ENOMEM); if (dir) { + trace_btrfs_inode_request(dir); + ret = btrfs_set_inode_index(dir, index); if (ret) { iput(inode); @@ -4584,6 +4590,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, insert_inode_hash(inode); inode_tree_add(inode); + + trace_btrfs_inode_new(inode); + return inode; fail: if (dir) @@ -5261,6 +5270,9 @@ insert: } write_unlock(&em_tree->lock); out: + + trace_btrfs_get_extent(root, em); + if (path) btrfs_free_path(path); if (trans) { diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 083a55477375..a1c940425307 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -202,6 +202,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, INIT_LIST_HEAD(&entry->list); INIT_LIST_HEAD(&entry->root_extent_list); + trace_btrfs_ordered_extent_add(inode, entry); + spin_lock(&tree->lock); node = tree_insert(&tree->tree, file_offset, &entry->rb_node); @@ -387,6 +389,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) struct list_head *cur; struct btrfs_ordered_sum *sum; + trace_btrfs_ordered_extent_put(entry->inode, entry); + if (atomic_dec_and_test(&entry->refs)) { while (!list_empty(&entry->list)) { cur = entry->list.next; @@ -420,6 +424,8 @@ static int __btrfs_remove_ordered_extent(struct inode *inode, spin_lock(&root->fs_info->ordered_extent_lock); list_del_init(&entry->root_extent_list); + trace_btrfs_ordered_extent_remove(inode, entry); + /* * we have no more ordered extents for this inode and * no dirty pages. We can safely remove it from the @@ -585,6 +591,8 @@ void btrfs_start_ordered_extent(struct inode *inode, u64 start = entry->file_offset; u64 end = start + entry->len - 1; + trace_btrfs_ordered_extent_start(inode, entry); + /* * pages in the range can be dirty, clean or writeback. 
We * start IO on any dirty ones so the wait doesn't stall waiting diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index d39a9895d932..2edfc039f098 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -52,6 +52,9 @@ #include "export.h" #include "compression.h" +#define CREATE_TRACE_POINTS +#include + static const struct super_operations btrfs_super_ops; static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno, @@ -620,6 +623,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) struct btrfs_root *root = btrfs_sb(sb); int ret; + trace_btrfs_sync_fs(wait); + if (!wait) { filemap_flush(root->fs_info->btree_inode->i_mapping); return 0; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3d73c8d93bbb..5b4bc685bb0e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1389,6 +1389,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, put_transaction(cur_trans); put_transaction(cur_trans); + trace_btrfs_transaction_commit(root); + mutex_unlock(&root->fs_info->trans_mutex); if (current->journal_info == trans) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index dd13eb81ee40..8ba3c9ebff93 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -33,17 +33,6 @@ #include "volumes.h" #include "async-thread.h" -struct map_lookup { - u64 type; - int io_align; - int io_width; - int stripe_len; - int sector_size; - int num_stripes; - int sub_stripes; - struct btrfs_bio_stripe stripes[]; -}; - static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_device *device); @@ -1923,6 +1912,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, BUG_ON(ret); + trace_btrfs_chunk_free(root, map, chunk_offset, em->len); + if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); BUG_ON(ret); @@ -2650,6 +2641,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, *num_bytes = chunk_bytes_by_type(type, calc_size, map->num_stripes, sub_stripes); + trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); + em = alloc_extent_map(GFP_NOFS); if (!em) { ret = -ENOMEM; @@ -2758,6 +2751,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans, item_size); BUG_ON(ret); } + kfree(chunk); return 0; } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 7fb59d45fe8c..7b38d0668b51 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -145,6 +145,17 @@ struct btrfs_device_info { u64 max_avail; }; +struct map_lookup { + u64 type; + int io_align; + int io_width; + int stripe_len; + int sector_size; + int num_stripes; + int sub_stripes; + struct btrfs_bio_stripe stripes[]; +}; + /* Used to sort the devices by max_avail(descending sort) */ int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); -- cgit v1.2.2 From db5b493ac78e46c7b6bad22cd25d8041564cd8ea Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Wed, 23 Mar 2011 08:14:16 +0000 Subject: Btrfs: cleanup some BUG_ON() This patch changes some BUG_ON() to the error return. 
(but, most callers still use BUG_ON()) Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 3 ++- fs/btrfs/disk-io.c | 5 ++++- fs/btrfs/extent-tree.c | 25 ++++++++++++++++++------- fs/btrfs/file-item.c | 3 ++- fs/btrfs/inode-map.c | 3 ++- fs/btrfs/ioctl.c | 5 ++++- fs/btrfs/root-tree.c | 6 ++++-- fs/btrfs/transaction.c | 12 +++++++++--- fs/btrfs/tree-log.c | 15 +++++++++------ 9 files changed, 54 insertions(+), 23 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 465b5d7d6b48..4edcbe915736 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3709,7 +3709,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root unsigned long ptr; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); if (!ret) { leaf = path->nodes[0]; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9f31e110b481..00cbb41af660 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1248,7 +1248,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, root, fs_info, location->objectid); path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) { + kfree(root); + return ERR_PTR(-ENOMEM); + } ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0); if (ret == 0) { l = path->nodes[0]; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 86ea471d3801..a6a8159c5d1e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5463,7 +5463,8 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; path->leave_spinning = 1; ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, @@ -6457,10 +6458,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; wc = kzalloc(sizeof(*wc), GFP_NOFS); - BUG_ON(!wc); + if (!wc) { + btrfs_free_path(path); + return -ENOMEM; + } btrfs_assert_tree_locked(parent); parent_level = btrfs_header_level(parent); @@ -6918,7 +6923,11 @@ static noinline int get_new_locations(struct inode *reloc_inode, } path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) { + if (exts != *extents) + kfree(exts); + return -ENOMEM; + } cur_pos = extent_key->objectid - offset; last_byte = extent_key->objectid + extent_key->offset; @@ -7442,7 +7451,8 @@ static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, int ret; new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); - BUG_ON(!new_extent); + if (!new_extent) + return -ENOMEM; ref = btrfs_lookup_leaf_ref(root, leaf->start); BUG_ON(!ref); @@ -7647,7 +7657,8 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, return 0; root_item = kmalloc(sizeof(*root_item), GFP_NOFS); - BUG_ON(!root_item); + if (!root_item) + return -ENOMEM; ret = btrfs_copy_root(trans, root, root->commit_root, &eb, BTRFS_TREE_RELOC_OBJECTID); @@ -7673,7 +7684,7 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, &root_key); - BUG_ON(!reloc_root); + BUG_ON(IS_ERR(reloc_root)); reloc_root->last_trans = trans->transid; reloc_root->commit_root = NULL; reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; diff --git 
a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 4f19a3e1bf32..a2134195a85e 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -48,7 +48,8 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; file_key.objectid = objectid; file_key.offset = pos; btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c56eb5909172..c05a08f4c411 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -30,7 +30,8 @@ int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) int slot; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; search_key.objectid = BTRFS_LAST_FREE_OBJECTID; search_key.type = -1; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ad9b8c0e930b..88d3cb2eaf75 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2350,12 +2350,15 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root; struct btrfs_trans_handle *trans; u64 transid; + int ret; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) return PTR_ERR(trans); transid = trans->transid; - btrfs_commit_transaction_async(trans, root, 0); + ret = btrfs_commit_transaction_async(trans, root, 0); + if (ret) + return ret; if (argp) if (copy_to_user(argp, &transid, sizeof(transid))) diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6a1086e83ffc..29b2d7c930eb 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -88,7 +88,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, search_key.offset = (u64)-1; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); if (ret < 0) goto out; @@ -332,7 +333,8 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *leaf; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; ret = btrfs_search_slot(trans, root, key, path, -1, 1); if (ret < 0) goto out; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 5b4bc685bb0e..ce48eb59d615 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -57,7 +57,8 @@ static noinline int join_transaction(struct btrfs_root *root) if (!cur_trans) { cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS); - BUG_ON(!cur_trans); + if (!cur_trans) + return -ENOMEM; root->fs_info->generation++; cur_trans->num_writers = 1; cur_trans->num_joined = 0; @@ -195,7 +196,11 @@ again: wait_current_trans(root); ret = join_transaction(root); - BUG_ON(ret); + if (ret < 0) { + if (type != TRANS_JOIN_NOLOCK) + mutex_unlock(&root->fs_info->trans_mutex); + return ERR_PTR(ret); + } cur_trans = root->fs_info->running_transaction; cur_trans->use_count++; @@ -1156,7 +1161,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, struct btrfs_transaction *cur_trans; ac = kmalloc(sizeof(*ac), GFP_NOFS); - BUG_ON(!ac); + if (!ac) + return -ENOMEM; INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 429cfcfadf90..f9425e33e358 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1828,7 +1828,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, int orig_level; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; level = 
btrfs_header_level(log->node); orig_level = level; @@ -3114,9 +3115,11 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) .stage = 0, }; - fs_info->log_root_recovering = 1; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; + + fs_info->log_root_recovering = 1; trans = btrfs_start_transaction(fs_info->tree_root, 0); BUG_ON(IS_ERR(trans)); @@ -3124,7 +3127,8 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) wc.trans = trans; wc.pin = 1; - walk_log_tree(trans, log_root_tree, &wc); + ret = walk_log_tree(trans, log_root_tree, &wc); + BUG_ON(ret); again: key.objectid = BTRFS_TREE_LOG_OBJECTID; @@ -3148,8 +3152,7 @@ again: log = btrfs_read_fs_root_no_radix(log_root_tree, &found_key); - BUG_ON(!log); - + BUG_ON(IS_ERR(log)); tmp_key.objectid = found_key.offset; tmp_key.type = BTRFS_ROOT_ITEM_KEY; -- cgit v1.2.2 From 7e75bf3ff3a716d7b21d8fb43bf823115801c1e9 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 18 Mar 2011 22:56:43 +0000 Subject: btrfs: properly access unaligned checksum buffer On Fri, Mar 18, 2011 at 11:56:53AM -0400, Chris Mason wrote: > Thanks for fielding this one. Does put_unaligned_le32 optimize away on > platforms with efficient access? It would be great if we didn't need > the #ifdef. (quicktest: assembly output is same for put_unaligned_le32 and direct assignment on my x86_64) I was originally following examples in Documentation/unaligned-memory-access.txt. From other code it seems to me that the define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is intended for larger portions of code. Macros/wrappers for {put,get}_unaligned* are chosen via arch//include/asm/unaligned.h accordingly, therefore it's safe to use put_unaligned_le32 without the ifdef. dave Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 00cbb41af660..2bdb124333ab 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -29,6 +29,7 @@ #include #include #include +#include <asm/unaligned.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -198,7 +199,7 @@ u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len) void btrfs_csum_final(u32 crc, char *result) { - *(__le32 *)result = ~cpu_to_le32(crc); + put_unaligned_le32(~crc, result); } /* -- cgit v1.2.2 From 97d9a8a420444eb5b5c071d4b3b9c4100a7ae015 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 24 Mar 2011 06:33:21 +0000 Subject: Btrfs: check return value of read_tree_block() This patch checks the return value of read_tree_block() and, if it is NULL, handles the error.
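The fix is mechanical; each call site grows the same guard (a sketch of the pattern, matching the hunks below):

	eb = read_tree_block(root, bytenr, blocksize, generation);
	if (!eb)
		return -EIO;	/* or: err = -EIO; goto next; */

Call sites that cannot unwind yet keep a BUG_ON(!eb) instead.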
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 3 +++ fs/btrfs/extent-tree.c | 6 ++++++ fs/btrfs/relocation.c | 6 ++++++ 3 files changed, 15 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 4edcbe915736..84d7ca1fe0ba 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -682,6 +682,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, if (!cur) { cur = read_tree_block(root, blocknr, blocksize, gen); + if (!cur) + return -EIO; } else if (!uptodate) { btrfs_read_buffer(cur, gen); } @@ -4087,6 +4089,7 @@ find_next_key: } btrfs_set_path_blocking(path); cur = read_node_slot(root, cur, slot); + BUG_ON(!cur); btrfs_tree_lock(cur); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a6a8159c5d1e..5bc658a9d85c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6067,6 +6067,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans, if (reada && level == 1) reada_walk_down(trans, root, wc, path); next = read_tree_block(root, bytenr, blocksize, generation); + if (!next) + return -EIO; btrfs_tree_lock(next); btrfs_set_lock_blocking(next); } @@ -7937,6 +7939,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, eb = read_tree_block(found_root, block_start, block_size, 0); + if (!eb) { + ret = -EIO; + goto out; + } btrfs_tree_lock(eb); BUG_ON(level != btrfs_header_level(eb)); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index c863c8447015..58250e09eb05 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1724,6 +1724,7 @@ again: eb = read_tree_block(dest, old_bytenr, blocksize, old_ptr_gen); + BUG_ON(!eb); btrfs_tree_lock(eb); if (cow) { ret = btrfs_cow_block(trans, dest, eb, parent, @@ -2513,6 +2514,10 @@ static int do_relocation(struct btrfs_trans_handle *trans, blocksize = btrfs_level_size(root, node->level); generation = btrfs_node_ptr_generation(upper->eb, slot); eb = read_tree_block(root, bytenr, blocksize, generation); + if (!eb) { + err = -EIO; + goto next; + } btrfs_tree_lock(eb); btrfs_set_lock_blocking(eb); @@ -2670,6 +2675,7 @@ static int get_tree_block_key(struct reloc_control *rc, BUG_ON(block->key_ready); eb = read_tree_block(rc->extent_root, block->bytenr, block->key.objectid, block->key.offset); + BUG_ON(!eb); WARN_ON(btrfs_header_level(eb) != block->level); if (block->level == 0) btrfs_item_key_to_cpu(eb, &block->key, 0); -- cgit v1.2.2 From fc0e4a314e361af3b13d9320e92c64118f9a3e61 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Thu, 24 Mar 2011 11:41:21 +0000 Subject: btrfs: use GFP_NOFS instead of GFP_KERNEL In the filesystem context, we must allocate memory by GFP_NOFS, or we may start another filesystem operation and make kswap thread hang up. 
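
The rule behind this change, sketched (illustrative fragment, not a verbatim hunk): an allocation on a path that filesystem reclaim can re-enter must not allow the allocator to recurse into the filesystem, or direct reclaim may call back into btrfs and deadlock on locks the caller already holds (the same recursion is spelled out in a later commit in this section):

    /* GFP_KERNEL here could recurse: kzalloc -> direct reclaim ->
     * shrink_icache_memory -> btrfs inode eviction -> btrfs_search_slot,
     * deadlocking on a tree lock we already hold.  GFP_NOFS forbids the
     * allocator from entering filesystem reclaim at all. */
    caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
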
Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5bc658a9d85c..7922f296420d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -471,7 +471,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, if (load_cache_only) return 0; - caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL); + caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); BUG_ON(!caching_ctl); INIT_LIST_HEAD(&caching_ctl->list); @@ -1743,7 +1743,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans, static void btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len) { - blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0); + blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0); } static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, -- cgit v1.2.2 From 75e7cb7fe0c391561bd3af36515be3f3c64a04c6 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Tue, 22 Mar 2011 10:12:20 +0000 Subject: Btrfs: Per file/directory controls for COW and compression Data compression and data cow are controlled across the entire FS by mount options right now. ioctls are needed to set this on a per file or per directory basis. This has been proposed previously, but VFS developers wanted us to use generic ioctls rather than btrfs-specific ones. According to Chris's comment, there should be just one true compression method(probably LZO) stored in the super. However, before this, we would wait for that one method is stable enough to be adopted into the super. So I list it as a long term goal, and just store it in ram today. After applying this patch, we can use the generic "FS_IOC_SETFLAGS" ioctl to control file and directory's datacow and compression attribute. NOTE: - The compression type is selected by such rules: If we mount btrfs with compress options, ie, zlib/lzo, the type is it. Otherwise, we'll use the default compress type (zlib today). v1->v2: - rebase to the latest btrfs. v2->v3: - fix a problem, i.e. when a file is set NOCOW via mount option, then this NOCOW will be screwed by inheritance from parent directory. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 6 ++++++ fs/btrfs/inode.c | 31 ++++++++++++++++++++++++++++--- fs/btrfs/ioctl.c | 41 +++++++++++++++++++++++++++++++++++++---- 4 files changed, 72 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9d0f59142afa..8302ecd4197f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1282,6 +1282,7 @@ struct btrfs_root { #define BTRFS_INODE_NODUMP (1 << 8) #define BTRFS_INODE_NOATIME (1 << 9) #define BTRFS_INODE_DIRSYNC (1 << 10) +#define BTRFS_INODE_COMPRESS (1 << 11) /* some macros to generate set/get funcs for the struct fields. This * assumes there is a lefoo_to_cpu for every type, so lets make a simple diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2bdb124333ab..125639ddaffe 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1854,6 +1854,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); + /* + * In the long term, we'll store the compression type in the super + * block, and it'll be used for per file compression control. 
+ */ + fs_info->compress_type = BTRFS_COMPRESS_ZLIB; + ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index eaa271484199..7a7a202b82ab 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -384,7 +384,8 @@ again: */ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && (btrfs_test_opt(root, COMPRESS) || - (BTRFS_I(inode)->force_compress))) { + (BTRFS_I(inode)->force_compress) || + (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) { WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); @@ -1256,7 +1257,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, ret = run_delalloc_nocow(inode, locked_page, start, end, page_started, 0, nr_written); else if (!btrfs_test_opt(root, COMPRESS) && - !(BTRFS_I(inode)->force_compress)) + !(BTRFS_I(inode)->force_compress) && + !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) ret = cow_file_range(inode, locked_page, start, end, page_started, nr_written, 1); else @@ -4584,7 +4586,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, if ((mode & S_IFREG)) { if (btrfs_test_opt(root, NODATASUM)) BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; - if (btrfs_test_opt(root, NODATACOW)) + if (btrfs_test_opt(root, NODATACOW) || + (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW)) BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; } @@ -6866,6 +6869,26 @@ static int btrfs_getattr(struct vfsmount *mnt, return 0; } +/* + * If a file is moved, it will inherit the cow and compression flags of the new + * directory. + */ +static void fixup_inode_flags(struct inode *dir, struct inode *inode) +{ + struct btrfs_inode *b_dir = BTRFS_I(dir); + struct btrfs_inode *b_inode = BTRFS_I(inode); + + if (b_dir->flags & BTRFS_INODE_NODATACOW) + b_inode->flags |= BTRFS_INODE_NODATACOW; + else + b_inode->flags &= ~BTRFS_INODE_NODATACOW; + + if (b_dir->flags & BTRFS_INODE_COMPRESS) + b_inode->flags |= BTRFS_INODE_COMPRESS; + else + b_inode->flags &= ~BTRFS_INODE_COMPRESS; +} + static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { @@ -6999,6 +7022,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, } } + fixup_inode_flags(new_dir, old_inode); + ret = btrfs_add_link(trans, new_dir, old_inode, new_dentry->d_name.name, new_dentry->d_name.len, 0, index); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 88d3cb2eaf75..32c980ae0f1c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -138,6 +138,24 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg) return 0; } +static int check_flags(unsigned int flags) +{ + if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ + FS_NOATIME_FL | FS_NODUMP_FL | \ + FS_SYNC_FL | FS_DIRSYNC_FL | \ + FS_NOCOMP_FL | FS_COMPR_FL | \ + FS_NOCOW_FL | FS_COW_FL)) + return -EOPNOTSUPP; + + if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) + return -EINVAL; + + if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) + return -EINVAL; + + return 0; +} + static int btrfs_ioctl_setflags(struct file *file, void __user *arg) { struct inode *inode = file->f_path.dentry->d_inode; @@ -153,10 +171,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; - if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ - FS_NOATIME_FL | FS_NODUMP_FL | \ - FS_SYNC_FL | FS_DIRSYNC_FL)) - return -EOPNOTSUPP; + ret = check_flags(flags); + if (ret) + return ret; if 
(!is_owner_or_cap(inode)) return -EACCES; @@ -201,6 +218,22 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) else ip->flags &= ~BTRFS_INODE_DIRSYNC; + /* + * The COMPRESS flag can only be changed by users, while the NOCOMPRESS + * flag may be changed automatically if compression code won't make + * things smaller. + */ + if (flags & FS_NOCOMP_FL) { + ip->flags &= ~BTRFS_INODE_COMPRESS; + ip->flags |= BTRFS_INODE_NOCOMPRESS; + } else if (flags & FS_COMPR_FL) { + ip->flags |= BTRFS_INODE_COMPRESS; + ip->flags &= ~BTRFS_INODE_NOCOMPRESS; + } + if (flags & FS_NOCOW_FL) + ip->flags |= BTRFS_INODE_NODATACOW; + else if (flags & FS_COW_FL) + ip->flags &= ~BTRFS_INODE_NODATACOW; trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); -- cgit v1.2.2 From 3ab3564f018b9b265d0258e4a231794bacd5ad85 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 22 Mar 2011 17:20:26 +0000 Subject: btrfs: return EXDEV when linking from different subvolumes btrfs_link returns EPERM if a cross-subvolume link is attempted. However, in this case I believe EXDEV to be the more appropriate value. From the link(2) man page: EXDEV oldpath and newpath are not on the same mounted file system. (Linux permits a file system to be mounted at multiple points, but link() does not work across different mount points, even if the same file system is mounted on both.) This matters because an application may have different behaviors based on return codes. Signed-off-by: Mark Fasheh Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7a7a202b82ab..67fd6e9552d3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4817,7 +4817,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, /* do not allow sys_link's with other subvols of the same device */ if (root->objectid != BTRFS_I(inode)->root->objectid) - return -EPERM; + return -EXDEV; btrfs_inc_nlink(inode); inode->i_ctime = CURRENT_TIME; -- cgit v1.2.2 From b4d00d569a49fcef02195635dbf8d15904b1fb63 Mon Sep 17 00:00:00 2001 From: Li Dongyang Date: Thu, 24 Mar 2011 10:24:25 +0000 Subject: Btrfs: make update_reserved_bytes() public Make the function public as we should update the reserved extents calculations after taking out an extent for trimming.
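
"Update the reserved extents calculations" concretely means bracketing the discard with a reserve/release pair, which is how the trim code later in this series uses the now-public function; a sketch of that ordering (names as used in this patch set, illustrative only):

    btrfs_update_reserved_bytes(cache, bytes, 1, 1);   /* reserve: hide the range from the allocator */
    btrfs_error_discard_extent(extent_root, start, bytes, &trimmed);
    btrfs_add_free_space(cache, start, bytes);         /* hand the range back */
    btrfs_update_reserved_bytes(cache, bytes, 0, 1);   /* drop the reservation */
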
Signed-off-by: Li Dongyang Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/extent-tree.c | 16 +++++++--------- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8302ecd4197f..9e21176cdf57 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2157,6 +2157,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, u64 root_objectid, u64 owner, u64 offset); int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); +int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 num_bytes, int reserve, int sinfo); int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7922f296420d..0671f5b77eb8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -36,8 +36,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int alloc); -static int update_reserved_bytes(struct btrfs_block_group_cache *cache, - u64 num_bytes, int reserve, int sinfo); static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, @@ -4233,8 +4231,8 @@ int btrfs_pin_extent(struct btrfs_root *root, * update size of reserved extents. this function may return -EAGAIN * if 'reserve' is true or 'sinfo' is false. */ -static int update_reserved_bytes(struct btrfs_block_group_cache *cache, - u64 num_bytes, int reserve, int sinfo) +int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 num_bytes, int reserve, int sinfo) { int ret = 0; if (sinfo) { @@ -4714,10 +4712,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); btrfs_add_free_space(cache, buf->start, buf->len); - ret = update_reserved_bytes(cache, buf->len, 0, 0); + ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0); if (ret == -EAGAIN) { /* block group became read-only */ - update_reserved_bytes(cache, buf->len, 0, 1); + btrfs_update_reserved_bytes(cache, buf->len, 0, 1); goto out; } @@ -5206,7 +5204,7 @@ checks: search_start - offset); BUG_ON(offset > search_start); - ret = update_reserved_bytes(block_group, num_bytes, 1, + ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1, (data & BTRFS_BLOCK_GROUP_DATA)); if (ret == -EAGAIN) { btrfs_add_free_space(block_group, offset, num_bytes); @@ -5432,7 +5430,7 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) ret = btrfs_discard_extent(root, start, len); btrfs_add_free_space(cache, start, len); - update_reserved_bytes(cache, len, 0, 1); + btrfs_update_reserved_bytes(cache, len, 0, 1); btrfs_put_block_group(cache); trace_btrfs_reserved_extent_free(root, start, len); @@ -5634,7 +5632,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, put_caching_control(caching_ctl); } - ret = update_reserved_bytes(block_group, ins->offset, 1, 1); + ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1); BUG_ON(ret); btrfs_put_block_group(block_group); ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, -- cgit v1.2.2 From fce3bb9a1bd492793170e117c60d5718b7896af4 Mon Sep 17 00:00:00 2001 From: Li Dongyang Date: Thu, 24 Mar 2011 10:24:26 +0000 Subject: Btrfs: make btrfs_map_block() return entire free extent for each device of RAID0/1/10/DUP 
btrfs_map_block() will only return a single stripe length, but we want the full extent be mapped to each disk when we are trimming the extent, so we add length to btrfs_bio_stripe and fill it if we are mapping for REQ_DISCARD. Signed-off-by: Li Dongyang Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 150 +++++++++++++++++++++++++++++++++++++++++++++-------- fs/btrfs/volumes.h | 1 + 2 files changed, 129 insertions(+), 22 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8ba3c9ebff93..c440c89a470a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2956,7 +2956,10 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, struct extent_map_tree *em_tree = &map_tree->map_tree; u64 offset; u64 stripe_offset; + u64 stripe_end_offset; u64 stripe_nr; + u64 stripe_nr_orig; + u64 stripe_nr_end; int stripes_allocated = 8; int stripes_required = 1; int stripe_index; @@ -2965,7 +2968,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, int max_errors = 0; struct btrfs_multi_bio *multi = NULL; - if (multi_ret && !(rw & REQ_WRITE)) + if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD))) stripes_allocated = 1; again: if (multi_ret) { @@ -3011,7 +3014,15 @@ again: max_errors = 1; } } - if (multi_ret && (rw & REQ_WRITE) && + if (rw & REQ_DISCARD) { + if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | + BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_DUP | + BTRFS_BLOCK_GROUP_RAID10)) { + stripes_required = map->num_stripes; + } + } + if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) && stripes_allocated < stripes_required) { stripes_allocated = map->num_stripes; free_extent_map(em); @@ -3031,12 +3042,15 @@ again: /* stripe_offset is the offset of this block in its stripe*/ stripe_offset = offset - stripe_offset; - if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | - BTRFS_BLOCK_GROUP_RAID10 | - BTRFS_BLOCK_GROUP_DUP)) { + if (rw & REQ_DISCARD) + *length = min_t(u64, em->len - offset, *length); + else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | + BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10 | + BTRFS_BLOCK_GROUP_DUP)) { /* we limit the length of each bio to what fits in a stripe */ *length = min_t(u64, em->len - offset, - map->stripe_len - stripe_offset); + map->stripe_len - stripe_offset); } else { *length = em->len - offset; } @@ -3046,8 +3060,19 @@ again: num_stripes = 1; stripe_index = 0; - if (map->type & BTRFS_BLOCK_GROUP_RAID1) { - if (unplug_page || (rw & REQ_WRITE)) + stripe_nr_orig = stripe_nr; + stripe_nr_end = (offset + *length + map->stripe_len - 1) & + (~(map->stripe_len - 1)); + do_div(stripe_nr_end, map->stripe_len); + stripe_end_offset = stripe_nr_end * map->stripe_len - + (offset + *length); + if (map->type & BTRFS_BLOCK_GROUP_RAID0) { + if (rw & REQ_DISCARD) + num_stripes = min_t(u64, map->num_stripes, + stripe_nr_end - stripe_nr_orig); + stripe_index = do_div(stripe_nr, map->num_stripes); + } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { + if (unplug_page || (rw & (REQ_WRITE | REQ_DISCARD))) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; @@ -3058,7 +3083,7 @@ again: } } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { - if (rw & REQ_WRITE) + if (rw & (REQ_WRITE | REQ_DISCARD)) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; @@ -3071,6 +3096,10 @@ again: if (unplug_page || (rw & REQ_WRITE)) num_stripes = map->sub_stripes; + else if (rw & REQ_DISCARD) + num_stripes = min_t(u64, map->sub_stripes * + 
(stripe_nr_end - stripe_nr_orig), + map->num_stripes); else if (mirror_num) stripe_index += mirror_num - 1; else { @@ -3088,24 +3117,101 @@ again: } BUG_ON(stripe_index >= map->num_stripes); - for (i = 0; i < num_stripes; i++) { - if (unplug_page) { - struct btrfs_device *device; - struct backing_dev_info *bdi; - - device = map->stripes[stripe_index].dev; - if (device->bdev) { - bdi = blk_get_backing_dev_info(device->bdev); - if (bdi->unplug_io_fn) - bdi->unplug_io_fn(bdi, unplug_page); - } - } else { + if (rw & REQ_DISCARD) { + for (i = 0; i < num_stripes; i++) { multi->stripes[i].physical = map->stripes[stripe_index].physical + stripe_offset + stripe_nr * map->stripe_len; multi->stripes[i].dev = map->stripes[stripe_index].dev; + + if (map->type & BTRFS_BLOCK_GROUP_RAID0) { + u64 stripes; + int last_stripe = (stripe_nr_end - 1) % + map->num_stripes; + int j; + + for (j = 0; j < map->num_stripes; j++) { + if ((stripe_nr_end - 1 - j) % + map->num_stripes == stripe_index) + break; + } + stripes = stripe_nr_end - 1 - j; + do_div(stripes, map->num_stripes); + multi->stripes[i].length = map->stripe_len * + (stripes - stripe_nr + 1); + + if (i == 0) { + multi->stripes[i].length -= + stripe_offset; + stripe_offset = 0; + } + if (stripe_index == last_stripe) + multi->stripes[i].length -= + stripe_end_offset; + } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { + u64 stripes; + int j; + int factor = map->num_stripes / + map->sub_stripes; + int last_stripe = (stripe_nr_end - 1) % factor; + last_stripe *= map->sub_stripes; + + for (j = 0; j < factor; j++) { + if ((stripe_nr_end - 1 - j) % factor == + stripe_index / map->sub_stripes) + break; + } + stripes = stripe_nr_end - 1 - j; + do_div(stripes, factor); + multi->stripes[i].length = map->stripe_len * + (stripes - stripe_nr + 1); + + if (i < map->sub_stripes) { + multi->stripes[i].length -= + stripe_offset; + if (i == map->sub_stripes - 1) + stripe_offset = 0; + } + if (stripe_index >= last_stripe && + stripe_index <= (last_stripe + + map->sub_stripes - 1)) { + multi->stripes[i].length -= + stripe_end_offset; + } + } else + multi->stripes[i].length = *length; + + stripe_index++; + if (stripe_index == map->num_stripes) { + /* This could only happen for RAID0/10 */ + stripe_index = 0; + stripe_nr++; + } + } + } else { + for (i = 0; i < num_stripes; i++) { + if (unplug_page) { + struct btrfs_device *device; + struct backing_dev_info *bdi; + + device = map->stripes[stripe_index].dev; + if (device->bdev) { + bdi = blk_get_backing_dev_info(device-> + bdev); + if (bdi->unplug_io_fn) + bdi->unplug_io_fn(bdi, + unplug_page); + } + } else { + multi->stripes[i].physical = + map->stripes[stripe_index].physical + + stripe_offset + + stripe_nr * map->stripe_len; + multi->stripes[i].dev = + map->stripes[stripe_index].dev; + } + stripe_index++; } - stripe_index++; } if (multi_ret) { *multi_ret = multi; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 7b38d0668b51..cc2eadaf7a27 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -126,6 +126,7 @@ struct btrfs_fs_devices { struct btrfs_bio_stripe { struct btrfs_device *dev; u64 physical; + u64 length; /* only used for discard mappings */ }; struct btrfs_multi_bio { -- cgit v1.2.2 From 5378e60734f5b7bfe1b43dc191aaf6131c1befe7 Mon Sep 17 00:00:00 2001 From: Li Dongyang Date: Thu, 24 Mar 2011 10:24:27 +0000 Subject: Btrfs: adjust btrfs_discard_extent() return errors and trimmed bytes Callers of btrfs_discard_extent() should check if we are mounted with -o discard, as we want to make fitrim to work even 
when the fs is not mounted with -o discard. Also we should use REQ_DISCARD to map the free extent to get a full mapping; lastly, we only return an error if 1) the error is not EOPNOTSUPP, or 2) no device supports discard. Signed-off-by: Li Dongyang Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 5 ++++- fs/btrfs/extent-tree.c | 43 ++++++++++++++++++++++++++----------------- 3 files changed, 31 insertions(+), 19 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9e21176cdf57..a18b7bc2b22c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2229,7 +2229,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end); int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, - u64 num_bytes); + u64 num_bytes, u64 *actual_bytes); int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 type); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 125639ddaffe..8ecc5419d8b6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3054,7 +3054,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, break; /* opt_discard */ - ret = btrfs_error_discard_extent(root, start, end + 1 - start); + if (btrfs_test_opt(root, DISCARD)) + ret = btrfs_error_discard_extent(root, start, + end + 1 - start, + NULL); clear_extent_dirty(unpin, start, end, GFP_NOFS); btrfs_error_unpin_extent_range(root, start, end); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0671f5b77eb8..e990d2d1ba4a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1738,39 +1738,45 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans, return ret; } -static void btrfs_issue_discard(struct block_device *bdev, +static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len) { - blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0); + return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0); } static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, - u64 num_bytes) + u64 num_bytes, u64 *actual_bytes) { int ret; - u64 map_length = num_bytes; + u64 discarded_bytes = 0; struct btrfs_multi_bio *multi = NULL; - if (!btrfs_test_opt(root, DISCARD)) - return 0; /* Tell the block device(s) that the sectors can be discarded */ - ret = btrfs_map_block(&root->fs_info->mapping_tree, READ, - bytenr, &map_length, &multi, 0); + ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD, + bytenr, &num_bytes, &multi, 0); if (!ret) { struct btrfs_bio_stripe *stripe = multi->stripes; int i; - if (map_length > num_bytes) - map_length = num_bytes; for (i = 0; i < multi->num_stripes; i++, stripe++) { - btrfs_issue_discard(stripe->dev->bdev, - stripe->physical, - map_length); + ret = btrfs_issue_discard(stripe->dev->bdev, - stripe->physical, - map_length); + stripe->physical, + stripe->length); + if (!ret) + discarded_bytes += stripe->length; + else if (ret != -EOPNOTSUPP) + break; } kfree(multi); } + if (discarded_bytes && ret == -EOPNOTSUPP) + ret = 0; + + if (actual_bytes) + *actual_bytes = discarded_bytes; + return ret; } @@ -4371,7 +4377,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, if (ret) break; - ret = btrfs_discard_extent(root, start, end + 1 - start); + if (btrfs_test_opt(root, DISCARD)) + ret = btrfs_discard_extent(root, start, + end + 1 - start, NULL); clear_extent_dirty(unpin, start, end, GFP_NOFS); unpin_extent_range(root, start, end); @@
-5427,7 +5435,8 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) return -ENOSPC; } - ret = btrfs_discard_extent(root, start, len); + if (btrfs_test_opt(root, DISCARD)) + ret = btrfs_discard_extent(root, start, len, NULL); btrfs_add_free_space(cache, start, len); btrfs_update_reserved_bytes(cache, len, 0, 1); @@ -8765,7 +8774,7 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) } int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, - u64 num_bytes) + u64 num_bytes, u64 *actual_bytes) { - return btrfs_discard_extent(root, bytenr, num_bytes); + return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes); } -- cgit v1.2.2 From f7039b1d5c32241f87a513e33120db36bf30264d Mon Sep 17 00:00:00 2001 From: Li Dongyang Date: Thu, 24 Mar 2011 10:24:28 +0000 Subject: Btrfs: add btrfs_trim_fs() to handle FITRIM We take a free extent out of the allocator, trim it, then put it back, but before we trim the block group we should make sure the block group is cached, so we add a small change to let cache_block_group() run without a transaction. Signed-off-by: Li Dongyang Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/extent-tree.c | 50 +++++++++++++++++++++++- fs/btrfs/free-space-cache.c | 92 +++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/free-space-cache.h | 2 + fs/btrfs/ioctl.c | 46 +++++++++++++++++++++++ 5 files changed, 190 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a18b7bc2b22c..93a0191aded6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2232,6 +2232,7 @@ int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 *actual_bytes); int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 type); +int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e990d2d1ba4a..1efeda3b2f6f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -440,7 +440,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, * allocate blocks for the tree root we can't do the fast caching since * we likely hold important locks.
*/ - if (!trans->transaction->in_commit && + if (trans && (!trans->transaction->in_commit) && (root && root != root->fs_info->tree_root)) { spin_lock(&cache->lock); if (cache->cached != BTRFS_CACHE_NO) { @@ -8778,3 +8778,51 @@ int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, { return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes); } + +int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_block_group_cache *cache = NULL; + u64 group_trimmed; + u64 start; + u64 end; + u64 trimmed = 0; + int ret = 0; + + cache = btrfs_lookup_block_group(fs_info, range->start); + + while (cache) { + if (cache->key.objectid >= (range->start + range->len)) { + btrfs_put_block_group(cache); + break; + } + + start = max(range->start, cache->key.objectid); + end = min(range->start + range->len, + cache->key.objectid + cache->key.offset); + + if (end - start >= range->minlen) { + if (!block_group_cache_done(cache)) { + ret = cache_block_group(cache, NULL, root, 0); + if (!ret) + wait_block_group_cache_done(cache); + } + ret = btrfs_trim_block_group(cache, + &group_trimmed, + start, + end, + range->minlen); + + trimmed += group_trimmed; + if (ret) { + btrfs_put_block_group(cache); + break; + } + } + + cache = next_block_group(fs_info->tree_root, cache); + } + + range->len = trimmed; + return ret; +} diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f03ef97c3b21..0037427d8a9d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2178,3 +2178,95 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) cluster->block_group = NULL; } +int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, + u64 *trimmed, u64 start, u64 end, u64 minlen) +{ + struct btrfs_free_space *entry = NULL; + struct btrfs_fs_info *fs_info = block_group->fs_info; + u64 bytes = 0; + u64 actually_trimmed; + int ret = 0; + + *trimmed = 0; + + while (start < end) { + spin_lock(&block_group->tree_lock); + + if (block_group->free_space < minlen) { + spin_unlock(&block_group->tree_lock); + break; + } + + entry = tree_search_offset(block_group, start, 0, 1); + if (!entry) + entry = tree_search_offset(block_group, + offset_to_bitmap(block_group, + start), + 1, 1); + + if (!entry || entry->offset >= end) { + spin_unlock(&block_group->tree_lock); + break; + } + + if (entry->bitmap) { + ret = search_bitmap(block_group, entry, &start, &bytes); + if (!ret) { + if (start >= end) { + spin_unlock(&block_group->tree_lock); + break; + } + bytes = min(bytes, end - start); + bitmap_clear_bits(block_group, entry, + start, bytes); + if (entry->bytes == 0) + free_bitmap(block_group, entry); + } else { + start = entry->offset + BITS_PER_BITMAP * + block_group->sectorsize; + spin_unlock(&block_group->tree_lock); + ret = 0; + continue; + } + } else { + start = entry->offset; + bytes = min(entry->bytes, end - start); + unlink_free_space(block_group, entry); + kfree(entry); + } + + spin_unlock(&block_group->tree_lock); + + if (bytes >= minlen) { + int update_ret; + update_ret = btrfs_update_reserved_bytes(block_group, + bytes, 1, 1); + + ret = btrfs_error_discard_extent(fs_info->extent_root, + start, + bytes, + &actually_trimmed); + + btrfs_add_free_space(block_group, + start, bytes); + if (!update_ret) + btrfs_update_reserved_bytes(block_group, + bytes, 0, 1); + + if (ret) + break; + *trimmed += actually_trimmed; + } + start += bytes; + bytes = 0; + + if 
(fatal_signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + cond_resched(); + } + + return ret; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index e49ca5c321b5..65c3b935289f 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -68,4 +68,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, int btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster); +int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, + u64 *trimmed, u64 start, u64 end, u64 minlen); #endif diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 32c980ae0f1c..649f47d2afb4 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -40,6 +40,7 @@ #include <linux/xattr.h> #include <linux/vmalloc.h> #include <linux/slab.h> +#include <linux/blkdev.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -258,6 +259,49 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg) return put_user(inode->i_generation, arg); } +static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) +{ + struct btrfs_root *root = fdentry(file)->d_sb->s_fs_info; + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_device *device; + struct request_queue *q; + struct fstrim_range range; + u64 minlen = ULLONG_MAX; + u64 num_devices = 0; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + mutex_lock(&fs_info->fs_devices->device_list_mutex); + list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { + if (!device->bdev) + continue; + q = bdev_get_queue(device->bdev); + if (blk_queue_discard(q)) { + num_devices++; + minlen = min((u64)q->limits.discard_granularity, + minlen); + } + } + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + if (!num_devices) + return -EOPNOTSUPP; + + if (copy_from_user(&range, arg, sizeof(range))) + return -EFAULT; + + range.minlen = max(range.minlen, minlen); + ret = btrfs_trim_fs(root, &range); + if (ret < 0) + return ret; + + if (copy_to_user(arg, &range, sizeof(range))) + return -EFAULT; + + return 0; +} + static noinline int create_subvol(struct btrfs_root *root, struct dentry *dentry, char *name, int namelen, @@ -2426,6 +2470,8 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_setflags(file, argp); case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); + case FITRIM: + return btrfs_ioctl_fitrim(file, argp); case BTRFS_IOC_SNAP_CREATE: return btrfs_ioctl_snap_create(file, argp, 0); case BTRFS_IOC_SNAP_CREATE_V2: -- cgit v1.2.2 From c622ae6085d0c6ad834213bbf1477eb311359078 Mon Sep 17 00:00:00 2001 From: liubo Date: Sat, 26 Mar 2011 08:01:12 -0400 Subject: btrfs: make inode ref log recovery faster When we recover from crash via write-ahead log tree and process the inode refs, for each btrfs_inode_ref item, we will 1) check if we already have a perfect match in fs/file tree, if we have, then we're done. 2) search the corresponding back reference in fs/file tree, and check all the names in this back reference to see if they are also in the log to avoid conflict corners. 3) recover the logged inode refs to fs/file tree. In current btrfs, however, - for 2)'s check, once is enough, since the checked back reference will remain unchanged after processing all the inode refs belonging to the key. - there is no need to do another 1) between 2) and 3).
I've made a small test to show how it improves, $dd if=/dev/zero of=foobar bs=4K count=1 $sync $make 100 hard links continuously, like ln foobar link_i $fsync foobar $echo b > /proc/sysrq-trigger after reboot $time mount DEV PATH without patch: real 0m0.285s user 0m0.001s sys 0m0.009s with patch: real 0m0.123s user 0m0.000s sys 0m0.010s Changelog v1->v2: - fix double free - pointed by David Sterba Changelog v2->v3: - adjust free order Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 35 +++++++++++------------------------ 1 file changed, 11 insertions(+), 24 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f9425e33e358..c50271ad3157 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -799,12 +799,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, struct inode *dir; int ret; struct btrfs_inode_ref *ref; - struct btrfs_dir_item *di; struct inode *inode; char *name; int namelen; unsigned long ref_ptr; unsigned long ref_end; + int search_done = 0; /* * it is possible that we didn't log all the parent directories @@ -845,7 +845,10 @@ again: * existing back reference, and we don't want to create * dangling pointers in the directory. */ -conflict_again: + + if (search_done) + goto insert; + ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret == 0) { char *victim_name; @@ -886,37 +889,21 @@ conflict_again: ret = btrfs_unlink_inode(trans, root, dir, inode, victim_name, victim_name_len); - kfree(victim_name); - btrfs_release_path(root, path); - goto conflict_again; } kfree(victim_name); ptr = (unsigned long)(victim_ref + 1) + victim_name_len; } BUG_ON(ret); - } - btrfs_release_path(root, path); - - /* look for a conflicting sequence number */ - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - btrfs_inode_ref_index(eb, ref), - name, namelen, 0); - if (di && !IS_ERR(di)) { - ret = drop_one_dir_item(trans, root, path, dir, di); - BUG_ON(ret); - } - btrfs_release_path(root, path); - - /* look for a conflicting name */ - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, - name, namelen, 0); - if (di && !IS_ERR(di)) { - ret = drop_one_dir_item(trans, root, path, dir, di); - BUG_ON(ret); + /* + * NOTE: we have searched root tree and checked the + * coresponding ref, it does not need to check again. + */ + search_done = 1; } btrfs_release_path(root, path); +insert: /* insert our name */ ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, btrfs_inode_ref_index(eb, ref)); -- cgit v1.2.2 From dac97e516c617f9c797f64b0224050b70aea30c7 Mon Sep 17 00:00:00 2001 From: Yoshinori Sano Date: Tue, 15 Feb 2011 12:01:42 +0000 Subject: Btrfs: fix uncheck memory allocations To make Btrfs code more robust, several return value checks where memory allocation can fail are introduced. I use BUG_ON where I don't know how to handle the error properly, which increases the number of using the notorious BUG_ON, though. 
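
A usage note for the FITRIM ioctl wired up a few commits above: a minimal userspace sketch of driving it (FITRIM and struct fstrim_range come from linux/fs.h; error handling kept minimal). On return the kernel updates range.len to the number of bytes actually trimmed, matching btrfs_trim_fs() above:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>   /* FITRIM, struct fstrim_range */

    int main(int argc, char **argv)
    {
            struct fstrim_range range = {
                    .start  = 0,
                    .len    = UINT64_MAX,   /* whole filesystem */
                    .minlen = 0,            /* kernel raises this to the device minimum */
            };
            int fd = open(argc > 1 ? argv[1] : "/", O_RDONLY);

            if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
                    perror("FITRIM");
                    return 1;
            }
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
            return 0;
    }
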
Signed-off-by: Yoshinori Sano Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 6 ++++++ fs/btrfs/extent-tree.c | 4 ++++ fs/btrfs/inode.c | 2 ++ 3 files changed, 12 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 4d2110eafe29..992a4b92083e 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -340,6 +340,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); + if (!cb) + return -ENOMEM; atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; @@ -354,6 +356,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); + if(!bio) { + kfree(cb); + return -ENOMEM; + } bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; atomic_inc(&cb->pending_bios); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1efeda3b2f6f..cd0b69f57375 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6978,6 +6978,10 @@ static noinline int get_new_locations(struct inode *reloc_inode, struct disk_extent *old = exts; max *= 2; exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); + if (!exts) { + ret = -ENOMEM; + goto out; + } memcpy(exts, old, sizeof(*exts) * nr); if (old != *extents) kfree(old); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 67fd6e9552d3..f739b256967e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -290,6 +290,7 @@ static noinline int add_async_extent(struct async_cow *cow, struct async_extent *async_extent; async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); + BUG_ON(!async_extent); async_extent->start = start; async_extent->ram_size = ram_size; async_extent->compressed_size = compressed_size; @@ -388,6 +389,7 @@ again: (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) { WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + BUG_ON(!pages); if (BTRFS_I(inode)->force_compress) compress_type = BTRFS_I(inode)->force_compress; -- cgit v1.2.2 From 2d4e6f6ad2b9f84f568d07dae4bdbc7f48a70ad1 Mon Sep 17 00:00:00 2001 From: liubo Date: Thu, 24 Feb 2011 09:38:16 +0000 Subject: Btrfs: fix return value of setflags ioctl setflags ioctl should return error when any checks fail. Signed-off-by: Liu Bo Reviewed-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 649f47d2afb4..6b70e0e2bd1e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -247,9 +247,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) btrfs_end_transaction(trans, root); mnt_drop_write(file->f_path.mnt); + + ret = 0; out_unlock: mutex_unlock(&inode->i_mutex); - return 0; + return ret; } static int btrfs_ioctl_getversion(struct file *file, int __user *arg) -- cgit v1.2.2 From 9f7c43c96727a53bea45f7f2549d897f0a6117b8 Mon Sep 17 00:00:00 2001 From: liubo Date: Mon, 7 Mar 2011 02:13:33 +0000 Subject: Btrfs: fix memory leak of empty filesystem after balance After Josef's patch (commit 3c14874acc71180553fb5aba528e3cf57c5b958b), btrfs will exclude super bytes when reading block groups (by marking an extent state UPTODATE).
However, these bytes do not get freed while balance removes unused block groups, and we won't process those removed groups any more; when we umount and unload the btrfs module, btrfs hits a memory leak. This patch adds the missing free operation. Steps to reproduce: $ mkfs.btrfs disk $ mount disk /mnt/btrfs -o loop $ btrfs filesystem balance /mnt/btrfs $ umount /mnt/btrfs $ rmmod btrfs Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd0b69f57375..a561060f5ffb 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8669,6 +8669,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, BUG_ON(!block_group); BUG_ON(!block_group->ro); + /* + * Free the reserved super bytes from this block group before + * remove it. + */ + free_excluded_extents(root, block_group); + memcpy(&key, &block_group->key, sizeof(key)); if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | -- cgit v1.2.2 From c59021f846881a957ac5afe456d0f59d6a517b61 Mon Sep 17 00:00:00 2001 From: liubo Date: Mon, 7 Mar 2011 02:13:14 +0000 Subject: Btrfs: fix OOPS of empty filesystem after balance btrfs will remove unused block groups after balance. When an empty filesystem is balanced, the block group with tag "DATA" may be dropped, and after umount and mount again, it will not find the "DATA" space_info, leading to an OOPS. So we initialize the necessary space_infos (DATA, SYSTEM, METADATA) to avoid the OOPS. Reported-by: Daniel J Blueman Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 6 ++++++ fs/btrfs/extent-tree.c | 23 +++++++++++++++++++++++ 3 files changed, 30 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 93a0191aded6..d47ce8307854 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2234,6 +2234,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 type); int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range); +int btrfs_init_space_info(struct btrfs_fs_info *fs_info); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8ecc5419d8b6..b3fc8475870f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2065,6 +2065,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->metadata_alloc_profile = (u64)-1; fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; + ret = btrfs_init_space_info(fs_info); + if (ret) { + printk(KERN_ERR "Failed to initial space info: %d\n", ret); + goto fail_block_groups; + } + ret = btrfs_read_block_groups(extent_root); if (ret) { printk(KERN_ERR "Failed to read block groups: %d\n", ret); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a561060f5ffb..f619c3cb13b7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8778,6 +8778,29 @@ out: return ret; } +int btrfs_init_space_info(struct btrfs_fs_info *fs_info) +{ + struct btrfs_space_info *space_info; + int ret; + + ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, + &space_info); + if (ret) + return ret; + + ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, + &space_info); + if (ret) + return ret; + + ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, + &space_info); + if (ret) + return ret; + +
return ret; +} + int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) { return unpin_extent_range(root, start, end); -- cgit v1.2.2 From c2db1073fdf9757e6fd8b4a59d15b6ecc7a2af8a Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Tue, 1 Mar 2011 06:48:31 +0000 Subject: Btrfs: check return value of btrfs_alloc_path() Adding the check on the return value of btrfs_alloc_path() to several places. And, some of callers are modified by this change. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 11 +++++++---- fs/btrfs/dir-item.c | 10 ++++++---- fs/btrfs/file-item.c | 2 ++ fs/btrfs/inode.c | 18 ++++++++++++------ 4 files changed, 27 insertions(+), 14 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 992a4b92083e..41d1d7c70e29 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -663,8 +663,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, atomic_inc(&cb->pending_bios); if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { - btrfs_lookup_bio_sums(root, inode, comp_bio, - sums); + ret = btrfs_lookup_bio_sums(root, inode, + comp_bio, sums); + BUG_ON(ret); } sums += (comp_bio->bi_size + root->sectorsize - 1) / root->sectorsize; @@ -689,8 +690,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); BUG_ON(ret); - if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) - btrfs_lookup_bio_sums(root, inode, comp_bio, sums); + if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { + ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums); + BUG_ON(ret); + } ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); BUG_ON(ret); diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index 02c97ad61b6d..c62f02f6ae69 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -151,7 +151,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root ret = PTR_ERR(dir_item); if (ret == -EEXIST) goto second_insert; - goto out; + goto out_free; } leaf = path->nodes[0]; @@ -170,7 +170,7 @@ second_insert: /* FIXME, use some real flag for selecting the extra index */ if (root == root->fs_info->tree_root) { ret = 0; - goto out; + goto out_free; } btrfs_release_path(root, path); @@ -180,7 +180,7 @@ second_insert: name, name_len); if (IS_ERR(dir_item)) { ret2 = PTR_ERR(dir_item); - goto out; + goto out_free; } leaf = path->nodes[0]; btrfs_cpu_key_to_disk(&disk_key, location); @@ -192,7 +192,9 @@ second_insert: name_ptr = (unsigned long)(dir_item + 1); write_extent_buffer(leaf, name, name_ptr, name_len); btrfs_mark_buffer_dirty(leaf); -out: + +out_free: + btrfs_free_path(path); if (ret) return ret; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a2134195a85e..a6a9d4e8b491 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -170,6 +170,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; if (bio->bi_size > PAGE_CACHE_SIZE * 8) path->reada = 2; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f739b256967e..04e9fffee8cc 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1467,8 +1467,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, if (bio_flags & EXTENT_BIO_COMPRESSED) { return btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); - } else if (!skip_sum) - 
btrfs_lookup_bio_sums(root, inode, bio, NULL); + } else if (!skip_sum) { + ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); + if (ret) + return ret; + } goto mapit; } else if (!skip_sum) { /* csum items have already been cloned */ @@ -1903,10 +1906,10 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, else rw = READ; - BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, + ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, failrec->last_mirror, failrec->bio_flags, 0); - return 0; + return ret; } /* @@ -5943,9 +5946,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, __btrfs_submit_bio_start_direct_io, __btrfs_submit_bio_done); goto err; - } else if (!skip_sum) - btrfs_lookup_bio_sums_dio(root, inode, bio, + } else if (!skip_sum) { + ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset, csums); + if (ret) + goto err; + } ret = btrfs_map_bio(root, rw, bio, 0, 1); err: -- cgit v1.2.2 From 92986796d84ef939e304099dece32572a755b280 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 4 Mar 2011 17:14:37 +0000 Subject: btrfs: don't mess with i_nlink of unlocked inode in rename() old_inode is not locked; it's not safe to play with its link count. Instead of bumping it and calling btrfs_unlink_inode(), add a variant of the latter that does not do btrfs_drop_nlink()/ btrfs_update_inode(), call it instead of btrfs_inc_nlink()/ btrfs_unlink_inode() and do btrfs_update_inode() ourselves. Signed-off-by: Al Viro Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 04e9fffee8cc..4822b3132784 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2664,10 +2664,10 @@ failed: * recovery code. 
It remove a link in a directory with a given name, and * also drops the back refs in the inode to the directory */ -int btrfs_unlink_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *dir, struct inode *inode, - const char *name, int name_len) +static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *dir, struct inode *inode, + const char *name, int name_len) { struct btrfs_path *path; int ret = 0; @@ -2739,12 +2739,25 @@ err: btrfs_i_size_write(dir, dir->i_size - name_len * 2); inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; btrfs_update_inode(trans, root, dir); - btrfs_drop_nlink(inode); - ret = btrfs_update_inode(trans, root, inode); out: return ret; } +int btrfs_unlink_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *dir, struct inode *inode, + const char *name, int name_len) +{ + int ret; + ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); + if (!ret) { + btrfs_drop_nlink(inode); + ret = btrfs_update_inode(trans, root, inode); + } + return ret; +} + + /* helper to check if there is any shared block in the path */ static int check_path_shared(struct btrfs_root *root, struct btrfs_path *path) @@ -6999,11 +7012,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, old_dentry->d_name.name, old_dentry->d_name.len); } else { - btrfs_inc_nlink(old_dentry->d_inode); - ret = btrfs_unlink_inode(trans, root, old_dir, - old_dentry->d_inode, - old_dentry->d_name.name, - old_dentry->d_name.len); + ret = __btrfs_unlink_inode(trans, root, old_dir, + old_dentry->d_inode, + old_dentry->d_name.name, + old_dentry->d_name.len); + if (!ret) + ret = btrfs_update_inode(trans, root, old_inode); } BUG_ON(ret); -- cgit v1.2.2 From c055e99eea6e4f614267632fac546e7896c0227b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 4 Mar 2011 17:15:18 +0000 Subject: btrfs: check link counter overflow in link(2) Signed-off-by: Al Viro Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4822b3132784..04babaf31a33 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4837,6 +4837,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (root->objectid != BTRFS_I(inode)->root->objectid) return -EXDEV; + if (inode->i_nlink == ~0U) + return -EMLINK; + btrfs_inc_nlink(inode); inode->i_ctime = CURRENT_TIME; -- cgit v1.2.2 From 1561deda687eef0e95065f1268d680ddc5976ee7 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Sun, 27 Mar 2011 16:07:36 +0800 Subject: btrfs: fix possible deadlock by clearing __GFP_FS flag Using the GFP_HIGHUSER_MOVABLE flag to allocate the metadata's page may cause deadlock. Task1 open() ... btrfs_search_slot() ... btrfs_cow_block() ... alloc_page() wait for reclaiming shrink_slab() ... shrink_icache_memory() ... btrfs_evict_inode() ... btrfs_search_slot() If the path is locked by task1, the deadlock happens. So the btree's page cache is different with the file's page cache, it can not allocate pages by GFP_HIGHUSER_MOVABLE flag, we must clear __GFP_FS flag in GFP_HIGHUSER_MOVABLE flag. 
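
The core of the fix, sketched with the pagemap.h accessors (the patch below writes i_mapping->flags directly; this is the equivalent helper form, shown for clarity only):

    /* Page-cache allocations for this mapping may no longer enter
     * filesystem reclaim, so the recursion described above cannot occur. */
    mapping_set_gfp_mask(inode->i_mapping,
                         mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
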
Reported-by: Itaru Kitayama Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 ++ fs/btrfs/inode.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b3fc8475870f..5cf3aa7b125c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1724,6 +1724,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, goto fail_bdi; } + fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS; + INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 04babaf31a33..06274186b290 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2536,6 +2536,8 @@ static void btrfs_read_locked_inode(struct inode *inode) BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); alloc_group_block = btrfs_inode_block_group(leaf, inode_item); + if (location.objectid == BTRFS_FREE_SPACE_OBJECTID) + inode->i_mapping->flags &= ~__GFP_FS; /* * try to precache a NULL acl entry for files that don't have @@ -4084,7 +4086,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, BTRFS_I(inode)->root = root; memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); btrfs_read_locked_inode(inode); - inode_tree_add(inode); unlock_new_inode(inode); if (new) -- cgit v1.2.2 From d9d04879321af570ea7285c6dad92d9c3cd108a1 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 27 Mar 2011 21:23:21 -0400 Subject: Btrfs: fix __btrfs_map_block on 32 bit machines Recent changes for discard support didn't compile, this fixes them not to try and % 64 bit numbers. Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c440c89a470a..8b9fb8c7683d 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3126,13 +3126,19 @@ again: if (map->type & BTRFS_BLOCK_GROUP_RAID0) { u64 stripes; - int last_stripe = (stripe_nr_end - 1) % - map->num_stripes; + u32 last_stripe = 0; int j; + div_u64_rem(stripe_nr_end - 1, + map->num_stripes, + &last_stripe); + for (j = 0; j < map->num_stripes; j++) { - if ((stripe_nr_end - 1 - j) % - map->num_stripes == stripe_index) + u32 test; + + div_u64_rem(stripe_nr_end - 1 - j, + map->num_stripes, &test); + if (test == stripe_index) break; } stripes = stripe_nr_end - 1 - j; @@ -3153,11 +3159,19 @@ again: int j; int factor = map->num_stripes / map->sub_stripes; - int last_stripe = (stripe_nr_end - 1) % factor; + u32 last_stripe = 0; + + div_u64_rem(stripe_nr_end - 1, + factor, &last_stripe); last_stripe *= map->sub_stripes; for (j = 0; j < factor; j++) { - if ((stripe_nr_end - 1 - j) % factor == + u32 test; + + div_u64_rem(stripe_nr_end - 1 - j, + factor, &test); + + if (test == stripe_index / map->sub_stripes) break; } -- cgit v1.2.2 From 25985edcedea6396277003854657b5f3cb31a628 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 30 Mar 2011 22:57:33 -0300 Subject: Fix common misspellings Fixes generated by 'codespell' and manually reviewed. 
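
For reference, codespell's typical invocation (assuming its standard command-line interface; exact flags may vary by version):

$ codespell fs/btrfs/        # report suspected misspellings
$ codespell -w fs/btrfs/     # write the suggested fixes in place
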
Signed-off-by: Lucas De Marchi --- fs/btrfs/extent_map.c | 2 +- fs/btrfs/inode.c | 2 +- fs/btrfs/relocation.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 2b6c12e983b3..a24a3f2fa13e 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -243,7 +243,7 @@ out: * Insert @em into @tree or perform a simple forward/backward merge with * existing mappings. The extent_map struct passed in will be inserted * into the tree directly, with an additional reference taken, or a - * reference dropped if the merge attempt was successfull. + * reference dropped if the merge attempt was successful. */ int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 93c28a1d6bdc..80920bce01ab 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2324,7 +2324,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) /* * if ret == 0 means we found what we were searching for, which - * is weird, but possible, so only screw with path if we didnt + * is weird, but possible, so only screw with path if we didn't * find the key and see if we have stuff that matches */ if (ret > 0) { diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 58250e09eb05..199a80134312 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2346,7 +2346,7 @@ struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans, root = next->root; BUG_ON(!root); - /* no other choice for non-refernce counted tree */ + /* no other choice for non-references counted tree */ if (!root->ref_cows) return root; -- cgit v1.2.2 From c9149235a42ab93914434fff45c44b45023363f3 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Wed, 30 Mar 2011 00:57:23 +0000 Subject: Btrfs: fix compiler warning in file.c While compiling Btrfs, I got following messages: CC [M] fs/btrfs/file.o fs/btrfs/file.c: In function '__btrfs_buffered_write': fs/btrfs/file.c:909: warning: 'ret' may be used uninitialized in this function CC [M] fs/btrfs/tree-defrag.o This patch fixes compiler warning. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 656bc0a892b1..e621ea54a3fd 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -906,7 +906,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, unsigned long last_index; size_t num_written = 0; int nrptrs; - int ret; + int ret = 0; nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / -- cgit v1.2.2 From 200da64e0b039f873f0f20481e6a7d056e7cc6c9 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 31 Mar 2011 00:44:29 +0000 Subject: Btrfs: fix /proc/mounts info. Some mount options are not displayed by /proc/mounts. This patch displays the option such as compress_type by /proc/mounts. Ex. 
[before] $ mount | grep sdc2 /dev/sdc2 on /test12 type btrfs (rw,space_cache,compress=lzo) $ cat /proc/mounts | grep sdc2 /dev/sdc2 /test12 btrfs rw,relatime,compress 0 0 [after] $ mount | grep sdc2 /dev/sdc2 on /test12 type btrfs (rw,space_cache,compress=lzo) $ cat /proc/mounts | grep sdc2 /dev/sdc2 /test12 btrfs rw,relatime,compress=lzo,space_cache 0 0 Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/super.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2edfc039f098..58e7de9cc90c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -644,6 +644,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) { struct btrfs_root *root = btrfs_sb(vfs->mnt_sb); struct btrfs_fs_info *info = root->fs_info; + char *compress_type; if (btrfs_test_opt(root, DEGRADED)) seq_puts(seq, ",degraded"); @@ -662,8 +663,16 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) if (info->thread_pool_size != min_t(unsigned long, num_online_cpus() + 2, 8)) seq_printf(seq, ",thread_pool=%d", info->thread_pool_size); - if (btrfs_test_opt(root, COMPRESS)) - seq_puts(seq, ",compress"); + if (btrfs_test_opt(root, COMPRESS)) { + if (info->compress_type == BTRFS_COMPRESS_ZLIB) + compress_type = "zlib"; + else + compress_type = "lzo"; + if (btrfs_test_opt(root, FORCE_COMPRESS)) + seq_printf(seq, ",compress-force=%s", compress_type); + else + seq_printf(seq, ",compress=%s", compress_type); + } if (btrfs_test_opt(root, NOSSD)) seq_puts(seq, ",nossd"); if (btrfs_test_opt(root, SSD_SPREAD)) @@ -678,6 +687,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_puts(seq, ",discard"); if (!(root->fs_info->sb->s_flags & MS_POSIXACL)) seq_puts(seq, ",noacl"); + if (btrfs_test_opt(root, SPACE_CACHE)) + seq_puts(seq, ",space_cache"); + if (btrfs_test_opt(root, CLEAR_CACHE)) + seq_puts(seq, ",clear_cache"); + if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) + seq_puts(seq, ",user_subvol_rm_allowed"); return 0; } -- cgit v1.2.2 From fe3f566cd19bb6d787c92b2e202c85f929abf3ac Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 28 Mar 2011 08:30:38 +0000 Subject: Btrfs: Fix oops for defrag with compression turned on When we defrag a file whose size fits into an inline extent, with compression enabled, the compress type is set to fs_info->compress_type, which is 0 if the btrfs filesystem is mounted without the compress option. This leads to an oops.
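Reduced to a sketch (not the exact call chain), the bug and the fix look like this:

	/* buggy: trust the mount-wide default; without -o compress,
	 * fs_info->compress_type is 0 even though defrag already
	 * compressed the pages */
	int compress_type = root->fs_info->compress_type;

	/* fixed shape: the caller that actually did the compression
	 * passes the type it used down to the inline-extent writer */
	ret = cow_file_range_inline(trans, root, inode, start, end,
				    total_compressed, compress_type,
				    pages);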
Reported-by: Daniel Blueman Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 06274186b290..62ae9d5da806 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -111,6 +111,7 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, size_t size, size_t compressed_size, + int compress_type, struct page **compressed_pages) { struct btrfs_key key; @@ -125,12 +126,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, size_t cur_size = size; size_t datasize; unsigned long offset; - int compress_type = BTRFS_COMPRESS_NONE; - if (compressed_size && compressed_pages) { - compress_type = root->fs_info->compress_type; + if (compressed_size && compressed_pages) cur_size = compressed_size; - } path = btrfs_alloc_path(); if (!path) @@ -220,7 +218,7 @@ fail: static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, - size_t compressed_size, + size_t compressed_size, int compress_type, struct page **compressed_pages) { u64 isize = i_size_read(inode); @@ -253,7 +251,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, inline_len = min_t(u64, isize, actual_end); ret = insert_inline_extent(trans, root, inode, start, inline_len, compressed_size, - compressed_pages); + compress_type, compressed_pages); BUG_ON(ret); btrfs_delalloc_release_metadata(inode, end + 1 - start); btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); @@ -432,12 +430,13 @@ again: * to make an uncompressed inline extent. */ ret = cow_file_range_inline(trans, root, inode, - start, end, 0, NULL); + start, end, 0, 0, NULL); } else { /* try making a compressed inline extent */ ret = cow_file_range_inline(trans, root, inode, start, end, - total_compressed, pages); + total_compressed, + compress_type, pages); } if (ret == 0) { /* @@ -791,7 +790,7 @@ static noinline int cow_file_range(struct inode *inode, if (start == 0) { /* lets try to make an inline extent */ ret = cow_file_range_inline(trans, root, inode, - start, end, 0, NULL); + start, end, 0, 0, NULL); if (ret == 0) { extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, -- cgit v1.2.2 From b44c59a80ded004e1a82712e5f9e17b131c03221 Mon Sep 17 00:00:00 2001 From: Johann Lombardi Date: Thu, 31 Mar 2011 13:23:47 +0000 Subject: Btrfs: fix subvol_sem leak in btrfs_rename() btrfs_rename() does not release the subvol_sem if the transaction failed to start. Signed-off-by: Johann Lombardi Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 62ae9d5da806..1ca3e68586cf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6961,8 +6961,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * should cover the worst case number of items we'll modify. 
*/ trans = btrfs_start_transaction(root, 20); - if (IS_ERR(trans)) - return PTR_ERR(trans); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_notrans; + } btrfs_set_trans_block_group(trans, new_dir); @@ -7062,7 +7064,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, } out_fail: btrfs_end_transaction_throttle(trans, root); - +out_notrans: if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&root->fs_info->subvol_sem); -- cgit v1.2.2 From 8b2b2d3cbefb605501342adaf64d601b545ed154 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 4 Apr 2011 01:52:13 +0000 Subject: Btrfs: fix memory leak in btrfs_ioctl_start_sync() Call btrfs_end_transaction() if btrfs_commit_transaction_async() fails. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 6b70e0e2bd1e..255c7c5279c4 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2436,8 +2436,10 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp return PTR_ERR(trans); transid = trans->transid; ret = btrfs_commit_transaction_async(trans, root, 0); - if (ret) + if (ret) { + btrfs_end_transaction(trans, root); return ret; + } if (argp) if (copy_to_user(argp, &transid, sizeof(transid))) -- cgit v1.2.2 From 6e8df2ae89ab37730c0062782f844c66ecfc97a7 Mon Sep 17 00:00:00 2001 From: Yoshinori Sano Date: Sun, 3 Apr 2011 12:31:28 +0000 Subject: Btrfs: fix memory leak in start_transaction() Free btrfs_trans_handle when join_transaction() fails in start_transaction() Signed-off-by: Yoshinori Sano Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index ce48eb59d615..d01cc249a8d3 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -197,6 +197,7 @@ again: ret = join_transaction(root); if (ret < 0) { + kmem_cache_free(btrfs_trans_handle_cachep, h); if (type != TRANS_JOIN_NOLOCK) mutex_unlock(&root->fs_info->trans_mutex); return ERR_PTR(ret); -- cgit v1.2.2 From adae52b94e18afa1f84fab67df2a8a872c2f5533 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Thu, 31 Mar 2011 09:43:23 +0000 Subject: btrfs: clear __GFP_FS flag in the space cache inode the object id of the space cache inode's key is allocated from the relative root, just like the regular file. So we can't identify space cache inode by checking the object id of the inode's key, and we have to clear __GFP_FS flag at the time we look up the space cache inode. 
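Sketched, the problem is that the old objectid test has become dead code, so the flag must instead be cleared where the cache inode is actually looked up (see the hunks below):

	/* no longer works: the cache inode's objectid now comes from
	 * the root's allocator, like a regular file, so this check
	 * never matches */
	if (location.objectid == BTRFS_FREE_SPACE_OBJECTID)
		inode->i_mapping->flags &= ~__GFP_FS;

	/* works: clear the bit in lookup_free_space_inode() right
	 * after the inode is obtained */
	inode->i_mapping->flags &= ~__GFP_FS;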
Signed-off-by: Miao Xie Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 2 ++ fs/btrfs/inode.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 0037427d8a9d..13575de85543 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -81,6 +81,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return ERR_PTR(-ENOENT); } + inode->i_mapping->flags &= ~__GFP_FS; + spin_lock(&block_group->lock); if (!root->fs_info->closing) { block_group->inode = igrab(inode); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1ca3e68586cf..57a03f6eb224 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2535,8 +2535,6 @@ static void btrfs_read_locked_inode(struct inode *inode) BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); alloc_group_block = btrfs_inode_block_group(leaf, inode_item); - if (location.objectid == BTRFS_FREE_SPACE_OBJECTID) - inode->i_mapping->flags &= ~__GFP_FS; /* * try to precache a NULL acl entry for files that don't have -- cgit v1.2.2 From 08fe4db170b4193603d9d31f40ebaf652d07ac9c Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 28 Mar 2011 02:01:25 +0000 Subject: Btrfs: Fix uninitialized root flags for subvolumes root_item->flags and root_item->byte_limit are not initialized when a subvolume is created. This bug is not revealed until we added readonly snapshot support - now you mount a btrfs filesystem and you may find the subvolumes in it are readonly. To work around this problem, we steal a bit from root_item->inode_item->flags, and use it to indicate if those fields have been properly initialized. When we read a tree root from disk, we check if the bit is set, and if not we'll set the flag and initialize the two fields of the root item. Reported-by: Andreas Philipp Signed-off-by: Li Zefan Tested-by: Andreas Philipp cc: stable@kernel.org Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 4 ++++ fs/btrfs/disk-io.c | 4 +++- fs/btrfs/ioctl.c | 4 ++++ fs/btrfs/root-tree.c | 18 ++++++++++++++++++ fs/btrfs/transaction.c | 1 + 5 files changed, 30 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d47ce8307854..3458b5725540 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1284,6 +1284,8 @@ struct btrfs_root { #define BTRFS_INODE_DIRSYNC (1 << 10) #define BTRFS_INODE_COMPRESS (1 << 11) +#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) + /* some macros to generate set/get funcs for the struct fields. 
This * assumes there is a lefoo_to_cpu for every type, so lets make a simple * one for u8: @@ -2359,6 +2361,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); int btrfs_find_orphan_roots(struct btrfs_root *tree_root); int btrfs_set_root_node(struct btrfs_root_item *item, struct extent_buffer *node); +void btrfs_check_and_init_root_item(struct btrfs_root_item *item); + /* dir-item.c */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5cf3aa7b125c..a272bfd74ea0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1276,8 +1276,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, root->commit_root = btrfs_root_node(root); BUG_ON(!root->node); out: - if (location->objectid != BTRFS_TREE_LOG_OBJECTID) + if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { root->ref_cows = 1; + btrfs_check_and_init_root_item(&root->root_item); + } return root; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 255c7c5279c4..f9c93a9ed4a7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -373,6 +373,10 @@ static noinline int create_subvol(struct btrfs_root *root, inode_item->nbytes = cpu_to_le64(root->leafsize); inode_item->mode = cpu_to_le32(S_IFDIR | 0755); + root_item.flags = 0; + root_item.byte_limit = 0; + inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT); + btrfs_set_root_bytenr(&root_item, leaf->start); btrfs_set_root_generation(&root_item, trans->transid); btrfs_set_root_level(&root_item, 0); diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 29b2d7c930eb..6928bff62daa 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -473,3 +473,21 @@ again: btrfs_free_path(path); return 0; } + +/* + * Old btrfs forgets to init root_item->flags and root_item->byte_limit + * for subvolumes. To work around this problem, we steal a bit from + * root_item->inode_item->flags, and use it to indicate if those fields + * have been properly initialized. + */ +void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item) +{ + u64 inode_flags = le64_to_cpu(root_item->inode.flags); + + if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) { + inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT; + root_item->inode.flags = cpu_to_le64(inode_flags); + root_item->flags = 0; + root_item->byte_limit = 0; + } +} diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index d01cc249a8d3..5b158da7e0bb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -976,6 +976,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); + btrfs_check_and_init_root_item(new_root_item); root_flags = btrfs_root_flags(new_root_item); if (pending->readonly) -- cgit v1.2.2 From 43be21462d8c263e2449b52b23326232fd710bee Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 1 Apr 2011 14:55:00 +0000 Subject: Btrfs: fix free space cache when there are pinned extents and clusters V2 I noticed a huge problem with the free space cache that was presenting as an early ENOSPC. Turns out when writing the free space cache out I forgot to take into account pinned extents and more importantly clusters. This would result in us leaking free space everytime we unmounted the filesystem and remounted it. 
I fix this by making sure to check and see if the current block group has a cluster and writing out any entries that are in the cluster to the cache, as well as writing any pinned extents we currently have to the cache since those will be available for us to use the next time the fs mounts. This patch also adds a check to the end of load_free_space_cache to make sure we got the right amount of free space cache, and if not make sure to clear the cache and re-cache the old fashioned way. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 82 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 78 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 13575de85543..f561c953205b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -24,6 +24,7 @@ #include "free-space-cache.h" #include "transaction.h" #include "disk-io.h" +#include "extent_io.h" #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) @@ -224,6 +225,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, u64 num_entries; u64 num_bitmaps; u64 generation; + u64 used = btrfs_block_group_used(&block_group->item); u32 cur_crc = ~(u32)0; pgoff_t index = 0; unsigned long first_page_offset; @@ -469,6 +471,17 @@ next: index++; } + spin_lock(&block_group->tree_lock); + if (block_group->free_space != (block_group->key.offset - used - + block_group->bytes_super)) { + spin_unlock(&block_group->tree_lock); + printk(KERN_ERR "block group %llu has an wrong amount of free " + "space\n", block_group->key.objectid); + ret = 0; + goto free_cache; + } + spin_unlock(&block_group->tree_lock); + ret = 1; out: kfree(checksums); @@ -497,8 +510,11 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct list_head *pos, *n; struct page *page; struct extent_state *cached_state = NULL; + struct btrfs_free_cluster *cluster = NULL; + struct extent_io_tree *unpin = NULL; struct list_head bitmap_list; struct btrfs_key key; + u64 start, end, len; u64 bytes = 0; u32 *crc, *checksums; pgoff_t index = 0, last_index = 0; @@ -507,6 +523,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, int entries = 0; int bitmaps = 0; int ret = 0; + bool next_page = false; root = root->fs_info->tree_root; @@ -553,6 +570,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, */ first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); + /* Get the cluster for this block_group if it exists */ + if (!list_empty(&block_group->cluster_list)) + cluster = list_entry(block_group->cluster_list.next, + struct btrfs_free_cluster, + block_group_list); + + /* + * We shouldn't have switched the pinned extents yet so this is the + * right one + */ + unpin = root->fs_info->pinned_extents; + /* * Lock all pages first so we can lock the extent safely. * @@ -582,6 +611,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 0, &cached_state, GFP_NOFS); + /* + * When searching for pinned extents, we need to start at our start + * offset. 
+ */ + start = block_group->key.objectid; + /* Write out the extent entries */ do { struct btrfs_free_space_entry *entry; @@ -589,6 +624,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, unsigned long offset = 0; unsigned long start_offset = 0; + next_page = false; + if (index == 0) { start_offset = first_page_offset; offset = start_offset; @@ -600,7 +637,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, entry = addr + start_offset; memset(addr, 0, PAGE_CACHE_SIZE); - while (1) { + while (node && !next_page) { struct btrfs_free_space *e; e = rb_entry(node, struct btrfs_free_space, offset_index); @@ -616,12 +653,49 @@ int btrfs_write_out_cache(struct btrfs_root *root, entry->type = BTRFS_FREE_SPACE_EXTENT; } node = rb_next(node); - if (!node) - break; + if (!node && cluster) { + node = rb_first(&cluster->root); + cluster = NULL; + } offset += sizeof(struct btrfs_free_space_entry); if (offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) + next_page = true; + entry++; + } + + /* + * We want to add any pinned extents to our free space cache + * so we don't leak the space + */ + while (!next_page && (start < block_group->key.objectid + + block_group->key.offset)) { + ret = find_first_extent_bit(unpin, start, &start, &end, + EXTENT_DIRTY); + if (ret) { + ret = 0; + break; + } + + /* This pinned extent is out of our range */ + if (start >= block_group->key.objectid + + block_group->key.offset) break; + + len = block_group->key.objectid + + block_group->key.offset - start; + len = min(len, end + 1 - start); + + entries++; + entry->offset = cpu_to_le64(start); + entry->bytes = cpu_to_le64(len); + entry->type = BTRFS_FREE_SPACE_EXTENT; + + start = end + 1; + offset += sizeof(struct btrfs_free_space_entry); + if (offset + sizeof(struct btrfs_free_space_entry) >= + PAGE_CACHE_SIZE) + next_page = true; entry++; } *crc = ~(u32)0; @@ -652,7 +726,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, page_cache_release(page); index++; - } while (node); + } while (node || next_page); /* Write out the bitmaps */ list_for_each_safe(pos, n, &bitmap_list) { -- cgit v1.2.2 From c9ddec74aa950a220cc4caa5215cfc5d886050b7 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 28 Mar 2011 13:43:25 +0000 Subject: Btrfs: don't warn in btrfs_add_orphan When I moved the orphan adding to btrfs_truncate I missed the fact that during orphan cleanup we just add the orphan items to the orphan list without going through btrfs_orphan_add, which results in lots of warnings on mount if you have any orphan items that need to be truncated. Just remove this warning since it's ok, this will allow all of the normal space accounting take place. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 57a03f6eb224..cc6022842e0c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2220,8 +2220,6 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) insert = 1; #endif insert = 1; - } else { - WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved); } if (!BTRFS_I(inode)->orphan_meta_reserved) { -- cgit v1.2.2 From be1a12a0dfed06cf1e62e35bf91620dc610a451a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 13:05:22 -0400 Subject: Btrfs: deal with the case that we run out of space in the cache Currently we don't handle running out of space in the cache, so to fix this we keep track of how far in the cache we are. 
Then we only dirty the pages if we successfully modify all of them, otherwise if we have an error or run out of space we can just drop them and not worry about the vm writing them out. Thanks, Tested-by Johannes Hirte Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 5 ++ fs/btrfs/file.c | 21 ++++---- fs/btrfs/free-space-cache.c | 117 +++++++++++++++++++++----------------------- 3 files changed, 69 insertions(+), 74 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3458b5725540..0d00a07b5b29 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2576,6 +2576,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, struct inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); +void btrfs_drop_pages(struct page **pages, size_t num_pages); +int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, + struct page **pages, size_t num_pages, + loff_t pos, size_t write_bytes, + struct extent_state **cached); /* tree-defrag.c */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e621ea54a3fd..75899a01dded 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, /* * unlocks pages after btrfs_file_write is done with them */ -static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) +void btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { @@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) * this also makes the decision about creating an inline extent vs * doing real data extents, marking pages dirty and delalloc as required. 
*/ -static noinline int dirty_and_release_pages(struct btrfs_root *root, - struct file *file, - struct page **pages, - size_t num_pages, - loff_t pos, - size_t write_bytes) +int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, + struct page **pages, size_t num_pages, + loff_t pos, size_t write_bytes, + struct extent_state **cached) { int err = 0; int i; - struct inode *inode = fdentry(file)->d_inode; u64 num_bytes; u64 start_pos; u64 end_of_last_block; @@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root, end_of_last_block = start_pos + num_bytes - 1; err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, - NULL); + cached); if (err) return err; @@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, } if (copied > 0) { - ret = dirty_and_release_pages(root, file, pages, - dirty_pages, pos, - copied); + ret = btrfs_dirty_pages(root, inode, pages, + dirty_pages, pos, copied, + NULL); if (ret) { btrfs_delalloc_release_space(inode, dirty_pages << PAGE_CACHE_SHIFT); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f561c953205b..a3f420def0e9 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode; struct rb_node *node; struct list_head *pos, *n; + struct page **pages; struct page *page; struct extent_state *cached_state = NULL; struct btrfs_free_cluster *cluster = NULL; @@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, u64 start, end, len; u64 bytes = 0; u32 *crc, *checksums; - pgoff_t index = 0, last_index = 0; unsigned long first_page_offset; - int num_checksums; + int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; int ret = 0; bool next_page = false; + bool out_of_space = false; root = root->fs_info->tree_root; @@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root, return 0; } - last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & ~(root->sectorsize - 1), (u64)-1); /* We need a checksum per page. */ - num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; - crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); + crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); if (!crc) { iput(inode); return 0; } + pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); + if (!pages) { + kfree(crc); + iput(inode); + return 0; + } + /* Since the first page has all of our checksums and our generation we * need to calculate the offset into the page that we can start writing * our entries. */ - first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); + first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); /* Get the cluster for this block_group if it exists */ if (!list_empty(&block_group->cluster_list)) @@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, * after find_get_page at this point. Just putting this here so people * know and don't freak out. 
*/ - while (index <= last_index) { + while (index < num_pages) { page = grab_cache_page(inode->i_mapping, index); if (!page) { - pgoff_t i = 0; + int i; - while (i < index) { - page = find_get_page(inode->i_mapping, i); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); - i++; + for (i = 0; i < num_pages; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); } goto out_free; } + pages[index] = page; index++; } @@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, offset = start_offset; } - page = find_get_page(inode->i_mapping, index); + if (index >= num_pages) { + out_of_space = true; + break; + } + + page = pages[index]; addr = kmap(page); entry = addr + start_offset; @@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root, bytes += PAGE_CACHE_SIZE; - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - - /* - * We need to release our reference we got for grab_cache_page, - * except for the first page which will hold our checksums, we - * do that below. - */ - if (index != 0) { - unlock_page(page); - page_cache_release(page); - } - - page_cache_release(page); - index++; } while (node || next_page); @@ -734,6 +728,10 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_free_space *entry = list_entry(pos, struct btrfs_free_space, list); + if (index >= num_pages) { + out_of_space = true; + break; + } page = find_get_page(inode->i_mapping, index); addr = kmap(page); @@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root, crc++; bytes += PAGE_CACHE_SIZE; - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); list_del_init(&entry->list); index++; } + if (out_of_space) { + btrfs_drop_pages(pages, num_pages); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, &cached_state, + GFP_NOFS); + ret = 0; + goto out_free; + } + /* Zero out the rest of the pages just to make sure */ - while (index <= last_index) { + while (index < num_pages) { void *addr; - page = find_get_page(inode->i_mapping, index); - + page = pages[index]; addr = kmap(page); memset(addr, 0, PAGE_CACHE_SIZE); kunmap(page); - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); bytes += PAGE_CACHE_SIZE; index++; } - btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state); - /* Write the checksums and trans id to the first page */ { void *addr; u64 *gen; - page = find_get_page(inode->i_mapping, 0); + page = pages[0]; addr = kmap(page); - memcpy(addr, checksums, sizeof(u32) * num_checksums); - gen = addr + (sizeof(u32) * num_checksums); + memcpy(addr, checksums, sizeof(u32) * num_pages); + gen = addr + (sizeof(u32) * num_pages); *gen = trans->transid; kunmap(page); - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); } - BTRFS_I(inode)->generation = trans->transid; + ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, + bytes, &cached_state); + btrfs_drop_pages(pages, num_pages); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state, GFP_NOFS); + if (ret) { + ret = 0; + goto out_free; + } + + BTRFS_I(inode)->generation = 
trans->transid; + filemap_write_and_wait(inode->i_mapping); key.objectid = BTRFS_FREE_SPACE_OBJECTID; @@ -853,6 +845,7 @@ out_free: BTRFS_I(inode)->generation = 0; } kfree(checksums); + kfree(pages); btrfs_update_inode(trans, root, inode); iput(inode); return ret; -- cgit v1.2.2 From 06d5a5899d6d3ac401d2359b5eac6d2a3a0fe331 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 5 Apr 2011 11:57:27 -0400 Subject: Btrfs: only retry transaction reservation once I saw a lockup where we kept getting into this start transaction->commit transaction loop because of enospce. The fact is if we fail to make our reservation, we've tried _everything_ several times, so we only need to try and commit the transaction once, and if that doesn't work then we really are out of space and need to just exit. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 5b158da7e0bb..4583008217e6 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -181,6 +181,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, { struct btrfs_trans_handle *h; struct btrfs_transaction *cur_trans; + int retries = 0; int ret; if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) @@ -224,10 +225,18 @@ again: if (num_items > 0) { ret = btrfs_trans_reserve_metadata(h, root, num_items); - if (ret == -EAGAIN) { + if (ret == -EAGAIN && !retries) { + retries++; btrfs_commit_transaction(h, root); goto again; + } else if (ret == -EAGAIN) { + /* + * We have already retried and got EAGAIN, so really we + * don't have space, so set ret to -ENOSPC. + */ + ret = -ENOSPC; } + if (ret < 0) { btrfs_end_transaction(h, root); return ERR_PTR(ret); -- cgit v1.2.2 From 12ddb96cb6752218d8a1aeb696ec9b0ca7adb42f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 5 Apr 2011 13:02:27 -0400 Subject: Btrfs: map the inode item when doing fill_inode_item Instead of calling kmap_atomic for every thing we set in the inode item, map the entire inode item at the start and unmap it at the end. This makes a sequential dd of 400mb O_DIRECT something like 1% faster. 
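Condensed to its core (the full hunk follows), the change brackets the whole run of setters with one map/unmap instead of a kmap_atomic() per field:

	if (!leaf->map_token)
		map_private_extent_buffer(leaf, (unsigned long)item,
					  sizeof(struct btrfs_inode_item),
					  &leaf->map_token, &leaf->kaddr,
					  &leaf->map_start, &leaf->map_len,
					  KM_USER1);

	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	/* ... every other btrfs_set_inode_*() store now hits the
	 * cached mapping instead of taking its own kmap_atomic() ... */

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}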
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cc6022842e0c..da2680263d9f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2589,6 +2589,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, struct btrfs_inode_item *item, struct inode *inode) { + if (!leaf->map_token) + map_private_extent_buffer(leaf, (unsigned long)item, + sizeof(struct btrfs_inode_item), + &leaf->map_token, &leaf->kaddr, + &leaf->map_start, &leaf->map_len, + KM_USER1); + btrfs_set_inode_uid(leaf, item, inode->i_uid); btrfs_set_inode_gid(leaf, item, inode->i_gid); btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); @@ -2617,6 +2624,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_inode_rdev(leaf, item, inode->i_rdev); btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); + + if (leaf->map_token) { + unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); + leaf->map_token = NULL; + } } /* -- cgit v1.2.2 From 1ef30be142d2cc60e2687ef267de864cf31be995 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 5 Apr 2011 19:25:36 -0400 Subject: Btrfs: do not call btrfs_update_inode in endio if nothing changed In the DIO code we often don't update the i_disk_size because the i_size isn't updated until after the DIO is completed, so basically we are allocating a path, doing a search, and updating the inode item for no reason since nothing changed. btrfs_ordered_update_i_size will return 1 if it didn't update i_disk_size, so only run btrfs_update_inode if btrfs_ordered_update_i_size returns 0. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index da2680263d9f..4a238d676e5b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1769,9 +1769,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); - btrfs_ordered_update_i_size(inode, 0, ordered_extent); - ret = btrfs_update_inode(trans, root, inode); - BUG_ON(ret); + ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); + if (!ret) { + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + } + ret = 0; out: if (nolock) { if (trans) @@ -5865,8 +5868,10 @@ again: } add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); - btrfs_ordered_update_i_size(inode, 0, ordered); - btrfs_update_inode(trans, root, inode); + ret = btrfs_ordered_update_i_size(inode, 0, ordered); + if (!ret) + btrfs_update_inode(trans, root, inode); + ret = 0; out_unlock: unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, ordered->file_offset + ordered->len - 1, -- cgit v1.2.2 From 02f57c7aedef1a537f4b16db7061cdd8efa3bb4e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 14:25:44 -0400 Subject: Btrfs: don't split dio bios if we don't have to We have been unconditionally allocating a new bio and re-adding all pages from our original bio to the new bio. This is needed if our original bio is larger than our stripe size, but if it is smaller than the stripe size then there is no need to do this. So check the map length and if we are under that then go ahead and submit the original bio. 
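A sketch of the early-out, relying on btrfs_map_block() reporting back (via map_length) how far the mapping stays contiguous; the full hunk follows:

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret)
		return -EIO;

	if (map_length >= orig_bio->bi_size) {
		/* the whole bio fits in one stripe: no clone, no
		 * page-by-page re-add, just submit it as-is */
		bio = orig_bio;
		goto submit;
	}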
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4a238d676e5b..149c77fd1eb5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6006,13 +6006,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, int ret = 0; int write = rw & REQ_WRITE; - bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); - if (!bio) - return -ENOMEM; - bio->bi_private = dip; - bio->bi_end_io = btrfs_end_dio_bio; - atomic_inc(&dip->pending_bios); - map_length = orig_bio->bi_size; ret = btrfs_map_block(map_tree, READ, start_sector << 9, &map_length, NULL, 0); @@ -6021,6 +6014,18 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, return -EIO; } + if (map_length >= orig_bio->bi_size) { + bio = orig_bio; + goto submit; + } + + bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); + if (!bio) + return -ENOMEM; + bio->bi_private = dip; + bio->bi_end_io = btrfs_end_dio_bio; + atomic_inc(&dip->pending_bios); + while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { if (unlikely(map_length < submit_len + bvec->bv_len || bio_add_page(bio, bvec->bv_page, bvec->bv_len, @@ -6071,6 +6076,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, } } +submit: ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, csums); if (!ret) -- cgit v1.2.2 From 1ae399382512b3e4d6c923e53da9e45935577040 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 14:41:34 -0400 Subject: Btrfs: do not use async submit for small DIO io's When looking at our DIO performance Chris said that for small IO's doing the async submit stuff tends to be more overhead than it's worth. With this on top of my other fixes I get about a 17-20% speedup doing a sequential dd with 4k IO's. Basically if we don't have to split the bio for the map length it's small enough to be directly submitted, otherwise go back to the async submit. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 149c77fd1eb5..2bb76c6157a4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5957,7 +5957,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, int rw, u64 file_offset, int skip_sum, - u32 *csums) + u32 *csums, int async_submit) { int write = rw & REQ_WRITE; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -5968,13 +5968,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, if (ret) goto err; - if (write && !skip_sum) { + if (skip_sum) + goto map; + + if (write && async_submit) { ret = btrfs_wq_submit_bio(root->fs_info, inode, rw, bio, 0, 0, file_offset, __btrfs_submit_bio_start_direct_io, __btrfs_submit_bio_done); goto err; + } else if (write) { + /* + * If we aren't doing async submit, calculate the csum of the + * bio now. 
+ */ + ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); + if (ret) + goto err; } else if (!skip_sum) { ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset, csums); @@ -5982,7 +5993,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, goto err; } - ret = btrfs_map_bio(root, rw, bio, 0, 1); +map: + ret = btrfs_map_bio(root, rw, bio, 0, async_submit); err: bio_put(bio); return ret; @@ -6004,6 +6016,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, int nr_pages = 0; u32 *csums = dip->csums; int ret = 0; + int async_submit = 0; int write = rw & REQ_WRITE; map_length = orig_bio->bi_size; @@ -6019,6 +6032,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, goto submit; } + async_submit = 1; bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); if (!bio) return -ENOMEM; @@ -6039,7 +6053,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, atomic_inc(&dip->pending_bios); ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums); + csums, async_submit); if (ret) { bio_put(bio); atomic_dec(&dip->pending_bios); @@ -6078,7 +6092,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, submit: ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums); + csums, async_submit); if (!ret) return 0; -- cgit v1.2.2 From 16d299ac7446b5a75c5683a9ae11d7907d444c86 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 14:53:07 -0400 Subject: Btrfs: reuse the extent_map we found when calling btrfs_get_extent In btrfs_get_block_direct we call btrfs_get_extent to lookup the extent for the range that we are looking for. If we don't find an extent, btrfs_get_extent will insert a extent_map for that area and mark it as a hole. So it does the job of allocating a new extent map and inserting it into the io tree. But if we're creating a new extent we free it up and redo all of that work. So instead pass the em to btrfs_new_extent_direct(), and if it will work just allocate the disk space and set it up properly and bypass the freeing/allocating of a new extent map and the expensive operation of inserting the thing into the io_tree. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2bb76c6157a4..24310c9cb14f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5445,17 +5445,30 @@ out: } static struct extent_map *btrfs_new_extent_direct(struct inode *inode, + struct extent_map *em, u64 start, u64 len) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; - struct extent_map *em; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct btrfs_key ins; u64 alloc_hint; int ret; + bool insert = false; - btrfs_drop_extent_cache(inode, start, start + len - 1, 0); + /* + * Ok if the extent map we looked up is a hole and is for the exact + * range we want, there is no reason to allocate a new one, however if + * it is not right then we need to free this one and drop the cache for + * our range. 
+ */ + if (em->block_start != EXTENT_MAP_HOLE || em->start != start || + em->len != len) { + free_extent_map(em); + em = NULL; + insert = true; + btrfs_drop_extent_cache(inode, start, start + len - 1, 0); + } trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) @@ -5471,10 +5484,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, goto out; } - em = alloc_extent_map(GFP_NOFS); if (!em) { - em = ERR_PTR(-ENOMEM); - goto out; + em = alloc_extent_map(GFP_NOFS); + if (!em) { + em = ERR_PTR(-ENOMEM); + goto out; + } } em->start = start; @@ -5484,9 +5499,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; + + /* + * We need to do this because if we're using the original em we searched + * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that. + */ + em->flags = 0; set_bit(EXTENT_FLAG_PINNED, &em->flags); - while (1) { + while (insert) { write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); @@ -5704,8 +5725,7 @@ must_cow: * it above */ len = bh_result->b_size; - free_extent_map(em); - em = btrfs_new_extent_direct(inode, start, len); + em = btrfs_new_extent_direct(inode, em, start, len); if (IS_ERR(em)) return PTR_ERR(em); len = min(len, em->len - (start - em->start)); -- cgit v1.2.2 From 93a54bc4c28a125978cddbe2db9e347391e3522d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 15:11:44 -0400 Subject: Btrfs: check for duplicate iov_base's when doing dio reads Apparently it is ok to submit a read to an IDE device with the same target page for different offsets. This is what Windows does under qemu. The problem is under DIO we expect them to be different buffers for checksumming reasons, and so this sort of thing will result in checksum errors, when in reality the file is fine. So when reading, check to make sure that all iov bases are different, and if they aren't fall back to buffered mode, since that will work out right. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 24310c9cb14f..00d59c6a9769 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6207,6 +6207,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io unsigned long nr_segs) { int seg; + int i; size_t size; unsigned long addr; unsigned blocksize_mask = root->sectorsize - 1; @@ -6221,8 +6222,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io addr = (unsigned long)iov[seg].iov_base; size = iov[seg].iov_len; end += size; - if ((addr & blocksize_mask) || (size & blocksize_mask)) + if ((addr & blocksize_mask) || (size & blocksize_mask)) goto out; + + /* If this is a write we don't need to check anymore */ + if (rw & WRITE) + continue; + + /* + * Check to make sure we don't have duplicate iov_base's in this + * iovec, if so return EINVAL, otherwise we'll get csum errors + * when reading back. + */ + for (i = seg + 1; i < nr_segs; i++) { + if (iov[seg].iov_base == iov[i].iov_base) + goto out; + } } retval = 0; out: -- cgit v1.2.2 From 8fb27640d0e2b43c5584bf0087431b7b8d3c319a Mon Sep 17 00:00:00 2001 From: Yoshinori Sano Date: Sat, 9 Apr 2011 02:30:07 +0000 Subject: Btrfs: fix memory leaks in btrfs_new_inode() This patch fixes memory leaks in btrfs_new_inode(). 
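The leak pattern, sketched (the hunks follow): once btrfs_alloc_path() has succeeded, every early return must release the path.

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);	/* previously leaked */
		return ERR_PTR(-ENOMEM);
	}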
Signed-off-by: Yoshinori Sano Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cc6022842e0c..2d1208f964eb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4526,14 +4526,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, BUG_ON(!path); inode = new_inode(root->fs_info->sb); - if (!inode) + if (!inode) { + btrfs_free_path(path); return ERR_PTR(-ENOMEM); + } if (dir) { trace_btrfs_inode_request(dir); ret = btrfs_set_inode_index(dir, index); if (ret) { + btrfs_free_path(path); iput(inode); return ERR_PTR(ret); } -- cgit v1.2.2 From 3387206f26e1b48703e810175b98611a4fd8e8ea Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Mon, 11 Apr 2011 21:52:52 +0000 Subject: btrfs: properly handle overlapping areas in memmove_extent_buffer Fix data corruption caused by memcpy() usage on overlapping data. I first observed it when tracking down a User Mode Linux crash on btrfs. The call chain is the following: ------------[ cut here ]------------ WARNING: at /home/slyfox/linux-2.6/fs/btrfs/extent_io.c:3900 memcpy_extent_buffer+0x1a5/0x219() Call Trace: 6fa39a58: [<601b495e>] _raw_spin_unlock_irqrestore+0x18/0x1c 6fa39a68: [<60029ad9>] warn_slowpath_common+0x59/0x70 6fa39aa8: [<60029b05>] warn_slowpath_null+0x15/0x17 6fa39ab8: [<600efc97>] memcpy_extent_buffer+0x1a5/0x219 6fa39b48: [<600efd9f>] memmove_extent_buffer+0x94/0x208 6fa39bc8: [<600becbf>] btrfs_del_items+0x214/0x473 6fa39c78: [<600ce1b0>] btrfs_delete_one_dir_name+0x7c/0xda 6fa39cc8: [<600dad6b>] __btrfs_unlink_inode+0xad/0x25d 6fa39d08: [<600d7864>] btrfs_start_transaction+0xe/0x10 6fa39d48: [<600dc9ff>] btrfs_unlink_inode+0x1b/0x3b 6fa39d78: [<600e04bc>] btrfs_unlink+0x70/0xef 6fa39dc8: [<6007f0d0>] vfs_unlink+0x58/0xa3 6fa39df8: [<60080278>] do_unlinkat+0xd4/0x162 6fa39e48: [<600517db>] call_rcu_sched+0xe/0x10 6fa39e58: [<600452a8>] __put_cred+0x58/0x5a 6fa39e78: [<6007446c>] sys_faccessat+0x154/0x166 6fa39ed8: [<60080317>] sys_unlink+0x11/0x13 6fa39ee8: [<60016b80>] handle_syscall+0x58/0x70 6fa39f08: [<60021377>] userspace+0x2d4/0x381 6fa39fc8: [<60014507>] fork_handler+0x62/0x69 ---[ end trace 70b0ca2ef0266b93 ]--- http://www.mail-archive.com/linux-btrfs@vger.kernel.org/msg09302.html Signed-off-by: Sergei Trofimovich Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 77c65a0bea34..864e0496cc1c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3885,6 +3885,12 @@ static void move_pages(struct page *dst_page, struct page *src_page, kunmap_atomic(dst_kaddr, KM_USER0); } +static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) +{ + unsigned long distance = (src > dst) ?
src - dst : dst - src; + return distance < len; +} + static void copy_pages(struct page *dst_page, struct page *src_page, unsigned long dst_off, unsigned long src_off, unsigned long len) @@ -3892,10 +3898,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page, char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); char *src_kaddr; - if (dst_page != src_page) + if (dst_page != src_page) { src_kaddr = kmap_atomic(src_page, KM_USER1); - else + } else { src_kaddr = dst_kaddr; + BUG_ON(areas_overlap(src_off, dst_off, len)); + } memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); kunmap_atomic(dst_kaddr, KM_USER0); @@ -3970,7 +3978,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, "len %lu len %lu\n", dst_offset, len, dst->len); BUG_ON(1); } - if (dst_offset < src_offset) { + if (!areas_overlap(src_offset, dst_offset, len)) { memcpy_extent_buffer(dst, dst_offset, src_offset, len); return; } -- cgit v1.2.2 From a1b75f7d961955e697ec377f90115e3517df98f9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 8 Apr 2011 15:51:18 +0000 Subject: Btrfs: check for duplicate iov_base's when doing dio reads Apparently it is ok to submit a read to an IDE device with the same target page for different offsets. This is what Windows does under qemu. The problem is under DIO we expect them to be different buffers for checksumming reasons, and so this sort of thing will result in checksum errors, when in reality the file is fine. So when reading, check to make sure that all iov bases are different, and if they aren't fall back to buffered mode, since that will work out right. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2d1208f964eb..edafc28883af 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6153,6 +6153,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io unsigned long nr_segs) { int seg; + int i; size_t size; unsigned long addr; unsigned blocksize_mask = root->sectorsize - 1; @@ -6167,8 +6168,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io addr = (unsigned long)iov[seg].iov_base; size = iov[seg].iov_len; end += size; - if ((addr & blocksize_mask) || (size & blocksize_mask)) + if ((addr & blocksize_mask) || (size & blocksize_mask)) goto out; + + /* If this is a write we don't need to check anymore */ + if (rw & WRITE) + continue; + + /* + * Check to make sure we don't have duplicate iov_base's in this + * iovec, if so return EINVAL, otherwise we'll get csum errors + * when reading back. + */ + for (i = seg + 1; i < nr_segs; i++) { + if (iov[seg].iov_base == iov[i].iov_base) + goto out; + } } retval = 0; out: -- cgit v1.2.2 From 13f2696f1da9700d401db0ac2bc27ebc17068b22 Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Mon, 11 Apr 2011 15:56:31 +0000 Subject: fix user annotation in ioctl.c Fix address space annotation correct in ioctl.c. 
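The rule sparse enforces here, in a nutshell: arithmetic on a void __user * still yields a user-space pointer, so both the cast and the copy_to_user() destination must carry the __user qualifier. A sketch, with the error handling abbreviated:

	struct btrfs_ioctl_space_info __user *user_dest;

	/* arg is void __user *; keep the address-space qualifier
	 * through the pointer arithmetic so sparse can type-check
	 * the copy below */
	user_dest = (struct btrfs_ioctl_space_info __user *)
		    (arg + sizeof(struct btrfs_ioctl_space_args));

	if (copy_to_user(user_dest, dest_orig, alloc_size))
		ret = -EFAULT;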
Signed-off-by: Daniel J Blueman BTRFS_BLOCK_GROUP_SYSTEM, @@ -2387,7 +2387,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) up_read(&info->groups_sem); } - user_dest = (struct btrfs_ioctl_space_info *) + user_dest = (struct btrfs_ioctl_space_info __user *) (arg + sizeof(struct btrfs_ioctl_space_args)); if (copy_to_user(user_dest, dest_orig, alloc_size)) Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f9c93a9ed4a7..f580a3a5d2fc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) struct btrfs_ioctl_space_info space; struct btrfs_ioctl_space_info *dest; struct btrfs_ioctl_space_info *dest_orig; - struct btrfs_ioctl_space_info *user_dest; + struct btrfs_ioctl_space_info __user *user_dest; struct btrfs_space_info *info; u64 types[] = {BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_SYSTEM, -- cgit v1.2.2 From e15d0542426f063dc53b4c51bdfc11e0bbe4d298 Mon Sep 17 00:00:00 2001 From: Xin Zhong Date: Wed, 6 Apr 2011 07:33:51 +0000 Subject: Btrfs: fix subvolume mount by name problem when default mount subvolume is set We create two subvolumes (meego_root and meego_home) in btrfs root directory. And set meego_root as default mount subvolume. After we remount btrfs, meego_root is mounted to top directory by default. Then when we try to mount meego_home (subvol=meego_home) to a subdirectory, it failed. The problem is when default mount subvolume is set to meego_root, we search meego_home in meego_root but can not find it. So the solution is to add a new mount option (subvolrootid) to specify subvol id of root and search subvol name in it. For our case, now we can use "-o subvolrootid=0,subvol=meego_home) to mount meego_home. 
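In effect the mount path becomes a two-stage lookup; a rough sketch of the intended semantics, simplified from the hunks below:

	/* 1. pick the tree that subvol=NAME is resolved against:
	 *    the subvolrootid tree if one was given, otherwise the
	 *    configured default subvolume */
	root = get_default_root(s, subvol_rootid);

	/* 2. resolve the name relative to that root rather than
	 *    relative to the default subvolume */
	new_root = lookup_one_len(subvol_name, root,
				  strlen(subvol_name));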
Detail information can be found in meego bugzilla: https://bugs.meego.com/show_bug.cgi?id=15055 Signed-off-by: Zhong, Xin Signed-off-by: Chris Mason --- fs/btrfs/super.c | 42 +++++++++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 58e7de9cc90c..0ac712efcdf2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -159,7 +159,7 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_err, }; static match_table_t tokens = { @@ -189,6 +189,7 @@ static match_table_t tokens = { {Opt_clear_cache, "clear_cache"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, {Opt_enospc_debug, "enospc_debug"}, + {Opt_subvolrootid, "subvolrootid=%d"}, {Opt_err, NULL}, }; @@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) break; case Opt_subvol: case Opt_subvolid: + case Opt_subvolrootid: case Opt_device: /* * These are parsed by btrfs_parse_early_options @@ -388,7 +390,7 @@ out: */ static int btrfs_parse_early_options(const char *options, fmode_t flags, void *holder, char **subvol_name, u64 *subvol_objectid, - struct btrfs_fs_devices **fs_devices) + u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; char *opts, *orig, *p; @@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, *subvol_objectid = intarg; } break; + case Opt_subvolrootid: + intarg = 0; + error = match_int(&args[0], &intarg); + if (!error) { + /* we want the original fs_tree */ + if (!intarg) + *subvol_rootid = + BTRFS_FS_TREE_OBJECTID; + else + *subvol_rootid = intarg; + } + break; case Opt_device: error = btrfs_scan_one_device(match_strdup(&args[0]), flags, holder, fs_devices); @@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, fmode_t mode = FMODE_READ; char *subvol_name = NULL; u64 subvol_objectid = 0; + u64 subvol_rootid = 0; int error = 0; if (!(flags & MS_RDONLY)) @@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, error = btrfs_parse_early_options(data, mode, fs_type, &subvol_name, &subvol_objectid, - &fs_devices); + &subvol_rootid, &fs_devices); if (error) return ERR_PTR(error); @@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, s->s_flags |= MS_ACTIVE; } - root = get_default_root(s, subvol_objectid); - if (IS_ERR(root)) { - error = PTR_ERR(root); - deactivate_locked_super(s); - goto error_free_subvol_name; - } /* if they gave us a subvolume name bind mount into that */ if (strcmp(subvol_name, ".")) { struct dentry *new_root; + + root = get_default_root(s, subvol_rootid); + if (IS_ERR(root)) { + error = PTR_ERR(root); + deactivate_locked_super(s); + goto error_free_subvol_name; + } + mutex_lock(&root->d_inode->i_mutex); new_root = lookup_one_len(subvol_name, root, strlen(subvol_name)); @@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } dput(root); root = new_root; + } else { + root = get_default_root(s, subvol_objectid); + if (IS_ERR(root)) { + error = PTR_ERR(root); + deactivate_locked_super(s); + goto error_free_subvol_name; + } } kfree(subvol_name); -- cgit v1.2.2 From 13c5a93e7005d7dae0b6d070d25203593e692d13 
Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 11 Apr 2011 15:45:29 -0400 Subject: Btrfs: avoid taking the trans_mutex in btrfs_end_transaction I've been working on making our O_DIRECT latency not suck and I noticed we were taking the trans_mutex in btrfs_end_transaction. So to do this we convert num_writers and use_count to atomic_t's and just decrement them in btrfs_end_transaction. Instead of deleting the transaction from the trans list in put_transaction we do that in btrfs_commit_transaction() since that's the only time it actually needs to be removed from the list. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/transaction.c | 37 ++++++++++++++++--------------------- fs/btrfs/transaction.h | 4 ++-- 3 files changed, 19 insertions(+), 24 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a272bfd74ea0..ef6865c17cd6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3136,7 +3136,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); - t->use_count = 0; + atomic_set(&t->use_count, 0); list_del_init(&t->list); memset(t, 0, sizeof(*t)); kmem_cache_free(btrfs_transaction_cachep, t); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 4583008217e6..c571734d5e5a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -32,10 +32,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) { - WARN_ON(transaction->use_count == 0); - transaction->use_count--; - if (transaction->use_count == 0) { - list_del_init(&transaction->list); + WARN_ON(atomic_read(&transaction->use_count) == 0); + if (atomic_dec_and_test(&transaction->use_count)) { memset(transaction, 0, sizeof(*transaction)); kmem_cache_free(btrfs_transaction_cachep, transaction); } @@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root) if (!cur_trans) return -ENOMEM; root->fs_info->generation++; - cur_trans->num_writers = 1; + atomic_set(&cur_trans->num_writers, 1); cur_trans->num_joined = 0; cur_trans->transid = root->fs_info->generation; init_waitqueue_head(&cur_trans->writer_wait); init_waitqueue_head(&cur_trans->commit_wait); cur_trans->in_commit = 0; cur_trans->blocked = 0; - cur_trans->use_count = 1; + atomic_set(&cur_trans->use_count, 1); cur_trans->commit_done = 0; cur_trans->start_time = get_seconds(); @@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root) root->fs_info->running_transaction = cur_trans; spin_unlock(&root->fs_info->new_trans_lock); } else { - cur_trans->num_writers++; + atomic_inc(&cur_trans->num_writers); cur_trans->num_joined++; } @@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root) cur_trans = root->fs_info->running_transaction; if (cur_trans && cur_trans->blocked) { DEFINE_WAIT(wait); - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); while (1) { prepare_to_wait(&root->fs_info->transaction_wait, &wait, TASK_UNINTERRUPTIBLE); @@ -205,7 +203,7 @@ again: } cur_trans = root->fs_info->running_transaction; - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); if (type != TRANS_JOIN_NOLOCK) mutex_unlock(&root->fs_info->trans_mutex); @@ -336,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) goto out_unlock; /* nothing committing|committed */ } - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); wait_for_commit(root, cur_trans); @@ 
-466,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, wake_up_process(info->transaction_kthread); } - if (lock) - mutex_lock(&info->trans_mutex); WARN_ON(cur_trans != info->running_transaction); - WARN_ON(cur_trans->num_writers < 1); - cur_trans->num_writers--; + WARN_ON(atomic_read(&cur_trans->num_writers) < 1); + atomic_dec(&cur_trans->num_writers); smp_mb(); if (waitqueue_active(&cur_trans->writer_wait)) wake_up(&cur_trans->writer_wait); put_transaction(cur_trans); - if (lock) - mutex_unlock(&info->trans_mutex); if (current->journal_info == trans) current->journal_info = NULL; @@ -1187,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, /* take transaction reference */ mutex_lock(&root->fs_info->trans_mutex); cur_trans = trans->transaction; - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); @@ -1246,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->trans_mutex); if (cur_trans->in_commit) { - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); @@ -1268,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); if (!prev_trans->commit_done) { - prev_trans->use_count++; + atomic_inc(&prev_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); wait_for_commit(root, prev_trans); @@ -1309,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, TASK_UNINTERRUPTIBLE); smp_mb(); - if (cur_trans->num_writers > 1) + if (atomic_read(&cur_trans->num_writers) > 1) schedule_timeout(MAX_SCHEDULE_TIMEOUT); else if (should_grow) schedule_timeout(1); mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); - } while (cur_trans->num_writers > 1 || + } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); ret = create_pending_snapshots(trans, root->fs_info); @@ -1403,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, wake_up(&cur_trans->commit_wait); + list_del_init(&cur_trans->list); put_transaction(cur_trans); put_transaction(cur_trans); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 229a594cacd5..e441acc6c584 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -27,11 +27,11 @@ struct btrfs_transaction { * total writers in this transaction, it must be zero before the * transaction can end */ - unsigned long num_writers; + atomic_t num_writers; unsigned long num_joined; int in_commit; - int use_count; + atomic_t use_count; int commit_done; int blocked; struct list_head list; -- cgit v1.2.2 From 507903b81840a70cc6a179d4eb03584ad50e8c5b Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Wed, 6 Apr 2011 10:02:20 +0000 Subject: btrfs: using cached extent_state in set/unlock combinations In several places the sequence (set_extent_uptodate, unlock_extent) is used. This leads to a duplicate lookup of the extent state. This patch lets set_extent_uptodate return a cached extent_state which can be passed to unlock_extent_cached. The occurrences of the above sequence are updated to use the cache.
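As an aside, the look-up-once pattern this patch introduces can be modeled in a few lines of standalone C. The struct and function names below are invented stand-ins, not the extent_io API; the point is that the second tree lookup is replaced by a referenced pointer that the unlock path consumes:

#include <stdio.h>

/* Toy extent-state node: a refcount plus the two bits we care about. */
struct state {
        int refs;
        int uptodate;
        int locked;
};

/*
 * Like set_extent_uptodate(..., &cached, ...): the (expensive) tree
 * lookup happens once, and a referenced pointer comes back via *cached.
 */
static void set_uptodate_cached(struct state *s, struct state **cached)
{
        s->uptodate = 1;
        if (cached && !*cached) {
                s->refs++;      /* this reference is owned by the cache */
                *cached = s;
        }
}

/*
 * Like unlock_extent_cached(): reuse the cached node instead of doing
 * a second lookup, then drop the cache's reference.
 */
static void unlock_cached(struct state **cached)
{
        struct state *s = *cached;

        s->locked = 0;
        s->refs--;              /* release the reference taken above */
        *cached = NULL;
}

int main(void)
{
        struct state s = { .refs = 1, .locked = 1 };    /* ref held by tree */
        struct state *cached = NULL;

        set_uptodate_cached(&s, &cached);       /* one lookup, cache filled */
        unlock_cached(&cached);                 /* no second lookup needed */
        printf("refs=%d uptodate=%d locked=%d\n", s.refs, s.uptodate, s.locked);
        return 0;
}

The refcount choreography is the delicate part: the cache pointer owns a reference, so every path that fills it must also drain it, which is exactly what the follow-up commits below adjust.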
Only end_bio_extent_readpage is changed: it first gets a cached state to pass to the readpage_end_io_hook, as the prototype requires, and that state is later used for the set/unlock. Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 70 +++++++++++++++++++++++++++++++++------------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 2 +- 3 files changed, 55 insertions(+), 19 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 864e0496cc1c..8dcfb77678de 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -690,6 +690,17 @@ static void cache_state(struct extent_state *state, } } +static void uncache_state(struct extent_state **cached_ptr) +{ + if (cached_ptr && (*cached_ptr)) { + struct extent_state *state = *cached_ptr; + if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) { + *cached_ptr = NULL; + free_extent_state(state); + } + } +} + /* * set some bits on a range in the tree. This may require allocations or * sleeping, so the gfp mask is used to indicate what is allowed. @@ -940,10 +951,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, } int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) + struct extent_state **cached_state, gfp_t mask) { - return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL, - NULL, mask); + return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, + NULL, cached_state, mask); } static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, @@ -1012,8 +1023,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, mask); } -int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) +int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, mask); @@ -1735,6 +1745,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err) do { struct page *page = bvec->bv_page; + struct extent_state *cached = NULL; + struct extent_state *state; + tree = &BTRFS_I(page->mapping->host)->io_tree; start = ((u64)page->index << PAGE_CACHE_SHIFT) + @@ -1749,9 +1762,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (++bvec <= bvec_end) prefetchw(&bvec->bv_page->flags); + spin_lock(&tree->lock); + state = find_first_extent_bit_state(tree, start, 0); + if (state) { + /* + * take a reference on the state, unlock will drop + * the ref + */ + cache_state(state, &cached); + } + spin_unlock(&tree->lock); + if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, - NULL); + state); if (ret) uptodate = 0; } @@ -1764,15 +1788,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err) test_bit(BIO_UPTODATE, &bio->bi_flags); if (err) uptodate = 0; + uncache_state(&cached); continue; } } if (uptodate) { - set_extent_uptodate(tree, start, end, + set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); } - unlock_extent(tree, start, end, GFP_ATOMIC); + unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); if (whole_page) { if (uptodate) { @@ -1811,6 +1836,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) do { struct page *page = bvec->bv_page; + struct extent_state *cached = NULL; tree = &BTRFS_I(page->mapping->host)->io_tree; start = ((u64)page->index << PAGE_CACHE_SHIFT) + @@ -1821,13 +1847,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
prefetchw(&bvec->bv_page->flags); if (uptodate) { - set_extent_uptodate(tree, start, end, GFP_ATOMIC); + set_extent_uptodate(tree, start, end, &cached, + GFP_ATOMIC); } else { ClearPageUptodate(page); SetPageError(page); } - unlock_extent(tree, start, end, GFP_ATOMIC); + unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); } while (bvec >= bio->bi_io_vec); @@ -2016,14 +2043,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree, while (cur <= end) { if (cur >= last_byte) { char *userpage; + struct extent_state *cached = NULL; + iosize = PAGE_CACHE_SIZE - page_offset; userpage = kmap_atomic(page, KM_USER0); memset(userpage + page_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, - GFP_NOFS); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); break; } em = get_extent(inode, page, page_offset, cur, @@ -2063,14 +2093,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree, /* we've found a hole, just zero and go on */ if (block_start == EXTENT_MAP_HOLE) { char *userpage; + struct extent_state *cached = NULL; + userpage = kmap_atomic(page, KM_USER0); memset(userpage + page_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, - GFP_NOFS); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); cur = cur + iosize; page_offset += iosize; continue; @@ -2789,9 +2822,12 @@ int extent_prepare_write(struct extent_io_tree *tree, iocount++; block_start = block_start + iosize; } else { - set_extent_uptodate(tree, block_start, cur_end, + struct extent_state *cached = NULL; + + set_extent_uptodate(tree, block_start, cur_end, &cached, GFP_NOFS); - unlock_extent(tree, block_start, cur_end, GFP_NOFS); + unlock_extent_cached(tree, block_start, cur_end, + &cached, GFP_NOFS); block_start = cur_end + 1; } page_offset = block_start & (PAGE_CACHE_SIZE - 1); @@ -3457,7 +3493,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree, num_pages = num_extent_pages(eb->start, eb->len); set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - GFP_NOFS); + NULL, GFP_NOFS); for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f62c5442835d..af2d7179c372 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int exclusive_bits, u64 *failed_start, struct extent_state **cached_state, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); + struct extent_state **cached_state, gfp_t mask); int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index edafc28883af..5a993e0ec865 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5226,7 +5226,7 @@ again: btrfs_mark_buffer_dirty(leaf); } set_extent_uptodate(io_tree, em->start, - extent_map_end(em) - 1, GFP_NOFS); + extent_map_end(em) - 1, NULL, GFP_NOFS); goto insert; } else { printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); -- cgit v1.2.2 From 
109b36a2bb3eebf5c9994980e724958a5b2b62b6 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 12 Apr 2011 13:57:39 -0400 Subject: Btrfs: make uncache_state unconditional The extent_io code can take cached pointers into the extent state trees, and these can make lookups much faster in common operations. The caching only happens when specific bits are set that prevent merging and splitting of the extent state. A helper function was added to uncache the state, and it tested the same set of conditionals. This can leak a reference in very strange corner cases where the lock bit goes away unexpectedly. The uncaching should be unconditional. Once we have a ref on the extent we should always give it up. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8dcfb77678de..1c462f895c98 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -694,10 +694,8 @@ static void uncache_state(struct extent_state **cached_ptr) { if (cached_ptr && (*cached_ptr)) { struct extent_state *state = *cached_ptr; - if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) { - *cached_ptr = NULL; - free_extent_state(state); - } + *cached_ptr = NULL; + free_extent_state(state); } } @@ -1764,7 +1762,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) spin_lock(&tree->lock); state = find_first_extent_bit_state(tree, start, 0); - if (state) { + if (state && state->start == start) { /* * take a reference on the state, unlock will drop * the ref -- cgit v1.2.2 From 2e6a00356a066d34cd00872b067589549169ad48 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 17 Mar 2011 15:17:59 +0800 Subject: Btrfs: Check if btrfs_next_leaf() returns error in btrfs_listxattr() btrfs_next_leaf() can return -errno, and we should propagate it to userspace. This also simplifies how we walk the btree path. Signed-off-by: Li Zefan --- fs/btrfs/xattr.c | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index e5d22f280956..07b9bc350d5d 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; - int ret = 0, slot, advance; + int ret = 0, slot; size_t total_size = 0, size_left = size; unsigned long name_ptr; size_t name_len; - u32 nritems; /* * ok we want all objects associated with this id.
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; - advance = 0; + while (1) { leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; /* this is where we start walking through the path */ - if (advance || slot >= nritems) { + if (slot >= btrfs_header_nritems(leaf)) { /* * if we've reached the last slot in this leaf we need * to go to the next leaf and reset everything */ - if (slot >= nritems-1) { - ret = btrfs_next_leaf(root, path); - if (ret) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - slot = path->slots[0]; - } else { - /* - * just walking through the slots on this leaf - */ - slot++; - path->slots[0]++; - } + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto err; + else if (ret > 0) + break; + continue; } - advance = 1; btrfs_item_key_to_cpu(leaf, &found_key, slot); @@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) /* we are just looking for how big our buffer needs to be */ if (!size) - continue; + goto next; if (!buffer || (name_len + 1) > size_left) { ret = -ERANGE; @@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) size_left -= name_len + 1; buffer += name_len + 1; +next: + path->slots[0]++; } ret = total_size; -- cgit v1.2.2 From b9e03af0bcc11310f6be4a3951c9ee2c26465011 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 23 Mar 2011 10:43:58 +0800 Subject: Btrfs: Check if btrfs_next_leaf() returns error in btrfs_real_readdir() btrfs_next_leaf() can return -errno, and we should propagate it to userspace. This also simplifies how we walk the btree path. Signed-off-by: Li Zefan --- fs/btrfs/inode.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 55a6a0b416d7..b9f7f5258343 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4221,10 +4221,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_key found_key; struct btrfs_path *path; int ret; - u32 nritems; struct extent_buffer *leaf; int slot; - int advance; unsigned char d_type; int over = 0; u32 di_cur; @@ -4267,27 +4265,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; - advance = 0; while (1) { leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; - if (advance || slot >= nritems) { - if (slot >= nritems - 1) { - ret = btrfs_next_leaf(root, path); - if (ret) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - slot = path->slots[0]; - } else { - slot++; - path->slots[0]++; - } + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto err; + else if (ret > 0) + break; + continue; } - advance = 1; item = btrfs_item_nr(leaf, slot); btrfs_item_key_to_cpu(leaf, &found_key, slot); @@ -4296,7 +4286,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, if (btrfs_key_type(&found_key) != key_type) break; if (found_key.offset < filp->f_pos) - continue; + goto next; filp->f_pos = found_key.offset; @@ -4349,6 +4339,8 @@ skip: di_cur += di_len; di = (struct btrfs_dir_item *)((char *)di + di_len); } +next: + path->slots[0]++; } /* Reached end of directory/root. Bump pos past the last item. 
*/ -- cgit v1.2.2 From 3153495d8ed6a9bb9f00aea42c18dc488a885dd6 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 13 Apr 2011 13:19:21 +0800 Subject: Btrfs: Fix incorrect inode nlink in btrfs_link() The link count of the inode is not decreased if btrfs_set_inode_index() fails. Signed-off-by: Miao Xie Signed-off-by: Li Zefan --- fs/btrfs/inode.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b9f7f5258343..a4157cfdd533 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4846,9 +4846,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink == ~0U) return -EMLINK; - btrfs_inc_nlink(inode); - inode->i_ctime = CURRENT_TIME; - err = btrfs_set_inode_index(dir, &index); if (err) goto fail; @@ -4864,6 +4861,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, goto fail; } + btrfs_inc_nlink(inode); + inode->i_ctime = CURRENT_TIME; + btrfs_set_trans_block_group(trans, dir); ihold(inode); -- cgit v1.2.2 From 329c5056be8774255db04b01242a9ff4f02eb8ea Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 13 Apr 2011 14:07:59 +0800 Subject: Btrfs: Check validity before setting an acl Call posix_acl_valid() to check if an acl is valid or not. Signed-off-by: Miao Xie Signed-off-by: Li Zefan --- fs/btrfs/acl.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 9c949348510b..a892bc27f13a 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, if (value) { acl = posix_acl_from_xattr(value, size); - if (acl == NULL) { - value = NULL; - size = 0; + if (acl) { + ret = posix_acl_valid(acl); + if (ret) + goto out; } else if (IS_ERR(acl)) { return PTR_ERR(acl); } } ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); - +out: posix_acl_release(acl); return ret; -- cgit v1.2.2 From 0e4f8f888845f9dca540ad175884244e5db5eea2 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 15 Apr 2011 16:05:44 -0400 Subject: Btrfs: don't force chunk allocation in find_free_extent find_free_extent likes to allocate in contiguous clusters, which makes writeback faster, especially on SSD storage. As the FS fragments, these clusters become harder to find and we have to decide between allocating a new chunk to make more clusters or giving up on the cluster to allocate from the free space we have. Right now it creates too many chunks, and you can end up with a whole FS that is mostly empty metadata chunks. This commit changes the allocation code to be more strict and only allocate new chunks when we've made good use of the chunks we already have. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 95 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 73 insertions(+), 22 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f619c3cb13b7..26479484180d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -33,6 +33,25 @@ #include "locking.h" #include "free-space-cache.h" +/* control flags for do_chunk_alloc's force field + * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk + * if we really need one. + * + * CHUNK_ALLOC_FORCE means it must try to allocate one + * + * CHUNK_ALLOC_LIMITED means to only try and allocate one + * if we have very few chunks already allocated.
This is + * used as part of the clustering code to help make sure + * we have a good pool of storage to cluster in, without + * filling the FS with empty chunks + * + */ +enum { + CHUNK_ALLOC_NO_FORCE = 0, + CHUNK_ALLOC_FORCE = 1, + CHUNK_ALLOC_LIMITED = 2, +}; + static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int alloc); @@ -3019,7 +3038,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->bytes_readonly = 0; found->bytes_may_use = 0; found->full = 0; - found->force_alloc = 0; + found->force_alloc = CHUNK_ALLOC_NO_FORCE; *space_info = found; list_add_rcu(&found->list, &info->space_info); atomic_set(&found->caching_threads, 0); @@ -3150,7 +3169,7 @@ again: if (!data_sinfo->full && alloc_chunk) { u64 alloc_target; - data_sinfo->force_alloc = 1; + data_sinfo->force_alloc = CHUNK_ALLOC_FORCE; spin_unlock(&data_sinfo->lock); alloc: alloc_target = btrfs_get_alloc_profile(root, 1); @@ -3160,7 +3179,8 @@ alloc: ret = do_chunk_alloc(trans, root->fs_info->extent_root, bytes + 2 * 1024 * 1024, - alloc_target, 0); + alloc_target, + CHUNK_ALLOC_NO_FORCE); btrfs_end_transaction(trans, root); if (ret < 0) { if (ret != -ENOSPC) @@ -3239,31 +3259,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { if (found->flags & BTRFS_BLOCK_GROUP_METADATA) - found->force_alloc = 1; + found->force_alloc = CHUNK_ALLOC_FORCE; } rcu_read_unlock(); } static int should_alloc_chunk(struct btrfs_root *root, - struct btrfs_space_info *sinfo, u64 alloc_bytes) + struct btrfs_space_info *sinfo, u64 alloc_bytes, + int force) { u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; + u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved; u64 thresh; - if (sinfo->bytes_used + sinfo->bytes_reserved + - alloc_bytes + 256 * 1024 * 1024 < num_bytes) + if (force == CHUNK_ALLOC_FORCE) + return 1; + + /* + * in limited mode, we want to have some free space up to + * about 1% of the FS size. + */ + if (force == CHUNK_ALLOC_LIMITED) { + thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); + thresh = max_t(u64, 64 * 1024 * 1024, + div_factor_fine(thresh, 1)); + + if (num_bytes - num_allocated < thresh) + return 1; + } + + /* + * we have two similar checks here, one based on percentage + * and once based on a hard number of 256MB. The idea + * is that if we have a good amount of free + * room, don't allocate a chunk. 
A good mount is + * less than 80% utilized of the chunks we have allocated, + * or more than 256MB free + */ + if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes) return 0; - if (sinfo->bytes_used + sinfo->bytes_reserved + - alloc_bytes < div_factor(num_bytes, 8)) + if (num_allocated + alloc_bytes < div_factor(num_bytes, 8)) return 0; thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); + + /* 256MB or 5% of the FS */ thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) return 0; - return 1; } @@ -3289,17 +3334,17 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, spin_lock(&space_info->lock); if (space_info->force_alloc) - force = 1; + force = space_info->force_alloc; if (space_info->full) { spin_unlock(&space_info->lock); goto out; } - if (!force && !should_alloc_chunk(extent_root, space_info, - alloc_bytes)) { + if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { spin_unlock(&space_info->lock); goto out; } + spin_unlock(&space_info->lock); /* @@ -3327,7 +3372,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, space_info->full = 1; else ret = 1; - space_info->force_alloc = 0; + space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; spin_unlock(&space_info->lock); out: mutex_unlock(&extent_root->fs_info->chunk_mutex); @@ -5303,11 +5348,13 @@ loop: if (allowed_chunk_alloc) { ret = do_chunk_alloc(trans, root, num_bytes + - 2 * 1024 * 1024, data, 1); + 2 * 1024 * 1024, data, + CHUNK_ALLOC_LIMITED); allowed_chunk_alloc = 0; done_chunk_alloc = 1; - } else if (!done_chunk_alloc) { - space_info->force_alloc = 1; + } else if (!done_chunk_alloc && + space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) { + space_info->force_alloc = CHUNK_ALLOC_LIMITED; } if (loop < LOOP_NO_EMPTY_SIZE) { @@ -5393,7 +5440,8 @@ again: */ if (empty_size || root->ref_cows) ret = do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes + 2 * 1024 * 1024, data, 0); + num_bytes + 2 * 1024 * 1024, data, + CHUNK_ALLOC_NO_FORCE); WARN_ON(num_bytes < root->sectorsize); ret = find_free_extent(trans, root, num_bytes, empty_size, @@ -5405,7 +5453,7 @@ again: num_bytes = num_bytes & ~(root->sectorsize - 1); num_bytes = max(num_bytes, min_alloc_size); do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes, data, 1); + num_bytes, data, CHUNK_ALLOC_FORCE); goto again; } if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { @@ -8109,13 +8157,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, alloc_flags = update_block_group_flags(root, cache->flags); if (alloc_flags != cache->flags) - do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); ret = set_block_group_ro(cache); if (!ret) goto out; alloc_flags = get_alloc_profile(root, cache->space_info->flags); - ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); if (ret < 0) goto out; ret = set_block_group_ro(cache); @@ -8128,7 +8178,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 type) { u64 alloc_flags = get_alloc_profile(root, type); - return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); } /* -- cgit v1.2.2 From 0d399205edf3a4c290e76ebb36e541593af4a1b4 Mon Sep 17 00:00:00 2001 
From: Chris Mason Date: Sat, 16 Apr 2011 06:55:39 -0400 Subject: Btrfs end_bio_extent_readpage should look for locked bits A recent commit caches the extent state in end_bio_extent_readpage, but the search it does should look for locked extents. This fixes things to make it more effective. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1c462f895c98..5ae0bffaa4d8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1761,7 +1761,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) prefetchw(&bvec->bv_page->flags); spin_lock(&tree->lock); - state = find_first_extent_bit_state(tree, start, 0); + state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED); if (state && state->start == start) { /* * take a reference on the state, unlock will drop -- cgit v1.2.2 From 6d74119f1a3efad9dc7f79a16c201242324b731f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 11 Apr 2011 20:20:11 -0400 Subject: Btrfs: avoid taking the chunk_mutex in do_chunk_alloc Every time we try to allocate disk space we try and see if we can pre-emptively allocate a chunk, but in the common case we don't allocate anything, so there is no sense in taking the chunk_mutex at all. So instead, if we are allocating a chunk, mark it in the space_info so we don't get two people trying to allocate at the same time. Thanks, Signed-off-by: Josef Bacik Reviewed-by: Liu Bo --- fs/btrfs/ctree.h | 4 +++- fs/btrfs/extent-tree.c | 30 +++++++++++++++++++++++++----- 2 files changed, 28 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0d00a07b5b29..2e61fe1b6b8c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -740,8 +740,10 @@ struct btrfs_space_info { */ unsigned long reservation_progress; - int full; /* indicates that we cannot allocate any more + int full:1; /* indicates that we cannot allocate any more chunks for this space */ + int chunk_alloc:1; /* set if we are allocating a chunk */ + int force_alloc; /* set if we need to force a chunk alloc for this space */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 26479484180d..31f33ba56fe8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3039,6 +3039,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->bytes_may_use = 0; found->full = 0; found->force_alloc = CHUNK_ALLOC_NO_FORCE; + found->chunk_alloc = 0; *space_info = found; list_add_rcu(&found->list, &info->space_info); atomic_set(&found->caching_threads, 0); @@ -3318,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, { struct btrfs_space_info *space_info; struct btrfs_fs_info *fs_info = extent_root->fs_info; + int wait_for_alloc = 0; int ret = 0; - mutex_lock(&fs_info->chunk_mutex); - flags = btrfs_reduce_alloc_profile(extent_root, flags); space_info = __find_space_info(extent_root->fs_info, flags); @@ -3332,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, } BUG_ON(!space_info); +again: spin_lock(&space_info->lock); if (space_info->force_alloc) force = space_info->force_alloc; if (space_info->full) { spin_unlock(&space_info->lock); - goto out; + return 0; } if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { spin_unlock(&space_info->lock); - goto out; + return 0; + } else if (space_info->chunk_alloc) { + wait_for_alloc = 1; + } else { + space_info->chunk_alloc = 1; }
spin_unlock(&space_info->lock); + mutex_lock(&fs_info->chunk_mutex); + + /* + * The chunk_mutex is held throughout the entirety of a chunk + * allocation, so once we've acquired the chunk_mutex we know that the + * other guy is done and we need to recheck and see if we should + * allocate. + */ + if (wait_for_alloc) { + mutex_unlock(&fs_info->chunk_mutex); + wait_for_alloc = 0; + goto again; + } + /* * If we have mixed data/metadata chunks we want to make sure we keep * allocating mixed chunks instead of individual chunks. @@ -3372,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, space_info->full = 1; else ret = 1; + space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; + space_info->chunk_alloc = 0; spin_unlock(&space_info->lock); -out: mutex_unlock(&extent_root->fs_info->chunk_mutex); return ret; } -- cgit v1.2.2 From f65647c29b14f5a32ff6f3237b0ef3b375ed5a79 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 18 Apr 2011 08:55:34 -0400 Subject: Btrfs: fix free space cache leak The free space caching code was recently reworked to cache all the pages it needed instead of using find_get_page everywhere. One loop was missed though, so it ended up leaking pages. This fixes it to use our page array instead of find_get_page. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index a3f420def0e9..11d2e9cea09e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -732,7 +732,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, out_of_space = true; break; } - page = find_get_page(inode->i_mapping, index); + page = pages[index]; addr = kmap(page); memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); -- cgit v1.2.2 From 211588ad1902df57beeeadc9b44546540fa4bd81 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 19 Apr 2011 20:12:40 -0400 Subject: Btrfs: do some plugging in the submit_bio threads Btrfs has a small number of submit_bio threads responsible for pushing down the bios we've collected for a large number of devices. Since we do all the bios for a single device at once, we want to make sure we unplug and send down the bios for each device as we're done processing them. The new plugging API removed the btrfs code to unplug while processing bios; this adds it back with the new API. Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 309a57b9fc85..c7367ae5a3e6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -155,6 +155,15 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) unsigned long limit; unsigned long last_waited = 0; int force_reg = 0; + struct blk_plug plug; + + /* + * this function runs all the bios we've collected for + * a particular device. We don't want to wander off to + * another device without first sending all of these down.
+ * So, setup a plug here and finish it off before we return + */ + blk_start_plug(&plug); bdi = blk_get_backing_dev_info(device->bdev); fs_info = device->dev_root->fs_info; @@ -294,6 +303,7 @@ loop_lock: spin_unlock(&device->io_lock); done: + blk_finish_plug(&plug); return 0; } -- cgit v1.2.2 From 92c423118105e1c8c1587367a26eeb3277bda89a Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 2 Mar 2011 16:50:21 +0800 Subject: Btrfs: Remove unused btrfs_block_group_free_space() We've already recorded the value in block_group->free_space. Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 15 --------------- fs/btrfs/free-space-cache.h | 1 - 2 files changed, 16 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 11d2e9cea09e..3af64c6ea9df 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1685,21 +1685,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) -{ - struct btrfs_free_space *info; - struct rb_node *n; - u64 ret = 0; - - for (n = rb_first(&block_group->free_space_offset); n; - n = rb_next(n)) { - info = rb_entry(n, struct btrfs_free_space, offset_index); - ret += info->bytes; - } - - return ret; -} - /* * for a given cluster, put all of its extents back into the free * space cache. If the block group passed doesn't match the block group diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 65c3b935289f..12b2b5165f8a 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -55,7 +55,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_group_cache *block_group, -- cgit v1.2.2 From f38b6e754d8cc4605ac21d9c1094d569d88b163b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 14 Mar 2011 13:40:51 +0800 Subject: Btrfs: Use bitmap_set/clear() No functional change.
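For reference, here is a small standalone C model of the two helpers this commit switches to. These simplified versions mirror the semantics of bitmap_set()/bitmap_clear() from <linux/bitmap.h> one bit at a time, much like the open-coded loops being removed; the kernel versions additionally optimize whole-word runs:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Simplified stand-ins, not the kernel implementations. */
static void bitmap_set(unsigned long *map, unsigned int start, unsigned int count)
{
        unsigned int i;

        for (i = start; i < start + count; i++)
                map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void bitmap_clear(unsigned long *map, unsigned int start, unsigned int count)
{
        unsigned int i;

        for (i = start; i < start + count; i++)
                map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
        unsigned long map[2] = { 0, 0 };

        bitmap_set(map, 60, 8);         /* a run crossing a word boundary */
        printf("%lx %lx\n", map[1], map[0]);
        bitmap_clear(map, 60, 8);
        printf("%lx %lx\n", map[1], map[0]);
        return 0;
}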
Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 3af64c6ea9df..0e23bbabbba2 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1134,15 +1134,13 @@ static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, end; - unsigned long i; + unsigned long start, count; start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); + count = bytes_to_bits(bytes, block_group->sectorsize); + BUG_ON(start + count > BITS_PER_BITMAP); - for (i = start; i < end; i++) - clear_bit(i, info->bitmap); + bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; block_group->free_space -= bytes; @@ -1152,15 +1150,13 @@ static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, end; - unsigned long i; + unsigned long start, count; start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); + count = bytes_to_bits(bytes, block_group->sectorsize); + BUG_ON(start + count > BITS_PER_BITMAP); - for (i = start; i < end; i++) - set_bit(i, info->bitmap); + bitmap_set(info->bitmap, start, count); info->bytes += bytes; block_group->free_space += bytes; -- cgit v1.2.2 From 34d52cb6c50b5a43901709998f59fb1c5a43dc4a Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 29 Mar 2011 13:46:06 +0800 Subject: Btrfs: Make free space cache code generic So we can re-use the code to cache free inode numbers. The change is quite straightforward. Two new structures are introduced. - struct btrfs_free_space_ctl We move those variables that are used for caching free space from struct btrfs_block_group_cache to this new struct. - struct btrfs_free_space_op We do block group specific work (e.g. calculation of extents threshold) through functions registered in this struct. And then we can remove references to struct btrfs_block_group_cache. 
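As a sketch of this ops-table pattern (a generic cache core calling back into per-user policy through function pointers), here is a minimal standalone C model. The names cache_ctl and cache_ops and the policy numbers are simplified stand-ins for the btrfs structures, not the actual definitions:

#include <stdio.h>

struct cache_ctl;

/* Per-user policy hooks the generic cache core calls back into. */
struct cache_ops {
        void (*recalc_thresholds)(struct cache_ctl *ctl);
        int  (*use_bitmap)(struct cache_ctl *ctl, unsigned long long bytes);
};

/* Generic state, independent of who the cache serves. */
struct cache_ctl {
        unsigned long long free_space;
        int extents_thresh;
        const struct cache_ops *op;
        void *private;          /* block group, inode map, ... */
};

/* One possible user: block-group flavored policy (numbers are made up). */
static void bg_recalc(struct cache_ctl *ctl)
{
        ctl->extents_thresh = 1024;
}

static int bg_use_bitmap(struct cache_ctl *ctl, unsigned long long bytes)
{
        (void)ctl;
        return bytes < 4096;    /* push small extents into bitmaps */
}

static const struct cache_ops bg_ops = {
        .recalc_thresholds      = bg_recalc,
        .use_bitmap             = bg_use_bitmap,
};

int main(void)
{
        struct cache_ctl ctl = { .op = &bg_ops };

        ctl.op->recalc_thresholds(&ctl);        /* generic code, bg policy */
        printf("thresh=%d use_bitmap=%d\n", ctl.extents_thresh,
               ctl.op->use_bitmap(&ctl, 512));
        return 0;
}

The design choice is the usual one: the core never tests what kind of user it serves, so a second user (here, the planned free inode number cache) only has to supply its own ops table.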
Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 7 +- fs/btrfs/extent-tree.c | 37 ++-- fs/btrfs/free-space-cache.c | 430 ++++++++++++++++++++++++-------------------- fs/btrfs/free-space-cache.h | 20 +++ 4 files changed, 271 insertions(+), 223 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2e61fe1b6b8c..d17e4a3b8bf7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -830,9 +830,6 @@ struct btrfs_block_group_cache { u64 bytes_super; u64 flags; u64 sectorsize; - int extents_thresh; - int free_extents; - int total_bitmaps; unsigned int ro:1; unsigned int dirty:1; unsigned int iref:1; @@ -847,9 +844,7 @@ struct btrfs_block_group_cache { struct btrfs_space_info *space_info; /* free space cache stuff */ - spinlock_t tree_lock; - struct rb_root free_space_offset; - u64 free_space; + struct btrfs_free_space_ctl *free_space_ctl; /* block group cache stuff */ struct rb_node cache_node; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 31f33ba56fe8..904eae10ec65 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) WARN_ON(cache->pinned > 0); WARN_ON(cache->reserved > 0); WARN_ON(cache->reserved_pinned > 0); + kfree(cache->free_space_ctl); kfree(cache); } } @@ -4893,7 +4894,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, return 0; wait_event(caching_ctl->wait, block_group_cache_done(cache) || - (cache->free_space >= num_bytes)); + (cache->free_space_ctl->free_space >= num_bytes)); put_caching_control(caching_ctl); return 0; @@ -8551,10 +8552,16 @@ int btrfs_read_block_groups(struct btrfs_root *root) ret = -ENOMEM; goto error; } + cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + ret = -ENOMEM; + goto error; + } atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); cache->fs_info = info; INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); @@ -8562,14 +8569,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (need_clear) cache->disk_cache_state = BTRFS_DC_CLEAR; - /* - * we only want to have 32k of ram per block group for keeping - * track of free space, and if we pass 1/2 of that we want to - * start converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); - read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); @@ -8580,6 +8579,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; + btrfs_init_free_space_ctl(cache); + /* * We need to exclude the super stripes now so that the space * info has super bytes accounted for, otherwise we'll think @@ -8666,6 +8667,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) return -ENOMEM; + cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + return -ENOMEM; + } cache->key.objectid = chunk_offset; cache->key.offset = size; @@ -8673,19 +8680,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->sectorsize = root->sectorsize; cache->fs_info = root->fs_info; - /* - * we only want to have 32k of ram per block group for keeping track - * of free space, and if we pass 1/2 
of that we want to start - * converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); + btrfs_init_free_space_ctl(cache); + btrfs_set_block_group_used(&cache->item, bytes_used); btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); cache->flags = type; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 0e23bbabbba2..d4fb4f077a79 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -29,9 +29,7 @@ #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) -static void recalculate_thresholds(struct btrfs_block_group_cache - *block_group); -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); struct inode *lookup_free_space_inode(struct btrfs_root *root, @@ -212,6 +210,7 @@ static int readahead_cache(struct inode *inode) int load_free_space_cache(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *block_group) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_root *root = fs_info->tree_root; struct inode *inode; struct btrfs_free_space_header *header; @@ -417,9 +416,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, } if (entry->type == BTRFS_FREE_SPACE_EXTENT) { - spin_lock(&block_group->tree_lock); - ret = link_free_space(block_group, e); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + spin_unlock(&ctl->tree_lock); BUG_ON(ret); } else { e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); @@ -431,11 +430,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, page_cache_release(page); goto free_cache; } - spin_lock(&block_group->tree_lock); - ret = link_free_space(block_group, e); - block_group->total_bitmaps++; - recalculate_thresholds(block_group); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + ctl->total_bitmaps++; + ctl->op->recalc_thresholds(ctl); + spin_unlock(&ctl->tree_lock); list_add_tail(&e->list, &bitmaps); } @@ -471,16 +470,16 @@ next: index++; } - spin_lock(&block_group->tree_lock); - if (block_group->free_space != (block_group->key.offset - used - - block_group->bytes_super)) { - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + if (ctl->free_space != (block_group->key.offset - used - + block_group->bytes_super)) { + spin_unlock(&ctl->tree_lock); printk(KERN_ERR "block group %llu has an wrong amount of free " "space\n", block_group->key.objectid); ret = 0; goto free_cache; } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); ret = 1; out: @@ -503,6 +502,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_header *header; struct extent_buffer *leaf; struct inode *inode; @@ -546,7 +546,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, return 0; } - node = rb_first(&block_group->free_space_offset); + node = rb_first(&ctl->free_space_offset); if (!node) { iput(inode); return 0; @@ -851,30 +851,30 @@ out_free: return ret; } -static inline unsigned long offset_to_bit(u64 
bitmap_start, u64 sectorsize, +static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, u64 offset) { BUG_ON(offset < bitmap_start); offset -= bitmap_start; - return (unsigned long)(div64_u64(offset, sectorsize)); + return (unsigned long)(div_u64(offset, unit)); } -static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) +static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) { - return (unsigned long)(div64_u64(bytes, sectorsize)); + return (unsigned long)(div_u64(bytes, unit)); } -static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, +static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) { u64 bitmap_start; u64 bytes_per_bitmap; - bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; - bitmap_start = offset - block_group->key.objectid; + bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; + bitmap_start = offset - ctl->start; bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); bitmap_start *= bytes_per_bitmap; - bitmap_start += block_group->key.objectid; + bitmap_start += ctl->start; return bitmap_start; } @@ -932,10 +932,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, * offset. */ static struct btrfs_free_space * -tree_search_offset(struct btrfs_block_group_cache *block_group, +tree_search_offset(struct btrfs_free_space_ctl *ctl, u64 offset, int bitmap_only, int fuzzy) { - struct rb_node *n = block_group->free_space_offset.rb_node; + struct rb_node *n = ctl->free_space_offset.rb_node; struct btrfs_free_space *entry, *prev = NULL; /* find entry that is closest to the 'offset' */ @@ -1031,8 +1031,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, break; } } - if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) return entry; } else if (entry->offset + entry->bytes > offset) return entry; @@ -1043,7 +1042,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, while (1) { if (entry->bitmap) { if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + ctl->unit > offset) break; } else { if (entry->offset + entry->bytes > offset) @@ -1059,42 +1058,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, } static inline void -__unlink_free_space(struct btrfs_block_group_cache *block_group, +__unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - rb_erase(&info->offset_index, &block_group->free_space_offset); - block_group->free_extents--; + rb_erase(&info->offset_index, &ctl->free_space_offset); + ctl->free_extents--; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, +static void unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - __unlink_free_space(block_group, info); - block_group->free_space -= info->bytes; + __unlink_free_space(ctl, info); + ctl->free_space -= info->bytes; } -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { int ret = 0; BUG_ON(!info->bitmap && !info->bytes); - ret = tree_insert_offset(&block_group->free_space_offset, info->offset, + ret = tree_insert_offset(&ctl->free_space_offset, info->offset, &info->offset_index, (info->bitmap != NULL)); if (ret) return ret; - block_group->free_space += info->bytes; - block_group->free_extents++; + ctl->free_space += info->bytes; + ctl->free_extents++; return ret; } 
-static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) +static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) { + struct btrfs_block_group_cache *block_group = ctl->private; u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; u64 size = block_group->key.offset; + u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; + int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); + + BUG_ON(ctl->total_bitmaps > max_bitmaps); /* * The goal is to keep the total amount of memory used per 1gb of space @@ -1112,10 +1116,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as * we add more bitmaps. */ - bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; + bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; if (bitmap_bytes >= max_bytes) { - block_group->extents_thresh = 0; + ctl->extents_thresh = 0; return; } @@ -1126,43 +1130,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) extent_bytes = max_bytes - bitmap_bytes; extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); - block_group->extents_thresh = + ctl->extents_thresh = div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); } -static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - count = bytes_to_bits(bytes, block_group->sectorsize); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); BUG_ON(start + count > BITS_PER_BITMAP); bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; - block_group->free_space -= bytes; + ctl->free_space -= bytes; } -static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - count = bytes_to_bits(bytes, block_group->sectorsize); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); BUG_ON(start + count > BITS_PER_BITMAP); bitmap_set(info->bitmap, start, count); info->bytes += bytes; - block_group->free_space += bytes; + ctl->free_space += bytes; } -static int search_bitmap(struct btrfs_block_group_cache *block_group, +static int search_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1170,9 +1174,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, unsigned long bits, i; unsigned long next_zero; - i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, + i = offset_to_bit(bitmap_info->offset, ctl->unit, max_t(u64, *offset, bitmap_info->offset)); - bits = bytes_to_bits(*bytes, block_group->sectorsize); + bits = bytes_to_bits(*bytes, ctl->unit); for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); i < BITS_PER_BITMAP; @@ -1187,29 +1191,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, } if (found_bits) { - *offset = (u64)(i * block_group->sectorsize) + - bitmap_info->offset; - *bytes = (u64)(found_bits) * block_group->sectorsize; + *offset = (u64)(i * ctl->unit) + bitmap_info->offset; + 
*bytes = (u64)(found_bits) * ctl->unit; return 0; } return -1; } -static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache - *block_group, u64 *offset, - u64 *bytes, int debug) +static struct btrfs_free_space * +find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes) { struct btrfs_free_space *entry; struct rb_node *node; int ret; - if (!block_group->free_space_offset.rb_node) + if (!ctl->free_space_offset.rb_node) return NULL; - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, *offset), - 0, 1); + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); if (!entry) return NULL; @@ -1219,7 +1219,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache continue; if (entry->bitmap) { - ret = search_bitmap(block_group, entry, offset, bytes); + ret = search_bitmap(ctl, entry, offset, bytes); if (!ret) return entry; continue; @@ -1233,33 +1233,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache return NULL; } -static void add_new_bitmap(struct btrfs_block_group_cache *block_group, +static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset) { - u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; - int max_bitmaps = (int)div64_u64(block_group->key.offset + - bytes_per_bg - 1, bytes_per_bg); - BUG_ON(block_group->total_bitmaps >= max_bitmaps); - - info->offset = offset_to_bitmap(block_group, offset); + info->offset = offset_to_bitmap(ctl, offset); info->bytes = 0; - link_free_space(block_group, info); - block_group->total_bitmaps++; + link_free_space(ctl, info); + ctl->total_bitmaps++; - recalculate_thresholds(block_group); + ctl->op->recalc_thresholds(ctl); } -static void free_bitmap(struct btrfs_block_group_cache *block_group, +static void free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info) { - unlink_free_space(block_group, bitmap_info); + unlink_free_space(ctl, bitmap_info); kfree(bitmap_info->bitmap); kmem_cache_free(btrfs_free_space_cachep, bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); + ctl->total_bitmaps--; + ctl->op->recalc_thresholds(ctl); } -static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, +static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1268,8 +1263,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro int ret; again: - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; /* * XXX - this can go away after a few releases. 
@@ -1284,24 +1278,22 @@ again: search_start = *offset; search_bytes = *bytes; search_bytes = min(search_bytes, end - search_start + 1); - ret = search_bitmap(block_group, bitmap_info, &search_start, - &search_bytes); + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); if (*offset > bitmap_info->offset && *offset + *bytes > end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, - end - *offset + 1); + bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1); *bytes -= end - *offset + 1; *offset = end + 1; } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); + bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes); *bytes = 0; } if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1328,31 +1320,28 @@ again: */ search_start = *offset; search_bytes = *bytes; - ret = search_bitmap(block_group, bitmap_info, &search_start, + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); if (ret < 0 || search_start != *offset) return -EAGAIN; goto again; } else if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); return 0; } -static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) { - struct btrfs_free_space *bitmap_info; - int added = 0; - u64 bytes, offset, end; - int ret; + struct btrfs_block_group_cache *block_group = ctl->private; /* * If we are below the extents threshold then we can add this as an * extent, and don't have to deal with the bitmap */ - if (block_group->free_extents < block_group->extents_thresh) { + if (ctl->free_extents < ctl->extents_thresh) { /* * If this block group has some small extents we don't want to * use up all of our free slots in the cache with them, we want @@ -1361,11 +1350,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, * the overhead of a bitmap if we don't have to. 
*/ if (info->bytes <= block_group->sectorsize * 4) { - if (block_group->free_extents * 2 <= - block_group->extents_thresh) - return 0; + if (ctl->free_extents * 2 <= ctl->extents_thresh) + return false; } else { - return 0; + return false; } } @@ -1375,31 +1363,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, */ if (BITS_PER_BITMAP * block_group->sectorsize > block_group->key.offset) - return 0; + return false; + + return true; +} + +static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + struct btrfs_free_space *bitmap_info; + int added = 0; + u64 bytes, offset, end; + int ret; bytes = info->bytes; offset = info->offset; + if (!ctl->op->use_bitmap(ctl, info)) + return 0; + again: - bitmap_info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!bitmap_info) { BUG_ON(added); goto new_bitmap; } - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize); + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(block_group, bitmap_info, offset, - end - offset); + bitmap_set_bits(ctl, bitmap_info, offset, end - offset); bytes -= end - offset; offset = end; added = 0; } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(block_group, bitmap_info, offset, bytes); + bitmap_set_bits(ctl, bitmap_info, offset, bytes); bytes = 0; } else { BUG(); @@ -1413,19 +1412,19 @@ again: new_bitmap: if (info && info->bitmap) { - add_new_bitmap(block_group, info, offset); + add_new_bitmap(ctl, info, offset); added = 1; info = NULL; goto again; } else { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* no pre-allocated info, allocate a new one */ if (!info) { info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); if (!info) { - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = -ENOMEM; goto out; } @@ -1433,7 +1432,7 @@ new_bitmap: /* allocate the bitmap */ info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; goto out; @@ -1451,7 +1450,7 @@ out: return ret; } -bool try_merge_free_space(struct btrfs_block_group_cache *block_group, +bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; @@ -1465,18 +1464,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, * are adding, if there is remove that struct and add a new one to * cover the entire range */ - right_info = tree_search_offset(block_group, offset + bytes, 0, 0); + right_info = tree_search_offset(ctl, offset + bytes, 0, 0); if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); else - left_info = tree_search_offset(block_group, offset - 1, 0, 0); + left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { if (update_stat) - unlink_free_space(block_group, right_info); + unlink_free_space(ctl, right_info); else - __unlink_free_space(block_group, right_info); + __unlink_free_space(ctl, right_info); info->bytes += right_info->bytes; kmem_cache_free(btrfs_free_space_cachep, right_info); merged = true; @@ -1485,9 +1484,9 @@ bool 
try_merge_free_space(struct btrfs_block_group_cache *block_group, if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { if (update_stat) - unlink_free_space(block_group, left_info); + unlink_free_space(ctl, left_info); else - __unlink_free_space(block_group, left_info); + __unlink_free_space(ctl, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kmem_cache_free(btrfs_free_space_cachep, left_info); @@ -1500,6 +1499,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; int ret = 0; @@ -1510,9 +1510,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, info->offset = offset; info->bytes = bytes; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - if (try_merge_free_space(block_group, info, true)) + if (try_merge_free_space(ctl, info, true)) goto link; /* @@ -1520,7 +1520,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, * extent then we know we're going to have to allocate a new extent, so * before we do that see if we need to drop this into a bitmap */ - ret = insert_into_bitmap(block_group, info); + ret = insert_into_bitmap(ctl, info); if (ret < 0) { goto out; } else if (ret) { @@ -1528,11 +1528,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, goto out; } link: - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); if (ret) kmem_cache_free(btrfs_free_space_cachep, info); out: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); if (ret) { printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); @@ -1545,21 +1545,21 @@ out: int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct btrfs_free_space *next_info = NULL; int ret = 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); again: - info = tree_search_offset(block_group, offset, 0, 0); + info = tree_search_offset(ctl, offset, 0, 0); if (!info) { /* * oops didn't find an extent that matched the space we wanted * to remove, look for a bitmap instead */ - info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!info) { WARN_ON(1); @@ -1574,8 +1574,8 @@ again: offset_index); if (next_info->bitmap) - end = next_info->offset + BITS_PER_BITMAP * - block_group->sectorsize - 1; + end = next_info->offset + + BITS_PER_BITMAP * ctl->unit - 1; else end = next_info->offset + next_info->bytes; @@ -1595,20 +1595,20 @@ again: } if (info->bytes == bytes) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (info->bitmap) { kfree(info->bitmap); - block_group->total_bitmaps--; + ctl->total_bitmaps--; } kmem_cache_free(btrfs_free_space_cachep, info); goto out_lock; } if (!info->bitmap && info->offset == offset) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); info->offset += bytes; info->bytes -= bytes; - link_free_space(block_group, info); + link_free_space(ctl, info); goto out_lock; } @@ -1622,13 +1622,13 @@ again: * first unlink the old info and then * insert it again after the hole we're creating */ - unlink_free_space(block_group, info); 
+ unlink_free_space(ctl, info); if (offset + bytes < info->offset + info->bytes) { u64 old_end = info->offset + info->bytes; info->offset = offset + bytes; info->bytes = old_end - info->offset; - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); WARN_ON(ret); if (ret) goto out_lock; @@ -1638,7 +1638,7 @@ again: */ kmem_cache_free(btrfs_free_space_cachep, info); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* step two, insert a new info struct to cover * anything before the hole @@ -1649,12 +1649,12 @@ again: goto out; } - ret = remove_from_bitmap(block_group, info, &offset, &bytes); + ret = remove_from_bitmap(ctl, info, &offset, &bytes); if (ret == -EAGAIN) goto again; BUG_ON(ret); out_lock: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); out: return ret; } @@ -1662,11 +1662,12 @@ out: void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *n; int count = 0; - for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { + for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { info = rb_entry(n, struct btrfs_free_space, offset_index); if (info->bytes >= bytes) count++; @@ -1681,6 +1682,30 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } +static struct btrfs_free_space_op free_space_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = block_group->sectorsize; + ctl->start = block_group->key.objectid; + ctl->private = block_group; + ctl->op = &free_space_op; + + /* + * we only want to have 32k of ram per block group for keeping + * track of free space, and if we pass 1/2 of that we want to + * start converting things over to using bitmaps + */ + ctl->extents_thresh = ((1024 * 32) / 2) / + sizeof(struct btrfs_free_space); +} + /* * for a given cluster, put all of its extents back into the free * space cache. 
If the block group passed doesn't match the block group @@ -1692,6 +1717,7 @@ __btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; @@ -1713,8 +1739,8 @@ __btrfs_return_cluster_to_free_space( bitmap = (entry->bitmap != NULL); if (!bitmap) - try_merge_free_space(block_group, entry, false); - tree_insert_offset(&block_group->free_space_offset, + try_merge_free_space(ctl, entry, false); + tree_insert_offset(&ctl->free_space_offset, entry->offset, &entry->offset_index, bitmap); } cluster->root = RB_ROOT; @@ -1727,12 +1753,13 @@ out: void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *node; struct btrfs_free_cluster *cluster; struct list_head *head; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); while ((head = block_group->cluster_list.next) != &block_group->cluster_list) { cluster = list_entry(head, struct btrfs_free_cluster, @@ -1741,57 +1768,58 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) WARN_ON(cluster->block_group != block_group); __btrfs_return_cluster_to_free_space(block_group, cluster); if (need_resched()) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); cond_resched(); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); } } - while ((node = rb_last(&block_group->free_space_offset)) != NULL) { + while ((node = rb_last(&ctl->free_space_offset)) != NULL) { info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (info->bitmap) kfree(info->bitmap); kmem_cache_free(btrfs_free_space_cachep, info); if (need_resched()) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); cond_resched(); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); } } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); } u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; u64 bytes_search = bytes + empty_size; u64 ret = 0; - spin_lock(&block_group->tree_lock); - entry = find_free_space(block_group, &offset, &bytes_search, 0); + spin_lock(&ctl->tree_lock); + entry = find_free_space(ctl, &offset, &bytes_search); if (!entry) goto out; ret = offset; if (entry->bitmap) { - bitmap_clear_bits(block_group, entry, offset, bytes); + bitmap_clear_bits(ctl, entry, offset, bytes); if (!entry->bytes) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); } else { - unlink_free_space(block_group, entry); + unlink_free_space(ctl, entry); entry->offset += bytes; entry->bytes -= bytes; if (!entry->bytes) kmem_cache_free(btrfs_free_space_cachep, entry); else - link_free_space(block_group, entry); + link_free_space(ctl, entry); } out: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1808,6 +1836,7 @@ int btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl; int ret; /* first, get a safe pointer to the block group */ @@ -1826,10 +1855,12 @@ int 
btrfs_return_cluster_to_free_space( atomic_inc(&block_group->count); spin_unlock(&cluster->lock); + ctl = block_group->free_space_ctl; + /* now return any extents the cluster had on it */ - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = __btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* finally drop our ref */ btrfs_put_block_group(block_group); @@ -1841,6 +1872,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *entry, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; int err; u64 search_start = cluster->window_start; u64 search_bytes = bytes; @@ -1849,13 +1881,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, search_start = min_start; search_bytes = bytes; - err = search_bitmap(block_group, entry, &search_start, - &search_bytes); + err = search_bitmap(ctl, entry, &search_start, &search_bytes); if (err) return 0; ret = search_start; - bitmap_clear_bits(block_group, entry, ret, bytes); + bitmap_clear_bits(ctl, entry, ret, bytes); return ret; } @@ -1869,6 +1900,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct rb_node *node; u64 ret = 0; @@ -1929,20 +1961,20 @@ out: if (!ret) return 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - block_group->free_space -= bytes; + ctl->free_space -= bytes; if (entry->bytes == 0) { - block_group->free_extents--; + ctl->free_extents--; if (entry->bitmap) { kfree(entry->bitmap); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); + ctl->total_bitmaps--; + ctl->op->recalc_thresholds(ctl); } kmem_cache_free(btrfs_free_space_cachep, entry); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1952,6 +1984,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; unsigned long next_zero; unsigned long i; unsigned long search_bits; @@ -2006,7 +2039,7 @@ again: cluster->window_start = start * block_group->sectorsize + entry->offset; - rb_erase(&entry->offset_index, &block_group->free_space_offset); + rb_erase(&entry->offset_index, &ctl->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, &entry->offset_index, 1); BUG_ON(ret); @@ -2021,6 +2054,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *first = NULL; struct btrfs_free_space *entry = NULL; struct btrfs_free_space *prev = NULL; @@ -2031,7 +2065,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, u64 max_extent; u64 max_gap = 128 * 1024; - entry = tree_search_offset(block_group, offset, 0, 1); + entry = tree_search_offset(ctl, offset, 0, 1); if (!entry) return -ENOSPC; @@ -2097,7 +2131,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, if (entry->bitmap) continue; - rb_erase(&entry->offset_index, &block_group->free_space_offset); + 
rb_erase(&entry->offset_index, &ctl->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, &entry->offset_index, 0); BUG_ON(ret); @@ -2116,16 +2150,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; int ret = -ENOSPC; - if (block_group->total_bitmaps == 0) + if (ctl->total_bitmaps == 0) return -ENOSPC; - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), - 0, 1); + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); if (!entry) return -ENOSPC; @@ -2158,6 +2191,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; u64 min_bytes; int ret; @@ -2177,14 +2211,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, } else min_bytes = max(bytes, (bytes + empty_size) >> 2); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); /* * If we know we don't have enough space to make a cluster don't even * bother doing all the work to try and find one. */ - if (block_group->free_space < min_bytes) { - spin_unlock(&block_group->tree_lock); + if (ctl->free_space < min_bytes) { + spin_unlock(&ctl->tree_lock); return -ENOSPC; } @@ -2210,7 +2244,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, } out: spin_unlock(&cluster->lock); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -2231,6 +2265,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct btrfs_fs_info *fs_info = block_group->fs_info; u64 bytes = 0; @@ -2240,52 +2275,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, *trimmed = 0; while (start < end) { - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - if (block_group->free_space < minlen) { - spin_unlock(&block_group->tree_lock); + if (ctl->free_space < minlen) { + spin_unlock(&ctl->tree_lock); break; } - entry = tree_search_offset(block_group, start, 0, 1); + entry = tree_search_offset(ctl, start, 0, 1); if (!entry) - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, - start), + entry = tree_search_offset(ctl, + offset_to_bitmap(ctl, start), 1, 1); if (!entry || entry->offset >= end) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); break; } if (entry->bitmap) { - ret = search_bitmap(block_group, entry, &start, &bytes); + ret = search_bitmap(ctl, entry, &start, &bytes); if (!ret) { if (start >= end) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); break; } bytes = min(bytes, end - start); - bitmap_clear_bits(block_group, entry, - start, bytes); + bitmap_clear_bits(ctl, entry, start, bytes); if (entry->bytes == 0) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); } else { start = entry->offset + BITS_PER_BITMAP * block_group->sectorsize; - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); ret = 0; continue; } } else { start = entry->offset; bytes = min(entry->bytes, end - start); - 
unlink_free_space(block_group, entry); + unlink_free_space(ctl, entry); kfree(entry); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); if (bytes >= minlen) { int update_ret; @@ -2297,8 +2330,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, bytes, &actually_trimmed); - btrfs_add_free_space(block_group, - start, bytes); + btrfs_add_free_space(block_group, start, bytes); if (!update_ret) btrfs_update_reserved_bytes(block_group, bytes, 0, 1); diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 12b2b5165f8a..a64a23fae1eb 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -27,6 +27,25 @@ struct btrfs_free_space { struct list_head list; }; +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct rb_root free_space_offset; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + struct btrfs_free_space_op *op; + void *private; +}; + +struct btrfs_free_space_op { + void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl); + bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info); +}; + struct inode *lookup_free_space_inode(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); @@ -45,6 +64,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, -- cgit v1.2.2 From 581bb050941b4f220f84d3e5ed6dace3d42dd382 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:06:11 +0800 Subject: Btrfs: Cache free inode numbers in memory Currently btrfs stores the highest objectid of the fs tree, and it always returns (highest+1) as the inode number when we create a file, so inode numbers won't be reclaimed when we delete files, and we'll run out of inode numbers as we keep creating and deleting files on 32-bit machines. This fixes it, and it works similarly to how we cache free space in block groups. We start a kernel thread to read the file tree. By scanning inode items, we know which chunks of inode numbers are free, and we cache them in an rb-tree. Because we are searching the commit root, we have to carefully handle the cross-transaction case. The rb-tree is a hybrid extent+bitmap tree, so if we have too many small chunks of inode numbers, we'll use bitmaps. Initially we allow 16K of ram for extents, and a bitmap will be used if we exceed this threshold. The extents threshold is adjusted at runtime.
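The allocation side of this cache is easy to picture in miniature. What follows is an editor's illustration, not code from this patch: a userspace C sketch of the extent half of the hybrid tree, in which free inode numbers are kept as (start, count) runs sorted by start and allocation always hands out the smallest cached number, the way btrfs_find_ino_for_alloc() does below. All names in it are made up.

===== begin illustrative sketch (not part of this patch) =====
#include <stdio.h>
#include <stdlib.h>

/* Free inode numbers modeled as (start, count) runs, sorted by start. */
struct free_extent {
	unsigned long long start;	/* first free inode number in the run */
	unsigned long long count;	/* how many consecutive numbers follow */
	struct free_extent *next;	/* singly linked list kept sorted */
};

/* Record a run of free numbers found while scanning inode items. */
static void add_free_range(struct free_extent **head,
			   unsigned long long start, unsigned long long count)
{
	struct free_extent **p = head;
	struct free_extent *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->start = start;
	e->count = count;
	while (*p && (*p)->start < start)
		p = &(*p)->next;
	e->next = *p;
	*p = e;
}

/* Hand out the smallest cached number; 0 means the cache is empty. */
static unsigned long long alloc_ino(struct free_extent **head)
{
	struct free_extent *e = *head;
	unsigned long long ino;

	if (!e)
		return 0;
	ino = e->start++;
	if (--e->count == 0) {
		*head = e->next;
		free(e);
	}
	return ino;
}

int main(void)
{
	struct free_extent *cache = NULL;

	/* Pretend the scan found two holes in the inode number space. */
	add_free_range(&cache, 257, 3);
	add_free_range(&cache, 1000, 2);

	for (int i = 0; i < 6; i++)
		printf("allocated ino %llu\n", alloc_ino(&cache));
	return 0;
}
===== end illustrative sketch =====

Once too many tiny runs like these accumulate, they are what the bitmap conversion described above replaces.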
Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 15 +- fs/btrfs/disk-io.c | 18 +++ fs/btrfs/free-space-cache.c | 96 ++++++++++--- fs/btrfs/free-space-cache.h | 16 ++- fs/btrfs/inode-map.c | 341 +++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/inode-map.h | 11 ++ fs/btrfs/inode.c | 42 ++++-- fs/btrfs/ioctl.c | 4 +- fs/btrfs/relocation.c | 3 +- fs/btrfs/transaction.c | 7 +- 10 files changed, 500 insertions(+), 53 deletions(-) create mode 100644 fs/btrfs/inode-map.h (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d17e4a3b8bf7..c96a4e4c5566 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1102,6 +1102,15 @@ struct btrfs_root { spinlock_t accounting_lock; struct btrfs_block_rsv *block_rsv; + /* free ino cache stuff */ + struct mutex fs_commit_mutex; + struct btrfs_free_space_ctl *free_ino_ctl; + enum btrfs_caching_type cached; + spinlock_t cache_lock; + wait_queue_head_t cache_wait; + struct btrfs_free_space_ctl *free_ino_pinned; + u64 cache_progress; + struct mutex log_mutex; wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; @@ -2408,12 +2417,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 offset); int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); -/* inode-map.c */ -int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *fs_root, - u64 dirid, u64 *objectid); -int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid); - /* inode-item.c */ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ef6865c17cd6..d02683b1ee16 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -41,6 +41,7 @@ #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" +#include "inode-map.h" static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); @@ -1327,6 +1328,19 @@ again: if (IS_ERR(root)) return root; + root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); + if (!root->free_ino_ctl) + goto fail; + root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), + GFP_NOFS); + if (!root->free_ino_pinned) + goto fail; + + btrfs_init_free_ino_ctl(root); + mutex_init(&root->fs_commit_mutex); + spin_lock_init(&root->cache_lock); + init_waitqueue_head(&root->cache_wait); + set_anon_super(&root->anon_super, NULL); if (btrfs_root_refs(&root->root_item) == 0) { @@ -2483,6 +2497,8 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); + __btrfs_remove_free_space_cache(root->free_ino_pinned); + __btrfs_remove_free_space_cache(root->free_ino_ctl); free_fs_root(root); return 0; } @@ -2496,6 +2512,8 @@ static void free_fs_root(struct btrfs_root *root) } free_extent_buffer(root->node); free_extent_buffer(root->commit_root); + kfree(root->free_ino_ctl); + kfree(root->free_ino_pinned); kfree(root->name); kfree(root); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d4fb4f077a79..2ce89bfc8815 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -25,6 +25,7 @@ #include "transaction.h" #include "disk-io.h" #include "extent_io.h" +#include "inode-map.h" #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) @@ -105,7 +106,7 @@ int create_free_space_inode(struct btrfs_root *root, u64 objectid; int ret; 
- ret = btrfs_find_free_objectid(trans, root, 0, &objectid); + ret = btrfs_find_free_objectid(root, &objectid); if (ret < 0) return ret; @@ -1496,10 +1497,9 @@ bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, return merged; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 offset, u64 bytes) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; int ret = 0; @@ -1751,11 +1751,29 @@ out: return 0; } -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *node; + + spin_lock(&ctl->tree_lock); + while ((node = rb_last(&ctl->free_space_offset)) != NULL) { + info = rb_entry(node, struct btrfs_free_space, offset_index); + unlink_free_space(ctl, info); + kfree(info->bitmap); + kmem_cache_free(btrfs_free_space_cachep, info); + if (need_resched()) { + spin_unlock(&ctl->tree_lock); + cond_resched(); + spin_lock(&ctl->tree_lock); + } + } + spin_unlock(&ctl->tree_lock); +} + +void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_cluster *cluster; struct list_head *head; @@ -1773,21 +1791,9 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) spin_lock(&ctl->tree_lock); } } - - while ((node = rb_last(&ctl->free_space_offset)) != NULL) { - info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(ctl, info); - if (info->bitmap) - kfree(info->bitmap); - kmem_cache_free(btrfs_free_space_cachep, info); - if (need_resched()) { - spin_unlock(&ctl->tree_lock); - cond_resched(); - spin_lock(&ctl->tree_lock); - } - } - spin_unlock(&ctl->tree_lock); + + __btrfs_remove_free_space_cache(ctl); } u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, @@ -2352,3 +2358,53 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, return ret; } + +/* + * Find the left-most item in the cache tree, and then return the + * smallest inode number in the item. + * + * Note: the returned inode number may not be the smallest one in + * the tree, if the left-most item is a bitmap. 
+ */ +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) +{ + struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; + struct btrfs_free_space *entry = NULL; + u64 ino = 0; + + spin_lock(&ctl->tree_lock); + + if (RB_EMPTY_ROOT(&ctl->free_space_offset)) + goto out; + + entry = rb_entry(rb_first(&ctl->free_space_offset), + struct btrfs_free_space, offset_index); + + if (!entry->bitmap) { + ino = entry->offset; + + unlink_free_space(ctl, entry); + entry->offset++; + entry->bytes--; + if (!entry->bytes) + kmem_cache_free(btrfs_free_space_cachep, entry); + else + link_free_space(ctl, entry); + } else { + u64 offset = 0; + u64 count = 1; + int ret; + + ret = search_bitmap(ctl, entry, &offset, &count); + BUG_ON(ret); + + ino = offset; + bitmap_clear_bits(ctl, entry, offset, 1); + if (entry->bytes == 0) + free_bitmap(ctl, entry); + } +out: + spin_unlock(&ctl->tree_lock); + + return ino; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index a64a23fae1eb..af06e6b6ceaa 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -64,15 +64,25 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); + void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 bytenr, u64 size); +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 bytenr, u64 size); +static inline int +btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size) +{ + return __btrfs_add_free_space(block_group->free_space_ctl, + bytenr, size); +} int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); void btrfs_remove_free_space_cache(struct btrfs_block_group_cache - *block_group); + *block_group); u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c05a08f4c411..5be62df90c4f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -16,11 +16,343 @@ * Boston, MA 021110-1307, USA. */ +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/pagemap.h> + #include "ctree.h" #include "disk-io.h" +#include "free-space-cache.h" +#include "inode-map.h" #include "transaction.h" -int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) +static int caching_kthread(void *data) +{ + struct btrfs_root *root = data; + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_key key; + struct btrfs_path *path; + struct extent_buffer *leaf; + u64 last = (u64)-1; + int slot; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* Since the commit root is read-only, we can safely skip locking. 
+ */ + path->skip_locking = 1; + path->search_commit_root = 1; + path->reada = 2; + + key.objectid = BTRFS_FIRST_FREE_OBJECTID; + key.offset = 0; + key.type = BTRFS_INODE_ITEM_KEY; +again: + /* need to make sure the commit_root doesn't disappear */ + mutex_lock(&root->fs_commit_mutex); + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + while (1) { + smp_mb(); + if (fs_info->closing > 1) + goto out; + + leaf = path->nodes[0]; + slot = path->slots[0]; + if (path->slots[0] >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + else if (ret > 0) + break; + + if (need_resched() || + btrfs_transaction_in_commit(fs_info)) { + leaf = path->nodes[0]; + + if (btrfs_header_nritems(leaf) == 0) { + WARN_ON(1); + break; + } + + /* + * Save the key so we can advance forward + * in the next search. + */ + btrfs_item_key_to_cpu(leaf, &key, 0); + btrfs_release_path(root, path); + root->cache_progress = last; + mutex_unlock(&root->fs_commit_mutex); + schedule_timeout(1); + goto again; + } else + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + + if (key.type != BTRFS_INODE_ITEM_KEY) + goto next; + + if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) + break; + + if (last != (u64)-1 && last + 1 != key.objectid) { + __btrfs_add_free_space(ctl, last + 1, + key.objectid - last - 1); + wake_up(&root->cache_wait); + } + + last = key.objectid; +next: + path->slots[0]++; + } + + if (last < BTRFS_LAST_FREE_OBJECTID - 1) { + __btrfs_add_free_space(ctl, last + 1, + BTRFS_LAST_FREE_OBJECTID - last - 1); + } + + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + + root->cache_progress = (u64)-1; + btrfs_unpin_free_ino(root); +out: + wake_up(&root->cache_wait); + mutex_unlock(&root->fs_commit_mutex); + + btrfs_free_path(path); + + return ret; +} + +static void start_caching(struct btrfs_root *root) +{ + struct task_struct *tsk; + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_NO) { + spin_unlock(&root->cache_lock); + return; + } + + root->cached = BTRFS_CACHE_STARTED; + spin_unlock(&root->cache_lock); + + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", + root->root_key.objectid); + BUG_ON(IS_ERR(tsk)); +} + +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) +{ +again: + *objectid = btrfs_find_ino_for_alloc(root); + + if (*objectid != 0) + return 0; + + start_caching(root); + + wait_event(root->cache_wait, + root->cached == BTRFS_CACHE_FINISHED || + root->free_ino_ctl->free_space > 0); + + if (root->cached == BTRFS_CACHE_FINISHED && + root->free_ino_ctl->free_space == 0) + return -ENOSPC; + else + goto again; +} + +void btrfs_return_ino(struct btrfs_root *root, u64 objectid) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; +again: + if (root->cached == BTRFS_CACHE_FINISHED) { + __btrfs_add_free_space(ctl, objectid, 1); + } else { + /* + * If we are in the process of caching free ino chunks, + * to avoid adding the same inode number to the free_ino + * tree twice due to cross transaction, we'll leave it + * in the pinned tree until a transaction is committed + * or the caching work is done. 
+ */ + + mutex_lock(&root->fs_commit_mutex); + spin_lock(&root->cache_lock); + if (root->cached == BTRFS_CACHE_FINISHED) { + spin_unlock(&root->cache_lock); + mutex_unlock(&root->fs_commit_mutex); + goto again; + } + spin_unlock(&root->cache_lock); + + start_caching(root); + + if (objectid <= root->cache_progress) + __btrfs_add_free_space(ctl, objectid, 1); + else + __btrfs_add_free_space(pinned, objectid, 1); + + mutex_unlock(&root->fs_commit_mutex); + } +} + +/* + * When a transaction is committed, we'll move those inode numbers which + * are smaller than root->cache_progress from pinned tree to free_ino tree, + * and others will just be dropped, because the commit root we were + * searching has changed. + * + * Must be called with root->fs_commit_mutex held + */ +void btrfs_unpin_free_ino(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; + struct btrfs_free_space *info; + struct rb_node *n; + u64 count; + + while (1) { + n = rb_first(rbroot); + if (!n) + break; + + info = rb_entry(n, struct btrfs_free_space, offset_index); + BUG_ON(info->bitmap); + + if (info->offset > root->cache_progress) + goto free; + else if (info->offset + info->bytes > root->cache_progress) + count = root->cache_progress - info->offset + 1; + else + count = info->bytes; + + __btrfs_add_free_space(ctl, info->offset, count); +free: + rb_erase(&info->offset_index, rbroot); + kfree(info); + } +} + +#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space)) +#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) + +/* + * The goal is to keep the memory used by the free_ino tree from + * exceeding the memory we would use if we used bitmaps only. + */ +static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) +{ + struct btrfs_free_space *info; + struct rb_node *n; + int max_ino; + int max_bitmaps; + + n = rb_last(&ctl->free_space_offset); + if (!n) { + ctl->extents_thresh = INIT_THRESHOLD; + return; + } + info = rb_entry(n, struct btrfs_free_space, offset_index); + + /* + * Find the maximum inode number in the filesystem. Note we + * ignore the fact that this can be a bitmap, because we are + * not doing precise calculation. + */ + max_ino = info->bytes - 1; + + max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP; + if (max_bitmaps <= ctl->total_bitmaps) { + ctl->extents_thresh = 0; + return; + } + + ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * + PAGE_CACHE_SIZE / sizeof(*info); +} + +/* + * We don't fall back to bitmaps if we are below the extents threshold + * or this chunk of inode numbers is a big one. + */ +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + if (ctl->free_extents < ctl->extents_thresh || + info->bytes > INODES_PER_BITMAP / 10) + return false; + + return true; +} + +static struct btrfs_free_space_op free_ino_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl) +{ +} + +static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + /* + * We always use extents for two reasons: + * + * - The pinned tree is only used during the process of caching + * work. + * - Make code simpler. See btrfs_unpin_free_ino(). 
+ */ + return false; +} + +static struct btrfs_free_space_op pinned_free_ino_op = { + .recalc_thresholds = pinned_recalc_thresholds, + .use_bitmap = pinned_use_bitmap, +}; + +void btrfs_init_free_ino_ctl(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = 1; + ctl->start = 0; + ctl->private = NULL; + ctl->op = &free_ino_op; + + /* + * Initially we allow up to 16K of ram to cache chunks of + * inode numbers before we resort to bitmaps. This is somewhat + * arbitrary, but it will be adjusted at runtime. + */ + ctl->extents_thresh = INIT_THRESHOLD; + + spin_lock_init(&pinned->tree_lock); + pinned->unit = 1; + pinned->start = 0; + pinned->private = NULL; + pinned->extents_thresh = 0; + pinned->op = &pinned_free_ino_op; +} + +static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) { struct btrfs_path *path; int ret; @@ -55,15 +387,14 @@ error: return ret; } -int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 dirid, u64 *objectid) +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) { int ret; mutex_lock(&root->objectid_mutex); if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { - ret = btrfs_find_highest_inode(root, &root->highest_objectid); + ret = btrfs_find_highest_objectid(root, + &root->highest_objectid); if (ret) goto out; } diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h new file mode 100644 index 000000000000..eb918451b492 --- /dev/null +++ b/fs/btrfs/inode-map.h @@ -0,0 +1,11 @@ +#ifndef __BTRFS_INODE_MAP +#define __BTRFS_INODE_MAP + +void btrfs_init_free_ino_ctl(struct btrfs_root *root); +void btrfs_unpin_free_ino(struct btrfs_root *root); +void btrfs_return_ino(struct btrfs_root *root, u64 objectid); +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); + +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); + +#endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a4157cfdd533..77dd0a776c83 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -51,6 +51,7 @@ #include "compression.h" #include "locking.h" #include "free-space-cache.h" +#include "inode-map.h" struct btrfs_iget_args { u64 ino; @@ -3809,6 +3810,10 @@ void btrfs_evict_inode(struct inode *inode) BUG_ON(ret); } + if (!(root == root->fs_info->tree_root || + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) + btrfs_return_ino(root, inode->i_ino); + nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); @@ -4538,6 +4543,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, return ERR_PTR(-ENOMEM); } + /* + * we have to initialize this early, so we can reclaim the inode + * number if we fail afterwards in this function. 
+ */ + inode->i_ino = objectid; + if (dir) { trace_btrfs_inode_request(dir); @@ -4583,7 +4594,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, goto fail; inode_init_owner(inode, dir, mode); - inode->i_ino = objectid; inode_set_bytes(inode, 0); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], @@ -4712,10 +4722,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (!new_valid_dev(rdev)) return -EINVAL; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 for inode item and ref * 2 for dir items @@ -4727,6 +4733,10 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); @@ -4774,9 +4784,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, u64 objectid; u64 index = 0; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 for inode item and ref * 2 for dir items @@ -4788,6 +4795,10 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); @@ -4902,10 +4913,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) u64 index = 0; unsigned long nr = 1; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 items for inode and ref * 2 items for dir items @@ -4916,6 +4923,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) return PTR_ERR(trans); btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_fail; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFDIR | mode, @@ -7257,9 +7268,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) return -ENAMETOOLONG; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 items for inode item and ref * 2 items for dir items @@ -7271,6 +7279,10 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f580a3a5d2fc..e1835f8eec93 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -50,6 +50,7 @@ #include "print-tree.h" #include "volumes.h" #include "locking.h" +#include "inode-map.h" /* Mask out flags that are inappropriate for the given type of inode. 
*/ static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) @@ -323,8 +324,7 @@ static noinline int create_subvol(struct btrfs_root *root, u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; u64 index = 0; - ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, - 0, &objectid); + ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); if (ret) { dput(parent); return ret; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 58250e09eb05..e6cb89357256 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -30,6 +30,7 @@ #include "btrfs_inode.h" #include "async-thread.h" #include "free-space-cache.h" +#include "inode-map.h" /* * backref_node, mapping_node and tree_block start with this @@ -3897,7 +3898,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, if (IS_ERR(trans)) return ERR_CAST(trans); - err = btrfs_find_free_objectid(trans, root, objectid, &objectid); + err = btrfs_find_free_objectid(root, &objectid); if (err) goto out; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..aef6c81e7101 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -27,6 +27,7 @@ #include "transaction.h" #include "locking.h" #include "tree-log.h" +#include "inode-map.h" #define BTRFS_ROOT_TRANS_TAG 0 @@ -761,7 +762,11 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_orphan_commit_root(trans, root); if (root->commit_root != root->node) { + mutex_lock(&root->fs_commit_mutex); switch_commit_root(root); + btrfs_unpin_free_ino(root); + mutex_unlock(&root->fs_commit_mutex); + btrfs_set_root_node(&root->root_item, root->node); } @@ -930,7 +935,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); + ret = btrfs_find_free_objectid(tree_root, &objectid); if (ret) { pending->error = ret; goto fail; -- cgit v1.2.2 From 0414efae7989a2183fb2cc000ab285c4c2836a00 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:20:14 +0800 Subject: Btrfs: Make the code for reading/writing free space cache generic Extract out block group specific code from lookup_free_space_inode(), create_free_space_inode(), load_free_space_cache() and btrfs_write_out_cache(), so the code can be used to read/write free ino cache. 
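The shape of this change is the usual split into a generic core plus a thin caller-specific wrapper. As a rough editor's illustration with stand-in types (only the double-underscore naming convention mirrors the patch; every other name here is made up), the generic half takes just a handle and an offset, while the wrapper keeps the block-group-only validation:

===== begin illustrative sketch (not part of this patch) =====
#include <stdio.h>

/* Stand-ins for the real btrfs types; illustrative only. */
struct ctl { long free_space; };
struct bg { long objectid; long expected_free; };

/* Generic half: knows nothing about block groups, so a second cache
 * (here, a free-ino cache) could call it with its own offset. */
static int __load_cache(struct ctl *ctl, FILE *f, long offset)
{
	if (fseek(f, offset, SEEK_SET))
		return -1;
	/* ... parse on-disk entries into ctl here ... */
	ctl->free_space = 0;
	return 1;
}

/* Block-group wrapper: keeps only the caller-specific validation
 * that used to be inlined in the monolithic loader. */
static int load_bg_cache(struct ctl *ctl, FILE *f, struct bg *bg)
{
	int ret = __load_cache(ctl, f, bg->objectid);

	if (ret <= 0)
		return ret;
	if (ctl->free_space != bg->expected_free)
		return -1;	/* cache is bogus: throw it away */
	return 1;
}

int main(void)
{
	struct ctl ctl;
	struct bg bg = { 0, 0 };
	FILE *f = tmpfile();

	if (!f)
		return 1;
	printf("load returned %d\n", load_bg_cache(&ctl, f, &bg));
	fclose(f);
	return 0;
}
===== end illustrative sketch =====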
Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 358 +++++++++++++++++++++++++------------------- 1 file changed, 204 insertions(+), 154 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2ce89bfc8815..fcbdcef6ca28 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -33,9 +33,9 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); -struct inode *lookup_free_space_inode(struct btrfs_root *root, - struct btrfs_block_group_cache - *block_group, struct btrfs_path *path) +static struct inode *__lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_path *path, + u64 offset) { struct btrfs_key key; struct btrfs_key location; @@ -45,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, struct inode *inode = NULL; int ret; - spin_lock(&block_group->lock); - if (block_group->inode) - inode = igrab(block_group->inode); - spin_unlock(&block_group->lock); - if (inode) - return inode; - key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -83,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, inode->i_mapping->flags &= ~__GFP_FS; + return inode; +} + +struct inode *lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_block_group_cache + *block_group, struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&block_group->lock); + if (block_group->inode) + inode = igrab(block_group->inode); + spin_unlock(&block_group->lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, + block_group->key.objectid); + if (IS_ERR(inode)) + return inode; + spin_lock(&block_group->lock); if (!root->fs_info->closing) { block_group->inode = igrab(inode); @@ -93,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return inode; } -int create_free_space_inode(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int __create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 ino, u64 offset) { struct btrfs_key key; struct btrfs_disk_key disk_key; struct btrfs_free_space_header *header; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; - u64 objectid; int ret; - ret = btrfs_find_free_objectid(root, &objectid); - if (ret < 0) - return ret; - - ret = btrfs_insert_empty_inode(trans, root, path, objectid); + ret = btrfs_insert_empty_inode(trans, root, path, ino); if (ret) return ret; @@ -130,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root, BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); btrfs_set_inode_nlink(leaf, inode_item, 1); btrfs_set_inode_transid(leaf, inode_item, trans->transid); - btrfs_set_inode_block_group(leaf, inode_item, - block_group->key.objectid); + btrfs_set_inode_block_group(leaf, inode_item, offset); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(root, path); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_insert_empty_item(trans, root, path, &key, @@ -156,6 +163,22 @@ int create_free_space_inode(struct btrfs_root *root, return 0; } +int create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache 
*block_group, + struct btrfs_path *path) +{ + int ret; + u64 ino; + + ret = btrfs_find_free_objectid(root, &ino); + if (ret < 0) + return ret; + + return __create_free_space_inode(root, trans, path, ino, + block_group->key.objectid); +} + int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, @@ -208,16 +231,13 @@ static int readahead_cache(struct inode *inode) return 0; } -int load_free_space_cache(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group) +int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_path *path, u64 offset) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; - struct btrfs_root *root = fs_info->tree_root; - struct inode *inode; struct btrfs_free_space_header *header; struct extent_buffer *leaf; struct page *page; - struct btrfs_path *path; u32 *checksums = NULL, *crc; char *disk_crcs = NULL; struct btrfs_key key; @@ -225,76 +245,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, u64 num_entries; u64 num_bitmaps; u64 generation; - u64 used = btrfs_block_group_used(&block_group->item); u32 cur_crc = ~(u32)0; pgoff_t index = 0; unsigned long first_page_offset; int num_checksums; - int ret = 0; - - /* - * If we're unmounting then just return, since this does a search on the - * normal root and not the commit root and we could deadlock. - */ - smp_mb(); - if (fs_info->closing) - return 0; - - /* - * If this block group has been marked to be cleared for one reason or - * another then we can't trust the on disk cache, so just return. - */ - spin_lock(&block_group->lock); - if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); + int ret = 0, ret2; INIT_LIST_HEAD(&bitmaps); - path = btrfs_alloc_path(); - if (!path) - return 0; - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) { - btrfs_free_path(path); - return 0; - } - /* Nothing in the space cache, goodbye */ - if (!i_size_read(inode)) { - btrfs_free_path(path); + if (!i_size_read(inode)) goto out; - } key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret) { - btrfs_free_path(path); + if (ret < 0) + goto out; + else if (ret > 0) { + btrfs_release_path(root, path); + ret = 0; goto out; } + ret = -1; + leaf = path->nodes[0]; header = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_free_space_header); num_entries = btrfs_free_space_entries(leaf, header); num_bitmaps = btrfs_free_space_bitmaps(leaf, header); generation = btrfs_free_space_generation(leaf, header); - btrfs_free_path(path); + btrfs_release_path(root, path); if (BTRFS_I(inode)->generation != generation) { printk(KERN_ERR "btrfs: free space inode generation (%llu) did" - " not match free space cache generation (%llu) for " - "block group %llu\n", + " not match free space cache generation (%llu)\n", (unsigned long long)BTRFS_I(inode)->generation, - (unsigned long long)generation, - (unsigned long long)block_group->key.objectid); - goto free_cache; + (unsigned long long)generation); + goto out; } if (!num_entries) @@ -311,10 +302,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, goto out; ret = readahead_cache(inode); - if (ret) { - ret = 0; + if (ret) goto out; - } while (1) { struct btrfs_free_space_entry *entry; @@ 
-333,10 +322,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, } page = grab_cache_page(inode->i_mapping, index); - if (!page) { - ret = 0; + if (!page) goto free_cache; - } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); @@ -345,9 +332,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, unlock_page(page); page_cache_release(page); printk(KERN_ERR "btrfs: error reading free " - "space cache: %llu\n", - (unsigned long long) - block_group->key.objectid); + "space cache\n"); goto free_cache; } } @@ -360,13 +345,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, gen = addr + (sizeof(u32) * num_checksums); if (*gen != BTRFS_I(inode)->generation) { printk(KERN_ERR "btrfs: space cache generation" - " (%llu) does not match inode (%llu) " - "for block group %llu\n", + " (%llu) does not match inode (%llu)\n", (unsigned long long)*gen, (unsigned long long) - BTRFS_I(inode)->generation, - (unsigned long long) - block_group->key.objectid); + BTRFS_I(inode)->generation); kunmap(page); unlock_page(page); page_cache_release(page); @@ -382,9 +364,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, PAGE_CACHE_SIZE - start_offset); btrfs_csum_final(cur_crc, (char *)&cur_crc); if (cur_crc != *crc) { - printk(KERN_ERR "btrfs: crc mismatch for page %lu in " - "block group %llu\n", index, - (unsigned long long)block_group->key.objectid); + printk(KERN_ERR "btrfs: crc mismatch for page %lu\n", + index); kunmap(page); unlock_page(page); page_cache_release(page); @@ -432,7 +413,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, goto free_cache; } spin_lock(&ctl->tree_lock); - ret = link_free_space(ctl, e); + ret2 = link_free_space(ctl, e); ctl->total_bitmaps++; ctl->op->recalc_thresholds(ctl); spin_unlock(&ctl->tree_lock); @@ -471,42 +452,96 @@ next: index++; } - spin_lock(&ctl->tree_lock); - if (ctl->free_space != (block_group->key.offset - used - - block_group->bytes_super)) { - spin_unlock(&ctl->tree_lock); - printk(KERN_ERR "block group %llu has an wrong amount of free " - "space\n", block_group->key.objectid); - ret = 0; - goto free_cache; - } - spin_unlock(&ctl->tree_lock); - ret = 1; out: kfree(checksums); kfree(disk_crcs); - iput(inode); return ret; - free_cache: - /* This cache is bogus, make sure it gets cleared */ - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_CLEAR; - spin_unlock(&block_group->lock); - btrfs_remove_free_space_cache(block_group); + __btrfs_remove_free_space_cache(ctl); goto out; } -int btrfs_write_out_cache(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int load_free_space_cache(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct btrfs_root *root = fs_info->tree_root; + struct inode *inode; + struct btrfs_path *path; + int ret; + bool matched; + u64 used = btrfs_block_group_used(&block_group->item); + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. + */ + smp_mb(); + if (fs_info->closing) + return 0; + + /* + * If this block group has been marked to be cleared for one reason or + * another then we can't trust the on disk cache, so just return. 
+ */ + spin_lock(&block_group->lock); + if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) { + btrfs_free_path(path); + return 0; + } + + ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, + path, block_group->key.objectid); + btrfs_free_path(path); + if (ret <= 0) + goto out; + + spin_lock(&ctl->tree_lock); + matched = (ctl->free_space == (block_group->key.offset - used - + block_group->bytes_super)); + spin_unlock(&ctl->tree_lock); + + if (!matched) { + __btrfs_remove_free_space_cache(ctl); + printk(KERN_ERR "block group %llu has a wrong amount of free " + "space\n", block_group->key.objectid); + ret = -1; + } +out: + if (ret < 0) { + /* This cache is bogus, make sure it gets cleared */ + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_CLEAR; + spin_unlock(&block_group->lock); + + printk(KERN_ERR "btrfs: failed to load free space cache " + "for block group %llu\n", block_group->key.objectid); + } + + iput(inode); + return ret; +} + +int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_block_group_cache *block_group, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 offset) +{ struct btrfs_free_space_header *header; struct extent_buffer *leaf; - struct inode *inode; struct rb_node *node; struct list_head *pos, *n; struct page **pages; @@ -523,35 +558,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; - int ret = 0; + int ret = -1; bool next_page = false; bool out_of_space = false; - root = root->fs_info->tree_root; - INIT_LIST_HEAD(&bitmap_list); - spin_lock(&block_group->lock); - if (block_group->disk_cache_state < BTRFS_DC_SETUP) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) - return 0; - - if (!i_size_read(inode)) { - iput(inode); - return 0; - } - node = rb_first(&ctl->free_space_offset); - if (!node) { - iput(inode); + if (!node) return 0; - } + + if (!i_size_read(inode)) + return -1; num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; @@ -561,16 +579,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, /* We need a checksum per page. */ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); - if (!crc) { - iput(inode); - return 0; - } + if (!crc) + return -1; pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); if (!pages) { kfree(crc); - iput(inode); - return 0; + return -1; } /* Since the first page has all of our checksums and our generation we @@ -580,7 +595,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); /* Get the cluster for this block_group if it exists */ - if (!list_empty(&block_group->cluster_list)) + if (block_group && !list_empty(&block_group->cluster_list)) cluster = list_entry(block_group->cluster_list.next, struct btrfs_free_cluster, block_group_list); @@ -622,7 +637,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, * When searching for pinned extents, we need to start at our start * offset.
*/ - start = block_group->key.objectid; + if (block_group) + start = block_group->key.objectid; /* Write out the extent entries */ do { @@ -680,8 +696,9 @@ int btrfs_write_out_cache(struct btrfs_root *root, * We want to add any pinned extents to our free space cache * so we don't leak the space */ - while (!next_page && (start < block_group->key.objectid + - block_group->key.offset)) { + while (block_group && !next_page && + (start < block_group->key.objectid + + block_group->key.offset)) { ret = find_first_extent_bit(unpin, start, &start, &end, EXTENT_DIRTY); if (ret) { @@ -799,12 +816,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, filemap_write_and_wait(inode->i_mapping); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(trans, root, &key, path, 1, 1); if (ret < 0) { - ret = 0; + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); @@ -817,8 +834,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || - found_key.offset != block_group->key.objectid) { - ret = 0; + found_key.offset != offset) { + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, @@ -838,16 +855,49 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = 1; out_free: - if (ret == 0) { + if (ret != 1) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_ERROR; - spin_unlock(&block_group->lock); BTRFS_I(inode)->generation = 0; } kfree(checksums); kfree(pages); btrfs_update_inode(trans, root, inode); + return ret; +} + +int btrfs_write_out_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct inode *inode; + int ret = 0; + + root = root->fs_info->tree_root; + + spin_lock(&block_group->lock); + if (block_group->disk_cache_state < BTRFS_DC_SETUP) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, + path, block_group->key.objectid); + if (ret < 0) { + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_ERROR; + spin_unlock(&block_group->lock); + + printk(KERN_ERR "btrfs: failed to write free space cache " + "for block group %llu\n", block_group->key.objectid); + } + iput(inode); return ret; } -- cgit v1.2.2 From 33345d01522f8152f99dc84a3e7a1a45707f387f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:31:50 +0800 Subject: Btrfs: Always use 64bit inode number There's a potential problem on 32bit systems when we exhaust 32bit inode numbers and start to allocate big inode numbers, because btrfs uses inode->i_ino in many places. So here we always use BTRFS_I(inode)->location.objectid, which is a u64 variable. There are 2 exceptions where BTRFS_I(inode)->location.objectid != inode->i_ino: the btree inode (0 vs 1) and empty subvol dirs (256 vs 2), and inode->i_ino will be used in those cases.
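To make the fallback rule concrete, here is a minimal userspace sketch (not part of this patch; fake_inode and fake_btrfs_ino are illustrative stand-ins for struct btrfs_inode and the btrfs_ino() helper added to fs/btrfs/btrfs_inode.h in the diff below):

#include <stdint.h>
#include <stdio.h>

#define BTRFS_FIRST_FREE_OBJECTID 256ULL	/* mirrors the kernel constant */

/* stand-in for struct btrfs_inode: the key objectid plus the VFS inode number */
struct fake_inode {
	uint64_t location_objectid;	/* BTRFS_I(inode)->location.objectid */
	unsigned long i_ino;		/* only 32 bits wide on 32bit systems */
};

/*
 * Models the new btrfs_ino(): prefer the 64bit objectid, except for the
 * btree inode and empty subvol dirs, which keep using i_ino.
 */
static uint64_t fake_btrfs_ino(const struct fake_inode *inode)
{
	uint64_t ino = inode->location_objectid;

	if (ino <= BTRFS_FIRST_FREE_OBJECTID)
		ino = inode->i_ino;
	return ino;
}

int main(void)
{
	struct fake_inode btree = { 0, 1 };		/* btree inode: 0 vs 1 */
	struct fake_inode subvol = { 256, 2 };		/* empty subvol dir: 256 vs 2 */
	struct fake_inode big = { 1ULL << 33, 4242 };	/* objectid needs 64 bits */

	/* prints "1 2 8589934592" */
	printf("%llu %llu %llu\n",
	       (unsigned long long)fake_btrfs_ino(&btree),
	       (unsigned long long)fake_btrfs_ino(&subvol),
	       (unsigned long long)fake_btrfs_ino(&big));
	return 0;
}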
Another reason to make this change is I'm going to use a special inode to save free ino cache, and the inode number must be > (u64)-256. Signed-off-by: Li Zefan --- fs/btrfs/btrfs_inode.h | 9 +++ fs/btrfs/compression.c | 5 +- fs/btrfs/export.c | 25 ++++--- fs/btrfs/extent-tree.c | 10 +-- fs/btrfs/extent_io.c | 4 +- fs/btrfs/file-item.c | 5 +- fs/btrfs/file.c | 27 +++---- fs/btrfs/inode.c | 197 ++++++++++++++++++++++++++----------------------- fs/btrfs/ioctl.c | 18 ++--- fs/btrfs/relocation.c | 24 +++--- fs/btrfs/transaction.c | 4 +- fs/btrfs/tree-log.c | 54 +++++++------- fs/btrfs/xattr.c | 8 +- 13 files changed, 208 insertions(+), 182 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..8842a4195f91 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -166,6 +166,15 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode) return container_of(inode, struct btrfs_inode, vfs_inode); } +static inline u64 btrfs_ino(struct inode *inode) +{ + u64 ino = BTRFS_I(inode)->location.objectid; + + if (ino <= BTRFS_FIRST_FREE_OBJECTID) + ino = inode->i_ino; + return ino; +} + static inline void btrfs_i_size_write(struct inode *inode, u64 size) { i_size_write(inode, size); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 41d1d7c70e29..369d5068ac7a 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode, kunmap_atomic(kaddr, KM_USER0); if (csum != *cb_sum) { - printk(KERN_INFO "btrfs csum failed ino %lu " + printk(KERN_INFO "btrfs csum failed ino %llu " "extent %llu csum %u " - "wanted %u mirror %d\n", inode->i_ino, + "wanted %u mirror %d\n", + (unsigned long long)btrfs_ino(inode), (unsigned long long)disk_start, csum, *cb_sum, cb->mirror_num); ret = -EIO; diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ff27d7a477b2..7fa283e7d306 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -28,7 +28,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, len = BTRFS_FID_SIZE_NON_CONNECTABLE; type = FILEID_BTRFS_WITHOUT_PARENT; - fid->objectid = inode->i_ino; + fid->objectid = btrfs_ino(inode); fid->root_objectid = BTRFS_I(inode)->root->objectid; fid->gen = inode->i_generation; @@ -174,13 +174,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child) if (!path) return ERR_PTR(-ENOMEM); - if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { - key.objectid = dir->i_ino; + key.objectid = btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; } @@ -240,6 +240,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, struct btrfs_key key; int name_len; int ret; + u64 ino; if (!dir || !inode) return -EINVAL; @@ -247,19 +248,21 @@ static int btrfs_get_name(struct dentry *parent, char *name, if (!S_ISDIR(dir->i_mode)) return -EINVAL; + ino = btrfs_ino(inode); + path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->leave_spinning = 1; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = BTRFS_I(inode)->root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { - key.objectid = inode->i_ino; - key.offset = dir->i_ino; + key.objectid = ino; + key.offset = 
btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; } @@ -268,7 +271,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, btrfs_free_path(path); return ret; } else if (ret > 0) { - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { path->slots[0]--; } else { btrfs_free_path(path); @@ -277,11 +280,11 @@ static int btrfs_get_name(struct dentry *parent, char *name, } leaf = path->nodes[0]; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { - rref = btrfs_item_ptr(leaf, path->slots[0], + if (ino == BTRFS_FIRST_FREE_OBJECTID) { + rref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - name_ptr = (unsigned long)(rref + 1); - name_len = btrfs_root_ref_name_len(leaf, rref); + name_ptr = (unsigned long)(rref + 1); + name_len = btrfs_root_ref_name_len(leaf, rref); } else { iref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_ref); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 904eae10ec65..a0e818cb0401 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7009,8 +7009,8 @@ static noinline int get_new_locations(struct inode *reloc_inode, cur_pos = extent_key->objectid - offset; last_byte = extent_key->objectid + extent_key->offset; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, - cur_pos, 0); + ret = btrfs_lookup_file_extent(NULL, root, path, + btrfs_ino(reloc_inode), cur_pos, 0); if (ret < 0) goto out; if (ret > 0) { @@ -7033,7 +7033,7 @@ static noinline int get_new_locations(struct inode *reloc_inode, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.offset != cur_pos || found_key.type != BTRFS_EXTENT_DATA_KEY || - found_key.objectid != reloc_inode->i_ino) + found_key.objectid != btrfs_ino(reloc_inode)) break; fi = btrfs_item_ptr(leaf, path->slots[0], @@ -7179,7 +7179,7 @@ next: break; } - if (inode && key.objectid != inode->i_ino) { + if (inode && key.objectid != btrfs_ino(inode)) { BUG_ON(extent_locked); btrfs_release_path(root, path); mutex_unlock(&inode->i_mutex); @@ -7488,7 +7488,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root, continue; if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) continue; - if (!inode || inode->i_ino != key.objectid) { + if (!inode || btrfs_ino(inode) != key.objectid) { iput(inode); inode = btrfs_ilookup(target_root->fs_info->sb, key.objectid, target_root, 1); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5ae0bffaa4d8..41d313a0d098 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3030,7 +3030,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * because there might be preallocation past i_size */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, - path, inode->i_ino, -1, 0); + path, btrfs_ino(inode), -1, 0); if (ret < 0) { btrfs_free_path(path); return ret; @@ -3043,7 +3043,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, found_type = btrfs_key_type(&found_key); /* No extents, but there might be delalloc bits */ - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || found_type != BTRFS_EXTENT_DATA_KEY) { /* have to trust i_size as the end */ last = (u64)-1; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..1d9410e39212 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -208,8 +208,9 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, EXTENT_NODATASUM, GFP_NOFS); } else { printk(KERN_INFO "btrfs no csum found 
" - "for inode %lu start %llu\n", - inode->i_ino, + "for inode %llu start %llu\n", + (unsigned long long) + btrfs_ino(inode), (unsigned long long)offset); } item = NULL; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 75899a01dded..bef020451525 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -298,6 +298,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_path *path; struct btrfs_key key; struct btrfs_key new_key; + u64 ino = btrfs_ino(inode); u64 search_start = start; u64 disk_bytenr = 0; u64 num_bytes = 0; @@ -318,14 +319,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, while (1) { recow = 0; - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, search_start, -1); if (ret < 0) break; if (ret > 0 && path->slots[0] > 0 && search_start == start) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); - if (key.objectid == inode->i_ino && + if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -346,7 +347,7 @@ next_slot: } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - if (key.objectid > inode->i_ino || + if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) break; @@ -592,6 +593,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int del_slot = 0; int recow; int ret; + u64 ino = btrfs_ino(inode); btrfs_drop_extent_cache(inode, start, end - 1, 0); @@ -600,7 +602,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, again: recow = 0; split = start; - key.objectid = inode->i_ino; + key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = split; @@ -612,8 +614,7 @@ again: leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - BUG_ON(key.objectid != inode->i_ino || - key.type != BTRFS_EXTENT_DATA_KEY); + BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY); fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); BUG_ON(btrfs_file_extent_type(leaf, fi) != @@ -630,7 +631,7 @@ again: other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { new_key.offset = end; btrfs_set_item_key_safe(trans, root, path, &new_key); @@ -653,7 +654,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); @@ -702,7 +703,7 @@ again: ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); if (split == start) { @@ -718,7 +719,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(root, path); @@ -729,13 +730,13 @@ again: del_nr++; ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(root, path); @@ -746,7 +747,7 @@ 
again: del_nr++; ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } if (del_nr == 0) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 77dd0a776c83..adec22884a3e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -138,7 +138,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, path->leave_spinning = 1; btrfs_set_trans_block_group(trans, inode); - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); key.offset = start; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); datasize = btrfs_file_extent_calc_inline_size(cur_size); @@ -1049,6 +1049,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, int nocow; int check_prev = 1; bool nolock = false; + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); BUG_ON(!path); @@ -1063,14 +1064,14 @@ static noinline int run_delalloc_nocow(struct inode *inode, cow_start = (u64)-1; cur_offset = start; while (1) { - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, cur_offset, 0); BUG_ON(ret < 0); if (ret > 0 && path->slots[0] > 0 && check_prev) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); - if (found_key.objectid == inode->i_ino && + if (found_key.objectid == ino && found_key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -1091,7 +1092,7 @@ next_slot: num_bytes = 0; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid > inode->i_ino || + if (found_key.objectid > ino || found_key.type > BTRFS_EXTENT_DATA_KEY || found_key.offset > end) break; @@ -1126,7 +1127,7 @@ next_slot: goto out_check; if (btrfs_extent_readonly(root, disk_bytenr)) goto out_check; - if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + if (btrfs_cross_ref_exist(trans, root, ino, found_key.offset - extent_offset, disk_bytenr)) goto out_check; @@ -1643,7 +1644,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, &hint, 0); BUG_ON(ret); - ins.objectid = inode->i_ino; + ins.objectid = btrfs_ino(inode); ins.offset = file_pos; ins.type = BTRFS_EXTENT_DATA_KEY; ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); @@ -1674,7 +1675,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, ins.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_alloc_reserved_file_extent(trans, root, root->root_key.objectid, - inode->i_ino, file_pos, &ins); + btrfs_ino(inode), file_pos, &ins); BUG_ON(ret); btrfs_free_path(path); @@ -2004,8 +2005,9 @@ good: zeroit: if (printk_ratelimit()) { - printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " - "private %llu\n", page->mapping->host->i_ino, + printk(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " + "private %llu\n", + (unsigned long long)btrfs_ino(page->mapping->host), (unsigned long long)start, csum, (unsigned long long)private); } @@ -2243,7 +2245,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) /* insert an orphan item to track this unlinked/truncated file */ if (insert >= 1) { - ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); + ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); BUG_ON(ret); } @@ -2280,7 +2282,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) spin_unlock(&root->orphan_lock); if (trans && delete_item) { - ret = btrfs_del_orphan_item(trans, root, inode->i_ino); + ret = 
btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); BUG_ON(ret); } @@ -2542,7 +2544,8 @@ static void btrfs_read_locked_inode(struct inode *inode) * try to precache a NULL acl entry for files that don't have * any xattrs or acls */ - maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); + maybe_acls = acls_after_inode_item(leaf, path->slots[0], + btrfs_ino(inode)); if (!maybe_acls) cache_no_acl(inode); @@ -2688,6 +2691,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_dir_item *di; struct btrfs_key key; u64 index; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) { @@ -2696,7 +2701,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, } path->leave_spinning = 1; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -2713,17 +2718,16 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; btrfs_release_path(root, path); - ret = btrfs_del_inode_ref(trans, root, name, name_len, - inode->i_ino, - dir->i_ino, &index); + ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, + dir_ino, &index); if (ret) { printk(KERN_INFO "btrfs failed to delete reference to %.*s, " - "inode %lu parent %lu\n", name_len, name, - inode->i_ino, dir->i_ino); + "inode %llu parent %llu\n", name_len, name, + (unsigned long long)ino, (unsigned long long)dir_ino); goto err; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -2737,7 +2741,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, - inode, dir->i_ino); + inode, dir_ino); BUG_ON(ret != 0 && ret != -ENOENT); ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, @@ -2815,12 +2819,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, int check_link = 1; int err = -ENOSPC; int ret; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); trans = btrfs_start_transaction(root, 10); if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) return trans; - if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return ERR_PTR(-ENOSPC); /* check if there is someone else holds reference */ @@ -2879,7 +2885,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, if (ret == 0 && S_ISREG(inode->i_mode)) { ret = btrfs_lookup_file_extent(trans, root, path, - inode->i_ino, (u64)-1, 0); + ino, (u64)-1, 0); if (ret < 0) { err = ret; goto out; @@ -2895,7 +2901,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, goto out; } - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2912,7 +2918,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, ref = btrfs_lookup_inode_ref(trans, root, path, dentry->d_name.name, dentry->d_name.len, - inode->i_ino, dir->i_ino, 0); + ino, dir_ino, 0); if (IS_ERR(ref)) { err = PTR_ERR(ref); goto out; @@ -2923,7 +2929,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, index = btrfs_inode_ref_index(path->nodes[0], 
ref); btrfs_release_path(root, path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2998,12 +3004,13 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_key key; u64 index; int ret; + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); BUG_ON(!di || IS_ERR(di)); @@ -3016,10 +3023,10 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, objectid, root->root_key.objectid, - dir->i_ino, &index, name, name_len); + dir_ino, &index, name, name_len); if (ret < 0) { BUG_ON(ret != -ENOENT); - di = btrfs_search_dir_index_item(root, path, dir->i_ino, + di = btrfs_search_dir_index_item(root, path, dir_ino, name, name_len); BUG_ON(!di || IS_ERR(di)); @@ -3029,7 +3036,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, index = key.offset; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, name, name_len, -1); BUG_ON(!di || IS_ERR(di)); @@ -3058,7 +3065,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) unsigned long nr = 0; if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || - inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) return -ENOTEMPTY; trans = __unlink_start_trans(dir, dentry); @@ -3067,7 +3074,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) btrfs_set_trans_block_group(trans, dir); - if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { + if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, dentry->d_name.name, @@ -3299,6 +3306,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int encoding; int ret; int err = 0; + u64 ino = btrfs_ino(inode); BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); @@ -3309,7 +3317,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, BUG_ON(!path); path->reada = -1; - key.objectid = inode->i_ino; + key.objectid = ino; key.offset = (u64)-1; key.type = (u8)-1; @@ -3337,7 +3345,7 @@ search_again: found_type = btrfs_key_type(&found_key); encoding = 0; - if (found_key.objectid != inode->i_ino) + if (found_key.objectid != ino) break; if (found_type < min_type) @@ -3456,7 +3464,7 @@ delete: ret = btrfs_free_extent(trans, root, extent_start, extent_num_bytes, 0, btrfs_header_owner(leaf), - inode->i_ino, extent_offset); + ino, extent_offset); BUG_ON(ret); } @@ -3655,7 +3663,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) break; err = btrfs_insert_file_extent(trans, root, - inode->i_ino, cur_offset, 0, + btrfs_ino(inode), cur_offset, 0, 0, hole_size, 0, hole_size, 0, 0, 0); if (err) @@ -3812,7 +3820,7 @@ void btrfs_evict_inode(struct inode *inode) if (!(root == root->fs_info->tree_root || root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) - btrfs_return_ino(root, inode->i_ino); + btrfs_return_ino(root, btrfs_ino(inode)); nr = trans->blocks_used; btrfs_end_transaction(trans, root); @@ -3839,7 +3847,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, path = 
btrfs_alloc_path(); BUG_ON(!path); - di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, + di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); @@ -3892,7 +3900,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || + if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) goto out; @@ -3931,6 +3939,7 @@ static void inode_tree_add(struct inode *inode) struct btrfs_inode *entry; struct rb_node **p; struct rb_node *parent; + u64 ino = btrfs_ino(inode); again: p = &root->inode_tree.rb_node; parent = NULL; @@ -3943,9 +3952,9 @@ again: parent = *p; entry = rb_entry(parent, struct btrfs_inode, rb_node); - if (inode->i_ino < entry->vfs_inode.i_ino) + if (ino < btrfs_ino(&entry->vfs_inode)) p = &parent->rb_left; - else if (inode->i_ino > entry->vfs_inode.i_ino) + else if (ino > btrfs_ino(&entry->vfs_inode)) p = &parent->rb_right; else { WARN_ON(!(entry->vfs_inode.i_state & @@ -4009,9 +4018,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < entry->vfs_inode.i_ino) + if (objectid < btrfs_ino(&entry->vfs_inode)) node = node->rb_left; - else if (objectid > entry->vfs_inode.i_ino) + else if (objectid > btrfs_ino(&entry->vfs_inode)) node = node->rb_right; else break; @@ -4019,7 +4028,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= entry->vfs_inode.i_ino) { + if (objectid <= btrfs_ino(&entry->vfs_inode)) { node = prev; break; } @@ -4028,7 +4037,7 @@ again: } while (node) { entry = rb_entry(node, struct btrfs_inode, rb_node); - objectid = entry->vfs_inode.i_ino + 1; + objectid = btrfs_ino(&entry->vfs_inode) + 1; inode = igrab(&entry->vfs_inode); if (inode) { spin_unlock(&root->inode_lock); @@ -4066,7 +4075,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return args->ino == inode->i_ino && + return args->ino == btrfs_ino(inode) && args->root == BTRFS_I(inode)->root; } @@ -4244,9 +4253,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, /* special case for "." 
*/ if (filp->f_pos == 0) { - over = filldir(dirent, ".", 1, - 1, inode->i_ino, - DT_DIR); + over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); if (over) return 0; filp->f_pos = 1; @@ -4265,7 +4272,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) @@ -4420,8 +4427,9 @@ void btrfs_dirty_inode(struct inode *inode) if (IS_ERR(trans)) { if (printk_ratelimit()) { printk(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %ld\n", - inode->i_ino, PTR_ERR(trans)); + "dirty inode %llu error %ld\n", + (unsigned long long)btrfs_ino(inode), + PTR_ERR(trans)); } return; } @@ -4431,8 +4439,9 @@ void btrfs_dirty_inode(struct inode *inode) if (ret) { if (printk_ratelimit()) { printk(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %d\n", - inode->i_ino, ret); + "dirty inode %llu error %d\n", + (unsigned long long)btrfs_ino(inode), + ret); } } } @@ -4452,7 +4461,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) struct extent_buffer *leaf; int ret; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = (u64)-1; @@ -4484,7 +4493,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { BTRFS_I(inode)->index_cnt = 2; goto out; @@ -4657,29 +4666,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, int ret = 0; struct btrfs_key key; struct btrfs_root *root = BTRFS_I(parent_inode)->root; + u64 ino = btrfs_ino(inode); + u64 parent_ino = btrfs_ino(parent_inode); - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); } else { - key.objectid = inode->i_ino; + key.objectid = ino; btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); key.offset = 0; } - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, key.objectid, root->root_key.objectid, - parent_inode->i_ino, - index, name, name_len); + parent_ino, index, name, name_len); } else if (add_backref) { - ret = btrfs_insert_inode_ref(trans, root, - name, name_len, inode->i_ino, - parent_inode->i_ino, index); + ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, + parent_ino, index); } if (ret == 0) { ret = btrfs_insert_dir_item(trans, root, name, name_len, - parent_inode->i_ino, &key, + parent_ino, &key, btrfs_inode_type(inode), index); BUG_ON(ret); @@ -4738,7 +4747,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); err = PTR_ERR(inode); if (IS_ERR(inode)) @@ -4800,7 +4809,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); err = 
PTR_ERR(inode); if (IS_ERR(inode)) @@ -4928,7 +4937,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) goto out_fail; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, S_IFDIR | mode, &index); if (IS_ERR(inode)) { @@ -5049,7 +5058,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, u64 bytenr; u64 extent_start = 0; u64 extent_end = 0; - u64 objectid = inode->i_ino; + u64 objectid = btrfs_ino(inode); u32 found_type; struct btrfs_path *path = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -5557,7 +5566,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), offset, 0); if (ret < 0) goto out; @@ -5574,7 +5583,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, ret = 0; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.objectid != inode->i_ino || + if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) { /* not our file or wrong item type, must cow */ goto out; @@ -5608,7 +5617,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, * look for other files referencing this extent, if we * find any we must cow */ - if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), key.offset - backref_offset, disk_bytenr)) goto out; @@ -5798,9 +5807,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) flush_dcache_page(bvec->bv_page); if (csum != *private) { - printk(KERN_ERR "btrfs csum failed ino %lu off" + printk(KERN_ERR "btrfs csum failed ino %llu off" " %llu csum %u private %u\n", - inode->i_ino, (unsigned long long)start, + (unsigned long long)btrfs_ino(inode), + (unsigned long long)start, csum, *private); err = -EIO; } @@ -5947,9 +5957,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) struct btrfs_dio_private *dip = bio->bi_private; if (err) { - printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " + printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " "sector %#Lx len %u err no %d\n", - dip->inode->i_ino, bio->bi_rw, + (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw, (unsigned long long)bio->bi_sector, bio->bi_size, err); dip->errors = 1; @@ -6859,8 +6869,8 @@ void btrfs_destroy_inode(struct inode *inode) spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { - printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", - inode->i_ino); + printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", + (unsigned long long)btrfs_ino(inode)); list_del_init(&BTRFS_I(inode)->i_orphan); } spin_unlock(&root->orphan_lock); @@ -6999,16 +7009,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, u64 index = 0; u64 root_objectid; int ret; + u64 old_ino = btrfs_ino(old_inode); - if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return -EPERM; /* we only allow rename subvolume link between subvolumes */ - if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) + if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) return -EXDEV; - if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || - (new_inode && 
new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) + if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || + (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) return -ENOTEMPTY; if (S_ISDIR(old_inode->i_mode) && new_inode && @@ -7024,7 +7035,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, filemap_flush(old_inode->i_mapping); /* close the racy window with snapshot create/destroy ioctl */ - if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) down_read(&root->fs_info->subvol_sem); /* * We want to reserve the absolute worst case amount of items. So if @@ -7049,15 +7060,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (ret) goto out_fail; - if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { /* force full log commit if subvolume involved. */ root->fs_info->last_trans_log_full_commit = trans->transid; } else { ret = btrfs_insert_inode_ref(trans, dest, new_dentry->d_name.name, new_dentry->d_name.len, - old_inode->i_ino, - new_dir->i_ino, index); + old_ino, + btrfs_ino(new_dir), index); if (ret) goto out_fail; /* @@ -7073,10 +7084,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * make sure the inode gets flushed if it is replacing * something. */ - if (new_inode && new_inode->i_size && - old_inode && S_ISREG(old_inode->i_mode)) { + if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) btrfs_add_ordered_operation(trans, root, old_inode); - } old_dir->i_ctime = old_dir->i_mtime = ctime; new_dir->i_ctime = new_dir->i_mtime = ctime; @@ -7085,7 +7094,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (old_dentry->d_parent != new_dentry->d_parent) btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); - if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, old_dentry->d_name.name, @@ -7102,7 +7111,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_inode) { new_inode->i_ctime = CURRENT_TIME; - if (unlikely(new_inode->i_ino == + if (unlikely(btrfs_ino(new_inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { root_objectid = BTRFS_I(new_inode)->location.objectid; ret = btrfs_unlink_subvol(trans, dest, new_dir, @@ -7130,7 +7139,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len, 0, index); BUG_ON(ret); - if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { + if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { struct dentry *parent = dget_parent(new_dentry); btrfs_log_new_name(trans, old_inode, old_dir, parent); dput(parent); @@ -7139,7 +7148,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, out_fail: btrfs_end_transaction_throttle(trans, root); out_notrans: - if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&root->fs_info->subvol_sem); return ret; @@ -7284,7 +7293,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, &index); err = PTR_ERR(inode); @@ -7315,7 +7324,7 @@ static int 
btrfs_symlink(struct inode *dir, struct dentry *dentry, path = btrfs_alloc_path(); BUG_ON(!path); - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); key.offset = 0; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); datasize = btrfs_file_extent_calc_inline_size(name_len); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index e1835f8eec93..01dccb4a70bb 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -416,7 +416,7 @@ static noinline int create_subvol(struct btrfs_root *root, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir->i_ino, &key, + name, namelen, btrfs_ino(dir), &key, BTRFS_FT_DIR, index); if (ret) goto fail; @@ -427,7 +427,7 @@ static noinline int create_subvol(struct btrfs_root *root, ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, objectid, root->root_key.objectid, - dir->i_ino, index, name, namelen); + btrfs_ino(dir), index, name, namelen); BUG_ON(ret); @@ -1123,7 +1123,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file, int ret = 0; u64 flags = 0; - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; down_read(&root->fs_info->subvol_sem); @@ -1150,7 +1150,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (root->fs_info->sb->s_flags & MS_RDONLY) return -EROFS; - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; if (copy_from_user(&flags, arg, sizeof(flags))) @@ -1633,7 +1633,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, goto out_dput; } - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { err = -EINVAL; goto out_dput; } @@ -1919,7 +1919,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, } /* clone data */ - key.objectid = src->i_ino; + key.objectid = btrfs_ino(src); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; @@ -1946,7 +1946,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_item_key_to_cpu(leaf, &key, slot); if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || - key.objectid != src->i_ino) + key.objectid != btrfs_ino(src)) break; if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { @@ -1989,7 +1989,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, goto next; memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = inode->i_ino; + new_key.objectid = btrfs_ino(inode); if (off <= key.offset) new_key.offset = key.offset + destoff - off; else @@ -2043,7 +2043,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ret = btrfs_inc_extent_ref(trans, root, disko, diskl, 0, root->root_key.objectid, - inode->i_ino, + btrfs_ino(inode), new_key.offset - datao); BUG_ON(ret); } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e6cb89357256..7b75e0c8ef8d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1410,9 +1410,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < entry->vfs_inode.i_ino) + if (objectid < btrfs_ino(&entry->vfs_inode)) node = node->rb_left; - else if (objectid > entry->vfs_inode.i_ino) + else if (objectid > btrfs_ino(&entry->vfs_inode)) node = node->rb_right; else break; @@ -1420,7 +1420,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= entry->vfs_inode.i_ino) { + if (objectid <= 
btrfs_ino(&entry->vfs_inode)) { node = prev; break; } @@ -1435,7 +1435,7 @@ again: return inode; } - objectid = entry->vfs_inode.i_ino + 1; + objectid = btrfs_ino(&entry->vfs_inode) + 1; if (cond_resched_lock(&root->inode_lock)) goto again; @@ -1471,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, return -ENOMEM; bytenr -= BTRFS_I(reloc_inode)->index_cnt; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, + ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode), bytenr, 0); if (ret < 0) goto out; @@ -1559,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans, if (first) { inode = find_next_inode(root, key.objectid); first = 0; - } else if (inode && inode->i_ino < key.objectid) { + } else if (inode && btrfs_ino(inode) < key.objectid) { btrfs_add_delayed_iput(inode); inode = find_next_inode(root, key.objectid); } - if (inode && inode->i_ino == key.objectid) { + if (inode && btrfs_ino(inode) == key.objectid) { end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); WARN_ON(!IS_ALIGNED(key.offset, @@ -1894,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, struct inode *inode = NULL; u64 objectid; u64 start, end; + u64 ino; objectid = min_key->objectid; while (1) { @@ -1906,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root, inode = find_next_inode(root, objectid); if (!inode) break; + ino = btrfs_ino(inode); - if (inode->i_ino > max_key->objectid) { + if (ino > max_key->objectid) { iput(inode); break; } - objectid = inode->i_ino + 1; + objectid = ino + 1; if (!S_ISREG(inode->i_mode)) continue; - if (unlikely(min_key->objectid == inode->i_ino)) { + if (unlikely(min_key->objectid == ino)) { if (min_key->type > BTRFS_EXTENT_DATA_KEY) continue; if (min_key->type < BTRFS_EXTENT_DATA_KEY) @@ -1929,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, start = 0; } - if (unlikely(max_key->objectid == inode->i_ino)) { + if (unlikely(max_key->objectid == ino)) { if (max_key->type < BTRFS_EXTENT_DATA_KEY) continue; if (max_key->type > BTRFS_EXTENT_DATA_KEY) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index aef6c81e7101..f4c1184b7f1a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -972,7 +972,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode->i_ino, &key, + btrfs_ino(parent_inode), &key, BTRFS_FT_DIR, index); BUG_ON(ret); @@ -1014,7 +1014,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, */ ret = btrfs_add_root_ref(trans, tree_root, objectid, parent_root->root_key.objectid, - parent_inode->i_ino, index, + btrfs_ino(parent_inode), index, dentry->d_name.name, dentry->d_name.len); BUG_ON(ret); dput(parent); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c50271ad3157..4323dc68d6cd 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -519,7 +519,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, * file. This must be done before the btrfs_drop_extents run * so we don't try to drop this extent. 
*/ - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), start, 0); if (ret == 0 && @@ -832,7 +832,7 @@ again: read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); /* if we already have a perfect match, we're done */ - if (inode_in_dir(root, path, dir->i_ino, inode->i_ino, + if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), btrfs_inode_ref_index(eb, ref), name, namelen)) { goto out; @@ -960,8 +960,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, unsigned long ptr; unsigned long ptr_end; int name_len; + u64 ino = btrfs_ino(inode); - key.objectid = inode->i_ino; + key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; @@ -980,7 +981,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid != inode->i_ino || + if (key.objectid != ino || key.type != BTRFS_INODE_REF_KEY) break; ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); @@ -1011,10 +1012,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, if (inode->i_nlink == 0) { if (S_ISDIR(inode->i_mode)) { ret = replay_dir_deletes(trans, root, NULL, path, - inode->i_ino, 1); + ino, 1); BUG_ON(ret); } - ret = insert_orphan_item(trans, root, inode->i_ino); + ret = insert_orphan_item(trans, root, ino); BUG_ON(ret); } btrfs_free_path(path); @@ -2197,6 +2198,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, int ret; int err = 0; int bytes_del = 0; + u64 dir_ino = btrfs_ino(dir); if (BTRFS_I(dir)->logged_trans < trans->transid) return 0; @@ -2212,7 +2214,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, log, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2224,7 +2226,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, BUG_ON(ret); } btrfs_release_path(log, path); - di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, index, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2242,7 +2244,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, if (bytes_del) { struct btrfs_key key; - key.objectid = dir->i_ino; + key.objectid = dir_ino; key.offset = 0; key.type = BTRFS_INODE_ITEM_KEY; btrfs_release_path(log, path); @@ -2300,7 +2302,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, log = root->log_root; mutex_lock(&BTRFS_I(inode)->log_mutex); - ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, + ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), dirid, &index); mutex_unlock(&BTRFS_I(inode)->log_mutex); if (ret == -ENOSPC) { @@ -2366,13 +2368,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, int nritems; u64 first_offset = min_offset; u64 last_offset = (u64)-1; + u64 ino = btrfs_ino(inode); log = root->log_root; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; max_key.offset = (u64)-1; max_key.type = key_type; - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = key_type; min_key.offset = min_offset; @@ -2385,9 +2388,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, * we didn't find anything from this 
transaction, see if there * is anything at all */ - if (ret != 0 || min_key.objectid != inode->i_ino || - min_key.type != key_type) { - min_key.objectid = inode->i_ino; + if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { + min_key.objectid = ino; min_key.type = key_type; min_key.offset = (u64)-1; btrfs_release_path(root, path); @@ -2396,7 +2398,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); return ret; } - ret = btrfs_previous_item(root, path, inode->i_ino, key_type); + ret = btrfs_previous_item(root, path, ino, key_type); /* if ret == 0 there are items for this type, * create a range to tell us the last key of this type. @@ -2414,7 +2416,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, } /* go backward to find any previous key */ - ret = btrfs_previous_item(root, path, inode->i_ino, key_type); + ret = btrfs_previous_item(root, path, ino, key_type); if (ret == 0) { struct btrfs_key tmp; btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); @@ -2449,8 +2451,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, for (i = path->slots[0]; i < nritems; i++) { btrfs_item_key_to_cpu(src, &min_key, i); - if (min_key.objectid != inode->i_ino || - min_key.type != key_type) + if (min_key.objectid != ino || min_key.type != key_type) goto done; ret = overwrite_item(trans, log, dst_path, src, i, &min_key); @@ -2471,7 +2472,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, goto done; } btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); - if (tmp.objectid != inode->i_ino || tmp.type != key_type) { + if (tmp.objectid != ino || tmp.type != key_type) { last_offset = (u64)-1; goto done; } @@ -2497,8 +2498,7 @@ done: * is valid */ ret = insert_dir_log_key(trans, log, path, key_type, - inode->i_ino, first_offset, - last_offset); + ino, first_offset, last_offset); if (ret) err = ret; } @@ -2742,6 +2742,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, int nritems; int ins_start_slot = 0; int ins_nr; + u64 ino = btrfs_ino(inode); log = root->log_root; @@ -2754,11 +2755,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, return -ENOMEM; } - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = BTRFS_INODE_ITEM_KEY; min_key.offset = 0; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; /* today the code can only do partial logging of directories */ if (!S_ISDIR(inode->i_mode)) @@ -2781,8 +2782,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (inode_only == LOG_INODE_EXISTS) max_key_type = BTRFS_XATTR_ITEM_KEY; - ret = drop_objectid_items(trans, log, path, - inode->i_ino, max_key_type); + ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); } @@ -2800,7 +2800,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, break; again: /* note, ins_nr might be > 0 here, cleanup outside the loop */ - if (min_key.objectid != inode->i_ino) + if (min_key.objectid != ino) break; if (min_key.type > max_key.type) break; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 07b9bc350d5d..a8af771fc60c 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name, return -ENOMEM; /* lookup the xattr by name */ - di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, + di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), 
name, strlen(name), 0); if (!di) { ret = -ENODATA; goto out; } @@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, return -ENOMEM; /* first lets see if we already have this xattr */ - di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, + di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, strlen(name), -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, } /* ok we have to create a completely new xattr */ - ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino, + ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); BUG_ON(ret); out: @@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) * NOTE: we set key.offset = 0; because we want to start with the * first xattr that we find and walk forward */ - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = 0; -- cgit v1.2.2 From 82d5902d9c681be37ffa9d70482907f9f0b7ec1f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:33:24 +0800 Subject: Btrfs: Support reading/writing on disk free ino cache This is similar to block group caching. We dedicate a special inode in the fs tree to save the free ino cache. The first time we create/delete a file after mount, the free ino cache will be loaded from disk into memory. When the fs tree is committed, the cache will be written back to disk. To keep compatibility, we check the root generation against the generation of the special inode when loading the cache, so the load will fail if the btrfs filesystem was previously mounted by an older kernel. Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 7 ++++ fs/btrfs/disk-io.c | 1 + fs/btrfs/extent-tree.c | 3 +- fs/btrfs/free-space-cache.c | 97 ++++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/free-space-cache.h | 11 +++++ fs/btrfs/inode-map.c | 87 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/inode-map.h | 2 + fs/btrfs/inode.c | 45 +++++++++++++-------- fs/btrfs/transaction.c | 2 + 9 files changed, 236 insertions(+), 19 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index c96a4e4c5566..b20082e27a9f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -105,6 +105,12 @@ struct btrfs_ordered_sum; /* For storing free space cache */ #define BTRFS_FREE_SPACE_OBJECTID -11ULL +/* + * The inode number assigned to the special inode for storing + * free ino cache + */ +#define BTRFS_FREE_INO_OBJECTID -12ULL + /* dummy objectid represents multiple objectids */ #define BTRFS_MULTIPLE_OBJECTIDS -255ULL @@ -1110,6 +1116,7 @@ struct btrfs_root { wait_queue_head_t cache_wait; struct btrfs_free_space_ctl *free_ino_pinned; u64 cache_progress; + struct inode *cache_inode; struct mutex log_mutex; wait_queue_head_t log_writer_wait; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d02683b1ee16..4f12c30a5470 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2505,6 +2505,7 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) static void free_fs_root(struct btrfs_root *root) { + iput(root->cache_inode); WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); if (root->anon_super.s_dev) { down_write(&root->anon_super.s_umount); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a0e818cb0401..95ce8da63b28 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3145,7 +3145,8 @@ int
btrfs_check_data_free_space(struct inode *inode, u64 bytes) /* make sure bytes are sectorsize aligned */ bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - if (root == root->fs_info->tree_root) { + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) { alloc_chunk = 0; committed = 1; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index fcbdcef6ca28..7d8b6b643403 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -209,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, return ret; } - return btrfs_update_inode(trans, root, inode); + ret = btrfs_update_inode(trans, root, inode); + return ret; } static int readahead_cache(struct inode *inode) @@ -525,6 +526,7 @@ out: spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_CLEAR; spin_unlock(&block_group->lock); + ret = 0; printk(KERN_ERR "btrfs: failed to load free space cache " "for block group %llu\n", block_group->key.objectid); @@ -893,6 +895,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&block_group->lock); + ret = 0; printk(KERN_ERR "btrfs: failed to write free space cace " "for block group %llu\n", block_group->key.objectid); @@ -2458,3 +2461,95 @@ out: return ino; } + +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&root->cache_lock); + if (root->cache_inode) + inode = igrab(root->cache_inode); + spin_unlock(&root->cache_lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, 0); + if (IS_ERR(inode)) + return inode; + + spin_lock(&root->cache_lock); + if (!root->fs_info->closing) + root->cache_inode = igrab(inode); + spin_unlock(&root->cache_lock); + + return inode; +} + +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + return __create_free_space_inode(root, trans, path, + BTRFS_FREE_INO_OBJECTID, 0); +} + +int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + int ret = 0; + u64 root_gen = btrfs_root_generation(&root->root_item); + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. 
+ */ + smp_mb(); + if (fs_info->closing) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + goto out; + + if (root_gen != BTRFS_I(inode)->generation) + goto out_put; + + ret = __load_free_space_cache(root, inode, ctl, path, 0); + + if (ret < 0) + printk(KERN_ERR "btrfs: failed to load free ino cache for " + "root %llu\n", root->root_key.objectid); +out_put: + iput(inode); +out: + btrfs_free_path(path); + return ret; +} + +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct inode *inode; + int ret; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); + if (ret < 0) + printk(KERN_ERR "btrfs: failed to write free ino cache " + "for root %llu\n", root->root_key.objectid); + + iput(inode); + return ret; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index af06e6b6ceaa..8f2613f779ed 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -65,6 +65,17 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path); +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); +int load_free_ino_cache(struct btrfs_fs_info *fs_info, + struct btrfs_root *root); +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); + void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, u64 bytenr, u64 size); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 5be62df90c4f..7967e85c72f5 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -137,6 +137,7 @@ out: static void start_caching(struct btrfs_root *root) { struct task_struct *tsk; + int ret; spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { @@ -147,6 +148,14 @@ static void start_caching(struct btrfs_root *root) root->cached = BTRFS_CACHE_STARTED; spin_unlock(&root->cache_lock); + ret = load_free_ino_cache(root->fs_info, root); + if (ret == 1) { + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + return; + } + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", root->root_key.objectid); BUG_ON(IS_ERR(tsk)); @@ -352,6 +361,84 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root) pinned->op = &pinned_free_ino_op; } +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + u64 alloc_hint = 0; + int ret; + int prealloc; + bool retry = false; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; +again: + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { + ret = PTR_ERR(inode); + goto out; + } + + if (IS_ERR(inode)) { + BUG_ON(retry); + retry = true; + + ret = create_free_ino_inode(root, trans, path); + if (ret) + goto out; + goto again; + } + + BTRFS_I(inode)->generation = 0; + ret = btrfs_update_inode(trans, root, inode); + WARN_ON(ret); + + if 
(i_size_read(inode) > 0) { + ret = btrfs_truncate_free_space_cache(root, trans, path, inode); + if (ret) + goto out_put; + } + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_FINISHED) { + ret = -1; + spin_unlock(&root->cache_lock); + goto out_put; + } + spin_unlock(&root->cache_lock); + + spin_lock(&ctl->tree_lock); + prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; + prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); + prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; + spin_unlock(&ctl->tree_lock); + + /* Just to make sure we have enough space */ + prealloc += 8 * PAGE_CACHE_SIZE; + + ret = btrfs_check_data_free_space(inode, prealloc); + if (ret) + goto out_put; + + ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, + prealloc, prealloc, &alloc_hint); + if (ret) + goto out_put; + btrfs_free_reserved_data_space(inode, prealloc); + +out_put: + iput(inode); +out: + if (ret == 0) + ret = btrfs_write_out_ino_cache(root, trans, path); + + btrfs_free_path(path); + return ret; +} + static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) { struct btrfs_path *path; diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h index eb918451b492..ddb347bfee23 100644 --- a/fs/btrfs/inode-map.h +++ b/fs/btrfs/inode-map.h @@ -5,6 +5,8 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root); void btrfs_unpin_free_ino(struct btrfs_root *root); void btrfs_return_ino(struct btrfs_root *root, u64 objectid); int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans); int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index adec22884a3e..b78d3ab789ca 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -745,6 +745,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, return alloc_hint; } +static inline bool is_free_space_inode(struct btrfs_root *root, + struct inode *inode) +{ + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) + return true; + return false; +} + /* * when extent_io.c finds a delayed allocation range in the file, * the call backs end up in this code. 
The basic idea is to @@ -777,7 +786,7 @@ static noinline int cow_file_range(struct inode *inode, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; - BUG_ON(root == root->fs_info->tree_root); + BUG_ON(is_free_space_inode(root, inode)); trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); @@ -1048,17 +1057,18 @@ static noinline int run_delalloc_nocow(struct inode *inode, int type; int nocow; int check_prev = 1; - bool nolock = false; + bool nolock; u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); BUG_ON(!path); - if (root == root->fs_info->tree_root) { - nolock = true; + + nolock = is_free_space_inode(root, inode); + + if (nolock) trans = btrfs_join_transaction_nolock(root, 1); - } else { + else trans = btrfs_join_transaction(root, 1); - } BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; @@ -1316,8 +1326,7 @@ static int btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1350,8 +1359,7 @@ static int btrfs_clear_bit_hook(struct inode *inode, if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1458,7 +1466,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - if (root == root->fs_info->tree_root) + if (is_free_space_inode(root, inode)) ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); else ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); @@ -1701,7 +1709,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_state *cached_state = NULL; int compress_type = 0; int ret; - bool nolock = false; + bool nolock; ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1); @@ -1709,7 +1717,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) return 0; BUG_ON(!ordered_extent); - nolock = (root == root->fs_info->tree_root); + nolock = is_free_space_inode(root, inode); if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); @@ -3473,7 +3481,9 @@ delete: if (path->slots[0] == 0 || path->slots[0] != pending_del_slot) { - if (root->ref_cows) { + if (root->ref_cows && + BTRFS_I(inode)->location.objectid != + BTRFS_FREE_INO_OBJECTID) { err = -EAGAIN; goto out; } @@ -3765,7 +3775,7 @@ void btrfs_evict_inode(struct inode *inode) truncate_inode_pages(&inode->i_data, 0); if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || - root == root->fs_info->tree_root)) + is_free_space_inode(root, inode))) goto no_delete; if (is_bad_inode(inode)) { @@ -4382,7 +4392,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; smp_mb(); - nolock = (root->fs_info->closing && root == root->fs_info->tree_root); + if (root->fs_info->closing && is_free_space_inode(root, inode)) + nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { if (nolock) @@ 
-6900,7 +6911,7 @@ int btrfs_drop_inode(struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; if (btrfs_root_refs(&root->root_item) == 0 && - root != root->fs_info->tree_root) + !is_free_space_inode(root, inode)) return 1; else return generic_drop_inode(inode); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f4c1184b7f1a..4d1dbcbbaf41 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -761,6 +761,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_update_reloc_root(trans, root); btrfs_orphan_commit_root(trans, root); + btrfs_save_ino_cache(root, trans); + if (root->commit_root != root->node) { mutex_lock(&root->fs_commit_mutex); switch_commit_root(root); -- cgit v1.2.2 From a62f44a5f47ce45e524b55f91542dc386c6de7ef Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 25 Apr 2011 19:43:51 -0400 Subject: Btrfs: fix missing mutex_unlock in btrfs_del_dir_entries_in_log() It is necessary to release the mutex before returning an error when btrfs_alloc_path() fails. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c50271ad3157..f997ec0c1ba4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2209,8 +2209,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; + if (!path) { + err = -ENOMEM; + goto out_unlock; + } di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, name, name_len, -1); @@ -2271,6 +2273,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, } fail: btrfs_free_path(path); +out_unlock: mutex_unlock(&BTRFS_I(dir)->log_mutex); if (ret == -ENOSPC) { root->fs_info->last_trans_log_full_commit = trans->transid; -- cgit v1.2.2 From 43e817a1fdda17f3357602ed7964c248c8c53ae0 Mon Sep 17 00:00:00 2001 From: Itaru Kitayama Date: Mon, 25 Apr 2011 19:43:51 -0400 Subject: btrfs: fix wrong allocating flag when reading page the space cache uses extent_readpages() to read free space information, so we cannot use the GFP_KERNEL flag to allocate memory, or it may lead to deadlock. Signed-off-by: Itaru Kitayama Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5ae0bffaa4d8..0d1196d6f786 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2681,7 +2681,7 @@ int extent_readpages(struct extent_io_tree *tree, prefetchw(&page->flags); list_del(&page->lru); if (!add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + page->index, GFP_NOFS)) { __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); } -- cgit v1.2.2 From 8d413713ca744fa00cf4e05d4054d80727b84789 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 25 Apr 2011 19:43:52 -0400 Subject: Btrfs: check return value of kmalloc() Checks on the return value of kmalloc() are added in several places.
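As a general illustration (not code from this patch), the idiom being enforced is allocate-then-check. A minimal userspace sketch, with malloc() standing in for kmalloc() and a hypothetical struct name:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct new_extents_rec { long start; long len; };

/* Allocate and check before use: on failure return -ENOMEM
 * instead of dereferencing a NULL pointer later on. */
static int alloc_new_extents(struct new_extents_rec **out)
{
	struct new_extents_rec *ne = malloc(sizeof(*ne));
	if (!ne)
		return -ENOMEM;
	memset(ne, 0, sizeof(*ne));
	*out = ne;
	return 0;
}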
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ++++ fs/btrfs/inode.c | 3 +++ 2 files changed, 7 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 31f33ba56fe8..cd52f7f556ef 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8059,6 +8059,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, u64 group_start = group->key.objectid; new_extents = kmalloc(sizeof(*new_extents), GFP_NOFS); + if (!new_extents) { + ret = -ENOMEM; + goto out; + } nr_extents = 1; ret = get_new_locations(reloc_inode, extent_key, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a4157cfdd533..c718d274a352 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -953,6 +953,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, 1, 0, NULL, GFP_NOFS); while (start < end) { async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); + BUG_ON(!async_cow); async_cow->inode = inode; async_cow->root = root; async_cow->locked_page = locked_page; @@ -5001,6 +5002,8 @@ static noinline int uncompress_inline(struct btrfs_path *path, inline_size = btrfs_file_extent_inline_item_len(leaf, btrfs_item_nr(leaf, path->slots[0])); tmp = kmalloc(inline_size, GFP_NOFS); + if (!tmp) + return -ENOMEM; ptr = btrfs_file_extent_inline_start(item); read_extent_buffer(leaf, tmp, ptr, inline_size); -- cgit v1.2.2 From cfece4db110dacfd6b4b87b912c59e77e6846fc0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 25 Apr 2011 19:43:52 -0400 Subject: btrfs: add missing spin_unlock to a rare exit path Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ef6865c17cd6..fe5aec9b3924 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2903,6 +2903,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, spin_lock(&delayed_refs->lock); if (delayed_refs->num_entries == 0) { + spin_unlock(&delayed_refs->lock); printk(KERN_INFO "delayed_refs has NO entry\n"); return ret; } -- cgit v1.2.2 From f789b684bdb96e7ec2fce79445555d4fd55fb94c Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 25 Apr 2011 19:43:52 -0400 Subject: Btrfs: Free free_space item properly in btrfs_trim_block_group() Since commit dc89e9824464e91fa0b06267864ceabe3186fd8b, we've changed to use a specific slab for allocation of free_space items. Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 11d2e9cea09e..13c29b12a213 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2301,7 +2301,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, start = entry->offset; bytes = min(entry->bytes, end - start); unlink_free_space(block_group, entry); - kfree(entry); + kmem_cache_free(btrfs_free_space_cachep, entry); } spin_unlock(&block_group->tree_lock); -- cgit v1.2.2 From a4f0162fd4490daf2c823c185fff79080d266a7c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 25 Apr 2011 19:43:52 -0400 Subject: Btrfs: free bitmaps properly when evicting the cache If our space cache is wrong, we do the right thing and free up everything that we loaded; however, we don't reset the total_bitmaps counter or the thresholds or anything.
So in btrfs_remove_free_space_cache, make sure to call free_bitmap() if it's a bitmap; this will keep us from panicking when we check to make sure we don't have too many bitmaps. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 13c29b12a213..63731a1fb0a1 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1768,10 +1768,13 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) while ((node = rb_last(&block_group->free_space_offset)) != NULL) { info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(block_group, info); - if (info->bitmap) - kfree(info->bitmap); - kmem_cache_free(btrfs_free_space_cachep, info); + if (!info->bitmap) { + unlink_free_space(block_group, info); + kmem_cache_free(btrfs_free_space_cachep, info); + } else { + free_bitmap(block_group, info); + } + if (need_resched()) { spin_unlock(&block_group->tree_lock); cond_resched(); -- cgit v1.2.2 From 64728bbbf892ea7a4aba502c436afbe362217fb9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 25 Apr 2011 19:43:52 -0400 Subject: Btrfs: put the right bio if we have an error In btrfs_submit_direct_hook, if the first btrfs_map_block fails we need to put the orig_bio, not bio. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c718d274a352..ad6b515173ac 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6041,7 +6041,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, ret = btrfs_map_block(map_tree, READ, start_sector << 9, &map_length, NULL, 0); if (ret) { - bio_put(bio); + bio_put(orig_bio); return -EIO; } -- cgit v1.2.2 From 7cf96da3ec7ca225acf4f284b0e904a1f5f98821 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 25 Apr 2011 19:43:53 -0400 Subject: Btrfs: cleanup error handling in inode.c The error handling in several places is changed so that the error number is set only on the error path.
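A minimal sketch of the resulting pattern (simplified userspace stand-ins for the kernel's ERR_PTR helpers, not the patched btrfs code): the error number is assigned inside the IS_ERR() branch instead of unconditionally before the check:

#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* simplified versions of the kernel's ERR_PTR helpers */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static int use_inode(void *inode)
{
	int err = 0;

	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);	/* set err only on the error path */
		goto out_unlock;
	}
	/* ... normal processing of inode ... */
out_unlock:
	return err;
}

The benefit is that err never holds a stale PTR_ERR() value on the success path.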
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ad6b515173ac..870869aab0b8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4731,9 +4731,10 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); - err = PTR_ERR(inode); - if (IS_ERR(inode)) + if (IS_ERR(inode)) { + err = PTR_ERR(inode); goto out_unlock; + } err = btrfs_init_inode_security(trans, inode, dir); if (err) { @@ -4792,9 +4793,10 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); - err = PTR_ERR(inode); - if (IS_ERR(inode)) + if (IS_ERR(inode)) { + err = PTR_ERR(inode); goto out_unlock; + } err = btrfs_init_inode_security(trans, inode, dir); if (err) { @@ -7278,9 +7280,10 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, &index); - err = PTR_ERR(inode); - if (IS_ERR(inode)) + if (IS_ERR(inode)) { + err = PTR_ERR(inode); goto out_unlock; + } err = btrfs_init_inode_security(trans, inode, dir); if (err) { -- cgit v1.2.2 From e9c549998dc24209847007e1f209f3b6c88d21ba Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 26 Apr 2011 23:28:26 -0700 Subject: Revert wrong fixes for common misspellings These changes were incorrectly fixed by codespell. They have now been manually corrected. Signed-off-by: Lucas De Marchi --- fs/btrfs/ctree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2e61fe1b6b8c..8f4b81de3ae2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -718,7 +718,7 @@ struct btrfs_space_info { u64 total_bytes; /* total bytes in the space, this doesn't take mirrors into account */ u64 bytes_used; /* total bytes used, - this does't take mirrors into account */ + this doesn't take mirrors into account */ u64 bytes_pinned; /* total bytes pinned, will be freed when the transaction finishes */ u64 bytes_reserved; /* total bytes the allocator has reserved for -- cgit v1.2.2 From 306e16ce13c0f3d4fc071b45803b5b83c2606011 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 14:29:38 +0200 Subject: btrfs: rename variables clashing with global function names reported by gcc -Wshadow: page_index, page_offset, new_inode, dev_name Signed-off-by: David Sterba --- fs/btrfs/compression.c | 42 +++++++++++++++++++++--------------------- fs/btrfs/compression.h | 2 +- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent_io.c | 28 ++++++++++++++-------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 24 ++++++++++++------------ fs/btrfs/super.c | 4 ++-- 8 files changed, 53 insertions(+), 53 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 41d1d7c70e29..d4cd0f0cd695 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -332,7 +332,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, struct compressed_bio *cb; unsigned long bytes_left; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - int page_index = 0; + int pg_index = 0; struct
page *page; u64 first_byte = disk_start; struct block_device *bdev; @@ -366,8 +366,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, /* create and submit bios for the compressed pages */ bytes_left = compressed_len; - for (page_index = 0; page_index < cb->nr_pages; page_index++) { - page = compressed_pages[page_index]; + for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { + page = compressed_pages[pg_index]; page->mapping = inode->i_mapping; if (bio->bi_size) ret = io_tree->ops->merge_bio_hook(page, 0, @@ -432,7 +432,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, struct compressed_bio *cb) { unsigned long end_index; - unsigned long page_index; + unsigned long pg_index; u64 last_offset; u64 isize = i_size_read(inode); int ret; @@ -456,13 +456,13 @@ static noinline int add_ra_bio_pages(struct inode *inode, end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; while (last_offset < compressed_end) { - page_index = last_offset >> PAGE_CACHE_SHIFT; + pg_index = last_offset >> PAGE_CACHE_SHIFT; - if (page_index > end_index) + if (pg_index > end_index) break; rcu_read_lock(); - page = radix_tree_lookup(&mapping->page_tree, page_index); + page = radix_tree_lookup(&mapping->page_tree, pg_index); rcu_read_unlock(); if (page) { misses++; @@ -476,7 +476,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, if (!page) break; - if (add_to_page_cache_lru(page, mapping, page_index, + if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { page_cache_release(page); goto next; @@ -560,7 +560,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; unsigned long compressed_len; unsigned long nr_pages; - unsigned long page_index; + unsigned long pg_index; struct page *page; struct block_device *bdev; struct bio *comp_bio; @@ -613,10 +613,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; - for (page_index = 0; page_index < nr_pages; page_index++) { - cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | + for (pg_index = 0; pg_index < nr_pages; pg_index++) { + cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!cb->compressed_pages[page_index]) + if (!cb->compressed_pages[pg_index]) goto fail2; } cb->nr_pages = nr_pages; @@ -634,8 +634,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); - for (page_index = 0; page_index < nr_pages; page_index++) { - page = cb->compressed_pages[page_index]; + for (pg_index = 0; pg_index < nr_pages; pg_index++) { + page = cb->compressed_pages[pg_index]; page->mapping = inode->i_mapping; page->index = em_start >> PAGE_CACHE_SHIFT; @@ -702,8 +702,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, return 0; fail2: - for (page_index = 0; page_index < nr_pages; page_index++) - free_page((unsigned long)cb->compressed_pages[page_index]); + for (pg_index = 0; pg_index < nr_pages; pg_index++) + free_page((unsigned long)cb->compressed_pages[pg_index]); kfree(cb->compressed_pages); fail1: @@ -945,7 +945,7 @@ void btrfs_exit_compress(void) int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio_vec *bvec, int vcnt, - unsigned long *page_index, + unsigned long *pg_index, unsigned long *pg_offset) { unsigned long buf_offset; @@ -954,7 +954,7 @@ 
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long working_bytes = total_out - buf_start; unsigned long bytes; char *kaddr; - struct page *page_out = bvec[*page_index].bv_page; + struct page *page_out = bvec[*pg_index].bv_page; /* * start byte is the first byte of the page we're currently @@ -995,11 +995,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, /* check if we need to pick another page */ if (*pg_offset == PAGE_CACHE_SIZE) { - (*page_index)++; - if (*page_index >= vcnt) + (*pg_index)++; + if (*pg_index >= vcnt) return 0; - page_out = bvec[*page_index].bv_page; + page_out = bvec[*pg_index].bv_page; *pg_offset = 0; start_byte = page_offset(page_out) - disk_start; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 51000174b9d7..a12059f4f0fd 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio_vec *bvec, int vcnt, - unsigned long *page_index, + unsigned long *pg_index, unsigned long *pg_offset); int btrfs_submit_compressed_write(struct inode *inode, u64 start, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..b5433bbe7516 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2534,7 +2534,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, int btrfs_commit_write(struct file *file, struct page *page, unsigned from, unsigned to); struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t page_offset, u64 start, u64 end, + size_t pg_offset, u64 start, u64 end, int create); int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..990afa8656a2 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -137,7 +137,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = { * that covers the entire device */ static struct extent_map *btree_get_extent(struct inode *inode, - struct page *page, size_t page_offset, u64 start, u64 len, + struct page *page, size_t pg_offset, u64 start, u64 len, int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..b730c12fa958 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2007,7 +2007,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct btrfs_ordered_extent *ordered; int ret; int nr = 0; - size_t page_offset = 0; + size_t pg_offset = 0; size_t iosize; size_t disk_io_size; size_t blocksize = inode->i_sb->s_blocksize; @@ -2043,9 +2043,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree, char *userpage; struct extent_state *cached = NULL; - iosize = PAGE_CACHE_SIZE - page_offset; + iosize = PAGE_CACHE_SIZE - pg_offset; userpage = kmap_atomic(page, KM_USER0); - memset(userpage + page_offset, 0, iosize); + memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, @@ -2054,7 +2054,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, &cached, GFP_NOFS); break; } - em = get_extent(inode, page, page_offset, cur, + em = get_extent(inode, page, pg_offset, cur, end - cur + 1, 0); if (IS_ERR(em) || !em) { SetPageError(page); @@ -2094,7 +2094,7 @@ static 
int __extent_read_full_page(struct extent_io_tree *tree, struct extent_state *cached = NULL; userpage = kmap_atomic(page, KM_USER0); - memset(userpage + page_offset, 0, iosize); + memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); @@ -2103,7 +2103,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unlock_extent_cached(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } /* the get_extent function already copied into the page */ @@ -2112,7 +2112,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, check_page_uptodate(tree, page); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } /* we have an inline extent but it didn't get marked up @@ -2122,7 +2122,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, SetPageError(page); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } @@ -2135,7 +2135,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; pnr -= page->index; ret = submit_extent_page(READ, tree, page, - sector, disk_io_size, page_offset, + sector, disk_io_size, pg_offset, bdev, bio, pnr, end_bio_extent_readpage, mirror_num, *bio_flags, @@ -2146,7 +2146,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, if (ret) SetPageError(page); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; } if (!nr) { if (!PageError(page)) @@ -2751,7 +2751,7 @@ int extent_prepare_write(struct extent_io_tree *tree, u64 cur_end; struct extent_map *em; unsigned blocksize = 1 << inode->i_blkbits; - size_t page_offset = 0; + size_t pg_offset = 0; size_t block_off_start; size_t block_off_end; int err = 0; @@ -2767,7 +2767,7 @@ int extent_prepare_write(struct extent_io_tree *tree, lock_extent(tree, page_start, page_end, GFP_NOFS); while (block_start <= block_end) { - em = get_extent(inode, page, page_offset, block_start, + em = get_extent(inode, page, pg_offset, block_start, block_end - block_start + 1, 1); if (IS_ERR(em) || !em) goto err; @@ -2811,7 +2811,7 @@ int extent_prepare_write(struct extent_io_tree *tree, block_start + iosize - 1, EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS); ret = submit_extent_page(READ, tree, page, - sector, iosize, page_offset, em->bdev, + sector, iosize, pg_offset, em->bdev, NULL, 1, end_bio_extent_preparewrite, 0, 0, 0); @@ -2828,7 +2828,7 @@ int extent_prepare_write(struct extent_io_tree *tree, &cached, GFP_NOFS); block_start = cur_end + 1; } - page_offset = block_start & (PAGE_CACHE_SIZE - 1); + pg_offset = block_start & (PAGE_CACHE_SIZE - 1); free_extent_map(em); } if (iocount) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index af2d7179c372..b9ce2f720742 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -164,7 +164,7 @@ static inline struct extent_state *extent_state_next(struct extent_state *state) typedef struct extent_map *(get_extent_t)(struct inode *inode, struct page *page, - size_t page_offset, + size_t pg_offset, u64 start, u64 len, int create); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..fc966472e3ad 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6985,7 +6985,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct btrfs_trans_handle *trans; struct 
btrfs_root *root = BTRFS_I(old_dir)->root; struct btrfs_root *dest = BTRFS_I(new_dir)->root; - struct inode *new_inode = new_dentry->d_inode; + struct inode *newinode = new_dentry->d_inode; struct inode *old_inode = old_dentry->d_inode; struct timespec ctime = CURRENT_TIME; u64 index = 0; @@ -7000,18 +7000,18 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, return -EXDEV; if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || - (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) + (newinode && newinode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) return -ENOTEMPTY; - if (S_ISDIR(old_inode->i_mode) && new_inode && - new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) + if (S_ISDIR(old_inode->i_mode) && newinode && + newinode->i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; /* * we're using rename to replace one file with another. * and the replacement file is large. Start IO on it now so * we don't add too much work to the end of the transaction */ - if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && + if (newinode && S_ISREG(old_inode->i_mode) && newinode->i_size && old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) filemap_flush(old_inode->i_mapping); @@ -7065,7 +7065,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * make sure the inode gets flushed if it is replacing * something. */ - if (new_inode && new_inode->i_size && + if (newinode && newinode->i_size && old_inode && S_ISREG(old_inode->i_mode)) { btrfs_add_ordered_operation(trans, root, old_inode); } @@ -7092,16 +7092,16 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, } BUG_ON(ret); - if (new_inode) { - new_inode->i_ctime = CURRENT_TIME; - if (unlikely(new_inode->i_ino == + if (newinode) { + newinode->i_ctime = CURRENT_TIME; + if (unlikely(newinode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - root_objectid = BTRFS_I(new_inode)->location.objectid; + root_objectid = BTRFS_I(newinode)->location.objectid; ret = btrfs_unlink_subvol(trans, dest, new_dir, root_objectid, new_dentry->d_name.name, new_dentry->d_name.len); - BUG_ON(new_inode->i_nlink == 0); + BUG_ON(newinode->i_nlink == 0); } else { ret = btrfs_unlink_inode(trans, dest, new_dir, new_dentry->d_inode, @@ -7109,7 +7109,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len); } BUG_ON(ret); - if (new_inode->i_nlink == 0) { + if (newinode->i_nlink == 0) { ret = btrfs_orphan_add(trans, new_dentry->d_inode); BUG_ON(ret); } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..3e28521643fb 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -739,7 +739,7 @@ static int btrfs_set_super(struct super_block *s, void *data) * for multiple device setup. Make sure to keep it in sync. 
*/ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) + const char *device_name, void *data) { struct block_device *bdev = NULL; struct super_block *s; @@ -762,7 +762,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, if (error) return ERR_PTR(error); - error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices); + error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices); if (error) goto error_free_subvol_name; -- cgit v1.2.2 From edc95aec57661c8e568e18f6c3f002aefa07ebc8 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 14:31:08 +0200 Subject: btrfs: remove nested duplicate variable declarations Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 1 - fs/btrfs/free-space-cache.c | 3 --- 2 files changed, 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..c60197b36bc8 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3647,7 +3647,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans, ret = 0; if (slot == 0) { - struct btrfs_disk_key disk_key; btrfs_cpu_key_to_disk(&disk_key, cpu_key); ret = fixup_low_keys(trans, root, path, &disk_key, 1); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 63731a1fb0a1..9e69c6b8409c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, while(1) { if (entry->bytes < bytes || (!entry->bitmap && entry->offset < min_start)) { - struct rb_node *node; - node = rb_next(&entry->offset_index); if (!node) break; @@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, cluster, entry, bytes, min_start); if (ret == 0) { - struct rb_node *node; node = rb_next(&entry->offset_index); if (!node) break; -- cgit v1.2.2 From 4891aca2dac612a2f21a3278d9906ade13b55788 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 16:45:00 +0200 Subject: btrfs: fix dereference before check The superblock's ->s_fs_info is properly set in btrfs_fill_super, after a call to open_ctree, which dereferences it before the check. Although tree_root is set via btrfs_set_super, let's be defensive and leave the check in place.
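The shape of the defect, as a self-contained sketch (hypothetical names, not the open_ctree code): initializing a local from tree_root->fs_info before tree_root has been validated defeats the later NULL check, so the fix defers the dereference until after the check:

#include <stddef.h>

struct fs_info_stub { int open_count; };
struct root_stub { struct fs_info_stub *fs_info; };

static struct fs_info_stub *get_fs_info(struct root_stub *tree_root)
{
	struct fs_info_stub *fs_info = NULL;

	/* check the pointer first ... */
	if (!tree_root || !tree_root->fs_info)
		return NULL;

	/* ... and dereference it only afterwards */
	fs_info = tree_root->fs_info;
	return fs_info;
}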
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 990afa8656a2..25e4b8f1d0ef 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1611,7 +1611,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *tree_root = btrfs_sb(sb); - struct btrfs_fs_info *fs_info = tree_root->fs_info; + struct btrfs_fs_info *fs_info = NULL; struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), @@ -1623,11 +1623,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_super_block *disk_super; - if (!extent_root || !tree_root || !fs_info || + if (!extent_root || !tree_root || !tree_root->fs_info || !chunk_root || !dev_root || !csum_root) { err = -ENOMEM; goto fail; } + fs_info = tree_root->fs_info; ret = init_srcu_struct(&fs_info->subvol_srcu); if (ret) { -- cgit v1.2.2 From c704005d886cf0bc9bc3974eb009b22fe0da32c7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 18:00:01 +0200 Subject: btrfs: unify checking of IS_ERR and null use IS_ERR_OR_NULL when possible, done by this coccinelle script: @ match @ identifier id; @@ ( - BUG_ON(IS_ERR(id) || !id); + BUG_ON(IS_ERR_OR_NULL(id)); | - IS_ERR(id) || !id + IS_ERR_OR_NULL(id) | - !id || IS_ERR(id) + IS_ERR_OR_NULL(id) ) Signed-off-by: David Sterba --- fs/btrfs/acl.c | 2 +- fs/btrfs/extent_io.c | 12 ++++++------ fs/btrfs/file.c | 2 +- fs/btrfs/inode.c | 12 ++++++------ fs/btrfs/relocation.c | 2 +- fs/btrfs/tree-log.c | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 5d505aaa72fb..1a21c99a91b8 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -287,7 +287,7 @@ int btrfs_acl_chmod(struct inode *inode) return 0; acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); - if (IS_ERR(acl) || !acl) + if (IS_ERR_OR_NULL(acl)) return PTR_ERR(acl); clone = posix_acl_clone(acl, GFP_KERNEL); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b730c12fa958..3c92712e9763 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2056,7 +2056,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, } em = get_extent(inode, page, pg_offset, cur, end - cur + 1, 0); - if (IS_ERR(em) || !em) { + if (IS_ERR_OR_NULL(em)) { SetPageError(page); unlock_extent(tree, cur, end, GFP_NOFS); break; @@ -2341,7 +2341,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, } em = epd->get_extent(inode, page, pg_offset, cur, end - cur + 1, 1); - if (IS_ERR(em) || !em) { + if (IS_ERR_OR_NULL(em)) { SetPageError(page); break; } @@ -2769,7 +2769,7 @@ int extent_prepare_write(struct extent_io_tree *tree, while (block_start <= block_end) { em = get_extent(inode, page, pg_offset, block_start, block_end - block_start + 1, 1); - if (IS_ERR(em) || !em) + if (IS_ERR_OR_NULL(em)) goto err; cur_end = min(block_end, extent_map_end(em) - 1); @@ -2899,7 +2899,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, len = end - start + 1; write_lock(&map->lock); em = lookup_extent_mapping(map, start, len); - if (!em || IS_ERR(em)) { + if (IS_ERR_OR_NULL(em)) { write_unlock(&map->lock); break; } @@ -2942,7 +2942,7 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock, em = get_extent(inode, NULL, 0, 
start, blksize, 0); unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, &cached_state, GFP_NOFS); - if (!em || IS_ERR(em)) + if (IS_ERR_OR_NULL(em)) return 0; if (em->block_start > EXTENT_MAP_LAST_BYTE) @@ -2976,7 +2976,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode, break; len = (len + sectorsize - 1) & ~(sectorsize - 1); em = get_extent(inode, NULL, 0, offset, len, 0); - if (!em || IS_ERR(em)) + if (IS_ERR_OR_NULL(em)) return em; /* if this isn't a hole return it */ diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 75899a01dded..83abd274370b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1375,7 +1375,7 @@ static long btrfs_fallocate(struct file *file, int mode, while (1) { em = btrfs_get_extent(inode, NULL, 0, cur_offset, alloc_end - cur_offset, 0); - BUG_ON(IS_ERR(em) || !em); + BUG_ON(IS_ERR_OR_NULL(em)); last_byte = min(extent_map_end(em), alloc_end); last_byte = (last_byte + mask) & ~mask; if (em->block_start == EXTENT_MAP_HOLE || diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fc966472e3ad..ba760c3ced28 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1855,7 +1855,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, } read_unlock(&em_tree->lock); - if (!em || IS_ERR(em)) { + if (IS_ERR_OR_NULL(em)) { kfree(failrec); return -EIO; } @@ -3006,7 +3006,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); + BUG_ON(IS_ERR_OR_NULL(di)); leaf = path->nodes[0]; btrfs_dir_item_key_to_cpu(leaf, di, &key); @@ -3022,7 +3022,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, BUG_ON(ret != -ENOENT); di = btrfs_search_dir_index_item(root, path, dir->i_ino, name, name_len); - BUG_ON(!di || IS_ERR(di)); + BUG_ON(IS_ERR_OR_NULL(di)); leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); @@ -3032,7 +3032,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); + BUG_ON(IS_ERR_OR_NULL(di)); leaf = path->nodes[0]; btrfs_dir_item_key_to_cpu(leaf, di, &key); @@ -3635,7 +3635,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) while (1) { em = btrfs_get_extent(inode, NULL, 0, cur_offset, block_end - cur_offset, 0); - BUG_ON(IS_ERR(em) || !em); + BUG_ON(IS_ERR_OR_NULL(em)); last_byte = min(extent_map_end(em), block_end); last_byte = (last_byte + mask) & ~mask; if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { @@ -3841,7 +3841,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, if (IS_ERR(di)) ret = PTR_ERR(di); - if (!di || IS_ERR(di)) + if (IS_ERR_OR_NULL(di)) goto out_err; btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 199a80134312..fed0aaec0753 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3220,7 +3220,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, key.offset = 0; inode = btrfs_iget(fs_info->sb, &key, root, NULL); - if (!inode || IS_ERR(inode) || is_bad_inode(inode)) { + if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) { if (inode && !IS_ERR(inode)) iput(inode); return -ENOENT; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..d5313d63f967 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1205,7 +1205,7 @@ static noinline int 
replay_one_name(struct btrfs_trans_handle *trans, } else { BUG(); } - if (!dst_di || IS_ERR(dst_di)) { + if (IS_ERR_OR_NULL(dst_di)) { /* we need a sequence number to insert, so we only * do inserts for the BTRFS_DIR_INDEX_KEY types */ @@ -1426,7 +1426,7 @@ again: dir_key->offset, name, name_len, 0); } - if (!log_di || IS_ERR(log_di)) { + if (IS_ERR_OR_NULL(log_di)) { btrfs_dir_item_key_to_cpu(eb, di, &location); btrfs_release_path(root, path); btrfs_release_path(log, log_path); -- cgit v1.2.2 From 62a45b60923a576170a1a0c309c240d9f40d193d Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Apr 2011 15:52:26 +0200 Subject: btrfs: make functions static when possible Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 4 ++-- fs/btrfs/extent-tree.c | 6 +++--- fs/btrfs/free-space-cache.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c60197b36bc8..a36c87db4dc4 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -74,8 +74,8 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p) * retake all the spinlocks in the path. You can safely use NULL * for held */ -noinline void btrfs_clear_path_blocking(struct btrfs_path *p, - struct extent_buffer *held) +static noinline void btrfs_clear_path_blocking(struct btrfs_path *p, + struct extent_buffer *held) { int i; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd52f7f556ef..7cdce82e03e7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -94,7 +94,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) return (cache->flags & bits) == bits; } -void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { atomic_inc(&cache->count); } @@ -3651,8 +3651,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, spin_unlock(&block_rsv->lock); } -void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, - struct btrfs_block_rsv *dest, u64 num_bytes) +static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, + struct btrfs_block_rsv *dest, u64 num_bytes) { struct btrfs_space_info *space_info = block_rsv->space_info; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 9e69c6b8409c..d06abe20a729 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1455,8 +1455,8 @@ out: return ret; } -bool try_merge_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info, bool update_stat) +static bool try_merge_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; struct btrfs_free_space *right_info; -- cgit v1.2.2 From f993c883ad8e111fb9e9ae603540acbe94f7246c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Apr 2011 23:35:57 +0200 Subject: btrfs: drop unused argument from extent_io_tree_init all callers pass GFP_NOFS, but the GFP mask argument is not used in the function; GFP_ATOMIC is passed to the radix tree initialization and it's the only correct one, since we're using the preload/insert mechanism of the radix tree. Let's drop the gfp mask from the btrfs function; this will not change behaviour.
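For context, the preload/insert mechanism the message refers to reserves radix-tree nodes with a sleeping allocation up front, so that the insert itself, which typically runs under a spinlock, only ever needs GFP_ATOMIC. A hedged kernel-style sketch (the function name and the caller-supplied spinlock are assumptions; this is not btrfs code):

#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Reserve nodes while sleeping is still allowed, then insert
 * under the lock using only the preallocated nodes. */
static int cached_insert(struct radix_tree_root *root, unsigned long index,
			 void *item, spinlock_t *lock)
{
	int ret = radix_tree_preload(GFP_NOFS);	/* may sleep */
	if (ret)
		return ret;

	spin_lock(lock);
	ret = radix_tree_insert(root, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();
	return ret;
}

With this split, allocation is dealt with before the lock is taken, which is why a caller-visible gfp mask on the tree initializer had no effect.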
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 9 ++++----- fs/btrfs/extent_io.c | 2 +- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 4 ++-- fs/btrfs/relocation.c | 2 +- fs/btrfs/transaction.c | 3 +-- 6 files changed, 10 insertions(+), 12 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 25e4b8f1d0ef..3ce80f71e98a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1080,7 +1080,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->log_transid = 0; root->last_log_commit = 0; extent_io_tree_init(&root->dirty_log_pages, - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); @@ -1712,8 +1712,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, - fs_info->btree_inode->i_mapping, - GFP_NOFS); + fs_info->btree_inode->i_mapping); extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, GFP_NOFS); @@ -1729,9 +1728,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->block_group_cache_tree = RB_ROOT; extent_io_tree_init(&fs_info->freed_extents[0], - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); extent_io_tree_init(&fs_info->freed_extents[1], - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); fs_info->pinned_extents = &fs_info->freed_extents[0]; fs_info->do_barriers = 1; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3c92712e9763..e67ed76668e0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -101,7 +101,7 @@ void extent_io_exit(void) } void extent_io_tree_init(struct extent_io_tree *tree, - struct address_space *mapping, gfp_t mask) + struct address_space *mapping) { tree->state = RB_ROOT; INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index b9ce2f720742..e9cfe8d1661c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -169,7 +169,7 @@ typedef struct extent_map *(get_extent_t)(struct inode *inode, int create); void extent_io_tree_init(struct extent_io_tree *tree, - struct address_space *mapping, gfp_t mask); + struct address_space *mapping); int try_release_extent_mapping(struct extent_map_tree *map, struct extent_io_tree *tree, struct page *page, gfp_t mask); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ba760c3ced28..3c98164f8b24 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6786,8 +6786,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); - extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); - extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS); + extent_io_tree_init(&ei->io_tree, &inode->i_data); + extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); mutex_init(&ei->log_mutex); btrfs_ordered_inode_tree_init(&ei->ordered_tree); INIT_LIST_HEAD(&ei->i_orphan); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index fed0aaec0753..f3edf45317bc 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3935,7 +3935,7 @@ static struct reloc_control *alloc_reloc_control(void) INIT_LIST_HEAD(&rc->reloc_roots); backref_cache_init(&rc->backref_cache); mapping_tree_init(&rc->reloc_root_tree); - extent_io_tree_init(&rc->processed_blocks, NULL, 
GFP_NOFS); + extent_io_tree_init(&rc->processed_blocks, NULL); return rc; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..955f76eb0fa8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -80,8 +80,7 @@ static noinline int join_transaction(struct btrfs_root *root) INIT_LIST_HEAD(&cur_trans->pending_snapshots); list_add_tail(&cur_trans->list, &root->fs_info->trans_list); extent_io_tree_init(&cur_trans->dirty_pages, - root->fs_info->btree_inode->i_mapping, - GFP_NOFS); + root->fs_info->btree_inode->i_mapping); spin_lock(&root->fs_info->new_trans_lock); root->fs_info->running_transaction = cur_trans; spin_unlock(&root->fs_info->new_trans_lock); -- cgit v1.2.2 From a8067e022ab54fde8953880a64572c3acca644dc Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 00:34:43 +0200 Subject: btrfs: drop unused parameter from extent_map_tree_init the GFP flags are not stored anywhere and all allocations are done via alloc_extent_map(GFP_NOFS). Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 3 +-- fs/btrfs/extent_map.c | 3 +-- fs/btrfs/extent_map.h | 2 +- fs/btrfs/inode.c | 2 +- fs/btrfs/volumes.c | 2 +- 5 files changed, 5 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3ce80f71e98a..f2ee584b8efd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1713,8 +1713,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, fs_info->btree_inode->i_mapping); - extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, - GFP_NOFS); + extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index a24a3f2fa13e..3c8f374a8e2d 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -28,12 +28,11 @@ void extent_map_exit(void) /** * extent_map_tree_init - initialize extent map tree * @tree: tree to initialize - * @mask: flags for memory allocations during tree operations * * Initialize the extent tree @tree. Should be called for each new inode * or other user of the extent_map interface. 
*/ -void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) +void extent_map_tree_init(struct extent_map_tree *tree) { tree->map = RB_ROOT; rwlock_init(&tree->lock); diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 28b44dbd1e35..255813c51b9d 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -49,7 +49,7 @@ static inline u64 extent_map_block_end(struct extent_map *em) return em->block_start + em->block_len; } -void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask); +void extent_map_tree_init(struct extent_map_tree *tree); struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len); int add_extent_mapping(struct extent_map_tree *tree, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3c98164f8b24..f54c015cc294 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6785,7 +6785,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->force_compress = BTRFS_COMPRESS_NONE; inode = &ei->vfs_inode; - extent_map_tree_init(&ei->extent_tree, GFP_NOFS); + extent_map_tree_init(&ei->extent_tree); extent_io_tree_init(&ei->io_tree, &inode->i_data); extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); mutex_init(&ei->log_mutex); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c7367ae5a3e6..15d7dc943c9b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2849,7 +2849,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) void btrfs_mapping_init(struct btrfs_mapping_tree *tree) { - extent_map_tree_init(&tree->map_tree, GFP_NOFS); + extent_map_tree_init(&tree->map_tree); } void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) -- cgit v1.2.2 From 172ddd60a662c4d8bf2809462866ddddd6431ea5 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 00:48:27 +0200 Subject: btrfs: drop gfp parameter from alloc_extent_map pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/extent_map.c | 5 ++--- fs/btrfs/extent_map.h | 2 +- fs/btrfs/file.c | 4 ++-- fs/btrfs/inode.c | 12 ++++++------ fs/btrfs/relocation.c | 2 +- fs/btrfs/volumes.c | 4 ++-- 8 files changed, 16 insertions(+), 17 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f2ee584b8efd..e1e55679d061 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -154,7 +154,7 @@ static struct extent_map *btree_get_extent(struct inode *inode, } read_unlock(&em_tree->lock); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7cdce82e03e7..6a3d53783d55 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6694,7 +6694,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, u64 start = extent_key->objectid - offset; u64 end = start + extent_key->offset - 1; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = start; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 3c8f374a8e2d..2d0410344ea3 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -40,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree) /** * alloc_extent_map - allocate new extent map structure - * @mask: memory allocation flags * * Allocate a new extent_map structure. 
The new structure is * returned with a reference count of one and needs to be * freed using free_extent_map() */ -struct extent_map *alloc_extent_map(gfp_t mask) +struct extent_map *alloc_extent_map(void) { struct extent_map *em; - em = kmem_cache_alloc(extent_map_cache, mask); + em = kmem_cache_alloc(extent_map_cache, GFP_NOFS); if (!em) return NULL; em->in_tree = 0; diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 255813c51b9d..33a7890b1f40 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -56,7 +56,7 @@ int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); -struct extent_map *alloc_extent_map(gfp_t mask); +struct extent_map *alloc_extent_map(void); void free_extent_map(struct extent_map *em); int __init extent_map_init(void); void extent_map_exit(void); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 83abd274370b..80eabe85409a 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -191,9 +191,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, } while (1) { if (!split) - split = alloc_extent_map(GFP_NOFS); + split = alloc_extent_map(); if (!split2) - split2 = alloc_extent_map(GFP_NOFS); + split2 = alloc_extent_map(); BUG_ON(!split || !split2); write_lock(&em_tree->lock); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f54c015cc294..26f4d56cf049 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -649,7 +649,7 @@ retry: async_extent->start + async_extent->ram_size - 1, 0); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = async_extent->start; em->len = async_extent->ram_size; @@ -826,7 +826,7 @@ static noinline int cow_file_range(struct inode *inode, (u64)-1, &ins, 1); BUG_ON(ret); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = start; em->orig_start = em->start; @@ -1177,7 +1177,7 @@ out_check: struct extent_map *em; struct extent_map_tree *em_tree; em_tree = &BTRFS_I(inode)->extent_tree; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = cur_offset; em->orig_start = em->start; @@ -5069,7 +5069,7 @@ again: else goto out; } - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { err = -ENOMEM; goto out; @@ -5382,7 +5382,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag u64 hole_start = start; u64 hole_len = len; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { err = -ENOMEM; goto out; @@ -5483,7 +5483,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, } if (!em) { - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f3edf45317bc..2097a88f60aa 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2870,7 +2870,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, struct extent_map *em; int ret = 0; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) return -ENOMEM; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 15d7dc943c9b..76acd1d235e4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2609,7 +2609,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { ret = -ENOMEM; goto 
error; @@ -3499,7 +3499,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, free_extent_map(em); } - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) return -ENOMEM; num_stripes = btrfs_chunk_num_stripes(leaf, chunk); -- cgit v1.2.2 From f09d1f60e6aa82fb4cfaa525e21f6287fc1516f4 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:08:01 +0200 Subject: btrfs: drop gfp parameter from find_extent_buffer pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/extent_io.c | 3 +-- fs/btrfs/extent_io.h | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e1e55679d061..1c0752e99066 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -963,7 +963,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, struct inode *btree_inode = root->fs_info->btree_inode; struct extent_buffer *eb; eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, - bytenr, blocksize, GFP_NOFS); + bytenr, blocksize); return eb; } @@ -2696,7 +2696,7 @@ int btree_lock_page_hook(struct page *page) goto out; len = page->private >> 2; - eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS); + eb = find_extent_buffer(io_tree, bytenr, len); if (!eb) goto out; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e67ed76668e0..ad0f0a95ad3a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3377,8 +3377,7 @@ free_eb: } struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, - u64 start, unsigned long len, - gfp_t mask) + u64 start, unsigned long len) { struct extent_buffer *eb; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e9cfe8d1661c..ff220c3c01b0 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -263,8 +263,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, struct page *page0, gfp_t mask); struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, - u64 start, unsigned long len, - gfp_t mask); + u64 start, unsigned long len); void free_extent_buffer(struct extent_buffer *eb); int read_extent_buffer_pages(struct extent_io_tree *tree, struct extent_buffer *eb, u64 start, int wait, -- cgit v1.2.2 From ba14419264684b290f0d0b7f48d26eafb11fc0c6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:12:06 +0200 Subject: btrfs: drop gfp parameter from alloc_extent_buffer pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/extent_io.c | 7 +++---- fs/btrfs/extent_io.h | 3 +-- 3 files changed, 7 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1c0752e99066..4084959b36fd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -380,7 +380,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) len = page->private >> 2; WARN_ON(len == 0); - eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { WARN_ON(1); goto out; @@ -525,7 +525,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, len = page->private >> 2; WARN_ON(len == 0); - eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { ret = -EIO; goto out; @@ -974,7 +974,7 @@ struct extent_buffer 
*btrfs_find_create_tree_block(struct btrfs_root *root, struct extent_buffer *eb; eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, - bytenr, blocksize, NULL, GFP_NOFS); + bytenr, blocksize, NULL); return eb; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ad0f0a95ad3a..9369289ce771 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3266,8 +3266,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, - struct page *page0, - gfp_t mask) + struct page *page0) { unsigned long num_pages = num_extent_pages(start, len); unsigned long i; @@ -3288,7 +3287,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, } rcu_read_unlock(); - eb = __alloc_extent_buffer(tree, start, len, mask); + eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS); if (!eb) return NULL; @@ -3305,7 +3304,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, i = 0; } for (; i < num_pages; i++, index++) { - p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); + p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM); if (!p) { WARN_ON(1); goto free_eb; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index ff220c3c01b0..3c3be74c934e 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -260,8 +260,7 @@ void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, - struct page *page0, - gfp_t mask); + struct page *page0); struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len); void free_extent_buffer(struct extent_buffer *eb); -- cgit v1.2.2 From b3b4aa74b58bded927f579fff787fb6fa1c0393c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:20:15 +0200 Subject: btrfs: drop unused parameter from btrfs_release_path parameter tree root it's not used since commit 5f39d397dfbe140a14edecd4e73c34ce23c4f9ee ("Btrfs: Create extent_buffer interface for large blocksizes") Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 28 ++++++------- fs/btrfs/ctree.h | 2 +- fs/btrfs/dir-item.c | 2 +- fs/btrfs/extent-tree.c | 44 ++++++++++---------- fs/btrfs/file-item.c | 12 +++--- fs/btrfs/file.c | 12 +++--- fs/btrfs/free-space-cache.c | 14 +++---- fs/btrfs/inode.c | 34 ++++++++-------- fs/btrfs/ioctl.c | 12 +++--- fs/btrfs/relocation.c | 30 +++++++------- fs/btrfs/root-tree.c | 10 ++--- fs/btrfs/tree-defrag.c | 2 +- fs/btrfs/tree-log.c | 98 ++++++++++++++++++++++----------------------- fs/btrfs/volumes.c | 16 ++++---- fs/btrfs/xattr.c | 4 +- 15 files changed, 160 insertions(+), 160 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a36c87db4dc4..fad8f23d70f0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -107,7 +107,7 @@ void btrfs_free_path(struct btrfs_path *p) { if (!p) return; - btrfs_release_path(NULL, p); + btrfs_release_path(p); kmem_cache_free(btrfs_path_cachep, p); } @@ -117,7 +117,7 @@ void btrfs_free_path(struct btrfs_path *p) * * It is safe to call this on paths that no locks or extent buffers held. 
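 *
 * With the unused root argument dropped, a typical search/release
 * cycle becomes (illustrative sketch; assumes a populated struct
 * btrfs_key key and a valid root in scope):
 *
 *	struct btrfs_path *path;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	btrfs_release_path(path);
 *	btrfs_free_path(path);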
*/ -noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) +noinline void btrfs_release_path(struct btrfs_path *p) { int i; @@ -1328,7 +1328,7 @@ static noinline int reada_for_balance(struct btrfs_root *root, ret = -EAGAIN; /* release the whole path */ - btrfs_release_path(root, path); + btrfs_release_path(path); /* read the blocks */ if (block1) @@ -1475,7 +1475,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, return 0; } free_extent_buffer(tmp); - btrfs_release_path(NULL, p); + btrfs_release_path(p); return -EIO; } } @@ -1494,7 +1494,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, if (p->reada) reada_for_search(root, p, level, slot, key->objectid); - btrfs_release_path(NULL, p); + btrfs_release_path(p); ret = -EAGAIN; tmp = read_tree_block(root, blocknr, blocksize, 0); @@ -1563,7 +1563,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans, } b = p->nodes[level]; if (!b) { - btrfs_release_path(NULL, p); + btrfs_release_path(p); goto again; } BUG_ON(btrfs_header_nritems(b) == 1); @@ -1753,7 +1753,7 @@ done: if (!p->leave_spinning) btrfs_set_path_blocking(p); if (ret < 0) - btrfs_release_path(root, p); + btrfs_release_path(p); return ret; } @@ -3026,7 +3026,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, struct btrfs_file_extent_item); extent_len = btrfs_file_extent_num_bytes(leaf, fi); } - btrfs_release_path(root, path); + btrfs_release_path(path); path->keep_locks = 1; path->search_for_split = 1; @@ -3948,7 +3948,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) else return 1; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; @@ -4072,7 +4072,7 @@ find_next_key: sret = btrfs_find_next_key(root, path, min_key, level, cache_only, min_trans); if (sret == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } else { goto out; @@ -4151,7 +4151,7 @@ next: btrfs_node_key_to_cpu(c, &cur_key, slot); orig_lowest = path->lowest_level; - btrfs_release_path(root, path); + btrfs_release_path(path); path->lowest_level = level; ret = btrfs_search_slot(NULL, root, &cur_key, path, 0, 0); @@ -4228,7 +4228,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) again: level = 1; next = NULL; - btrfs_release_path(root, path); + btrfs_release_path(path); path->keep_locks = 1; @@ -4284,7 +4284,7 @@ again: goto again; if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto done; } @@ -4323,7 +4323,7 @@ again: goto again; if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto done; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b5433bbe7516..3f301f05099d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2290,7 +2290,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *parent, int start_slot, int cache_only, u64 *last_ret, struct btrfs_key *progress); -void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); +void btrfs_release_path(struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); void btrfs_set_path_blocking(struct btrfs_path *p); diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae69..ab8afed671a0 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -172,7 +172,7 @@ second_insert: ret = 0; goto out_free; } - btrfs_release_path(root, path); 
+ btrfs_release_path(path); btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = index; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6a3d53783d55..a160f11465f8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -379,7 +379,7 @@ again: break; caching_ctl->progress = last; - btrfs_release_path(extent_root, path); + btrfs_release_path(path); up_read(&fs_info->extent_commit_sem); mutex_unlock(&caching_ctl->mutex); if (btrfs_transaction_in_commit(fs_info)) @@ -754,7 +754,7 @@ again: atomic_inc(&head->node.refs); spin_unlock(&delayed_refs->lock); - btrfs_release_path(root->fs_info->extent_root, path); + btrfs_release_path(path); mutex_lock(&head->mutex); mutex_unlock(&head->mutex); @@ -934,7 +934,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans, break; } } - btrfs_release_path(root, path); + btrfs_release_path(path); if (owner < BTRFS_FIRST_FREE_OBJECTID) new_size += sizeof(*bi); @@ -1042,7 +1042,7 @@ again: return 0; #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 key.type = BTRFS_EXTENT_REF_V0_KEY; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) { err = ret; @@ -1080,7 +1080,7 @@ again: if (match_extent_data_ref(leaf, ref, root_objectid, owner, offset)) { if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } err = 0; @@ -1141,7 +1141,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, if (match_extent_data_ref(leaf, ref, root_objectid, owner, offset)) break; - btrfs_release_path(root, path); + btrfs_release_path(path); key.offset++; ret = btrfs_insert_empty_item(trans, root, path, &key, size); @@ -1167,7 +1167,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); ret = 0; fail: - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } @@ -1293,7 +1293,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, ret = -ENOENT; #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 if (ret == -ENOENT && parent) { - btrfs_release_path(root, path); + btrfs_release_path(path); key.type = BTRFS_EXTENT_REF_V0_KEY; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) @@ -1322,7 +1322,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, } ret = btrfs_insert_empty_item(trans, root, path, &key, 0); - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } @@ -1608,7 +1608,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans, if (ret != -ENOENT) return ret; - btrfs_release_path(root, path); + btrfs_release_path(path); *ref_ret = NULL; if (owner < BTRFS_FIRST_FREE_OBJECTID) { @@ -1862,7 +1862,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, __run_delayed_extent_op(extent_op, leaf, item); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root->fs_info->extent_root, path); + btrfs_release_path(path); path->reada = 1; path->leave_spinning = 1; @@ -2361,7 +2361,7 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, atomic_inc(&head->node.refs); spin_unlock(&delayed_refs->lock); - btrfs_release_path(root->fs_info->extent_root, path); + btrfs_release_path(path); mutex_lock(&head->mutex); mutex_unlock(&head->mutex); @@ -2732,7 +2732,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, bi = btrfs_item_ptr_offset(leaf, path->slots[0]); write_extent_buffer(leaf, &cache->item, bi, 
sizeof(cache->item)); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); fail: if (ret) return ret; @@ -2785,7 +2785,7 @@ again: inode = lookup_free_space_inode(root, block_group, path); if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { ret = PTR_ERR(inode); - btrfs_release_path(root, path); + btrfs_release_path(path); goto out; } @@ -2854,7 +2854,7 @@ again: out_put: iput(inode); out_free: - btrfs_release_path(root, path); + btrfs_release_path(path); out: spin_lock(&block_group->lock); block_group->disk_cache_state = dcs; @@ -4541,7 +4541,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, NULL, refs_to_drop, is_data); BUG_ON(ret); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); path->leave_spinning = 1; key.objectid = bytenr; @@ -4580,7 +4580,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, owner_objectid, 0); BUG_ON(ret < 0); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); path->leave_spinning = 1; key.objectid = bytenr; @@ -4650,7 +4650,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, ret = btrfs_del_items(trans, extent_root, path, path->slots[0], num_to_del); BUG_ON(ret); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); if (is_data) { ret = btrfs_del_csums(trans, root, bytenr, num_bytes); @@ -6480,7 +6480,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, trans->block_rsv = block_rsv; } } - btrfs_release_path(root, path); + btrfs_release_path(path); BUG_ON(err); ret = btrfs_del_root(trans, tree_root, &root->root_key); @@ -8580,7 +8580,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) memcpy(&cache->key, &found_key, sizeof(found_key)); key.objectid = found_key.objectid + found_key.offset; - btrfs_release_path(root, path); + btrfs_release_path(path); cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; @@ -8802,12 +8802,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (ret < 0) goto out; if (ret > 0) - btrfs_release_path(tree_root, path); + btrfs_release_path(path); if (ret == 0) { ret = btrfs_del_item(trans, tree_root, path); if (ret) goto out; - btrfs_release_path(tree_root, path); + btrfs_release_path(path); } spin_lock(&root->fs_info->block_group_cache_lock); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..f47e43d855aa 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, u32 item_size; if (item) - btrfs_release_path(root, path); + btrfs_release_path(path); item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, disk_bytenr, 0); if (IS_ERR(item)) { @@ -213,7 +213,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, (unsigned long long)offset); } item = NULL; - btrfs_release_path(root, path); + btrfs_release_path(path); goto found; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, @@ -631,7 +631,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, if (key.offset < bytenr) break; } - btrfs_release_path(root, path); + btrfs_release_path(path); } out: btrfs_free_path(path); @@ -722,7 +722,7 @@ again: * at this point, we know the tree has an item, but it isn't big * enough yet to put our csum in. 
Grow it */ - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &file_key, path, csum_size, 1); if (ret < 0) @@ -766,7 +766,7 @@ again: } insert: - btrfs_release_path(root, path); + btrfs_release_path(path); csum_offset = 0; if (found_next) { u64 tmp = total_bytes + root->sectorsize; @@ -850,7 +850,7 @@ next_sector: } btrfs_mark_buffer_dirty(path->nodes[0]); if (total_bytes < sums->len) { - btrfs_release_path(root, path); + btrfs_release_path(path); cond_resched(); goto again; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 80eabe85409a..566bdf298ea8 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -376,7 +376,7 @@ next_slot: search_start = max(key.offset, start); if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } @@ -393,7 +393,7 @@ next_slot: ret = btrfs_duplicate_item(trans, root, path, &new_key); if (ret == -EAGAIN) { - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } if (ret < 0) @@ -516,7 +516,7 @@ next_slot: del_nr = 0; del_slot = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } @@ -681,7 +681,7 @@ again: new_key.offset = split; ret = btrfs_duplicate_item(trans, root, path, &new_key); if (ret == -EAGAIN) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } BUG_ON(ret < 0); @@ -721,7 +721,7 @@ again: inode->i_ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } extent_end = other_end; @@ -738,7 +738,7 @@ again: inode->i_ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } key.offset = other_start; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d06abe20a729..48fafcb85b0e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -61,7 +61,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, if (ret < 0) return ERR_PTR(ret); if (ret > 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return ERR_PTR(-ENOENT); } @@ -70,7 +70,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, struct btrfs_free_space_header); btrfs_free_space_key(leaf, header, &disk_key); btrfs_disk_key_to_cpu(&location, &disk_key); - btrfs_release_path(root, path); + btrfs_release_path(path); inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); if (!inode) @@ -134,7 +134,7 @@ int create_free_space_inode(struct btrfs_root *root, btrfs_set_inode_block_group(leaf, inode_item, block_group->key.objectid); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); key.objectid = BTRFS_FREE_SPACE_OBJECTID; key.offset = block_group->key.objectid; @@ -143,7 +143,7 @@ int create_free_space_inode(struct btrfs_root *root, ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(struct btrfs_free_space_header)); if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } leaf = path->nodes[0]; @@ -152,7 +152,7 @@ int create_free_space_inode(struct btrfs_root *root, memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); btrfs_set_free_space_key(leaf, header, &disk_key); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } @@ -822,7 +822,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 
0, NULL, GFP_NOFS); - btrfs_release_path(root, path); + btrfs_release_path(path); goto out_free; } } @@ -832,7 +832,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, btrfs_set_free_space_bitmaps(leaf, header, bitmaps); btrfs_set_free_space_generation(leaf, header, trans->transid); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = 1; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 26f4d56cf049..2840989737b7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1164,7 +1164,7 @@ out_check: goto next_slot; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, found_key.offset - 1, page_started, @@ -1222,7 +1222,7 @@ out_check: if (cur_offset > end) break; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (cur_offset <= end && cow_start == (u64)-1) cow_start = cur_offset; @@ -2346,7 +2346,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) break; /* release the path since we're done with it */ - btrfs_release_path(root, path); + btrfs_release_path(path); /* * this is where we are basically btrfs_lookup, without the @@ -2712,7 +2712,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto err; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_del_inode_ref(trans, root, name, name_len, inode->i_ino, @@ -2735,7 +2735,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; } ret = btrfs_delete_one_dir_name(trans, root, path, di); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir->i_ino); @@ -2862,7 +2862,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, } else { check_link = 0; } - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 0); @@ -2876,7 +2876,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, } else { check_link = 0; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret == 0 && S_ISREG(inode->i_mode)) { ret = btrfs_lookup_file_extent(trans, root, path, @@ -2888,7 +2888,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, BUG_ON(ret == 0); if (check_path_shared(root, path)) goto out; - btrfs_release_path(root, path); + btrfs_release_path(path); } if (!check_link) { @@ -2909,7 +2909,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, err = 0; goto out; } - btrfs_release_path(root, path); + btrfs_release_path(path); ref = btrfs_lookup_inode_ref(trans, root, path, dentry->d_name.name, dentry->d_name.len, @@ -2922,7 +2922,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, if (check_path_shared(root, path)) goto out; index = btrfs_inode_ref_index(path->nodes[0], ref); - btrfs_release_path(root, path); + btrfs_release_path(path); di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, dentry->d_name.name, dentry->d_name.len, 0); @@ -3013,7 +3013,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); ret = btrfs_delete_one_dir_name(trans, root, path, di); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 
objectid, root->root_key.objectid, @@ -3026,7 +3026,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(root, path); + btrfs_release_path(path); index = key.offset; } @@ -3039,7 +3039,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); ret = btrfs_delete_one_dir_name(trans, root, path, di); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); btrfs_i_size_write(dir, dir->i_size - name_len * 2); dir->i_mtime = dir->i_ctime = CURRENT_TIME; @@ -3477,7 +3477,7 @@ delete: BUG_ON(ret); pending_del_nr = 0; } - btrfs_release_path(root, path); + btrfs_release_path(path); goto search_again; } else { path->slots[0]--; @@ -3899,7 +3899,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, if (ret) goto out; - btrfs_release_path(root->fs_info->tree_root, path); + btrfs_release_path(path); new_root = btrfs_read_fs_root_no_name(root->fs_info, location); if (IS_ERR(new_root)) { @@ -5223,7 +5223,7 @@ again: kunmap(page); free_extent_map(em); em = NULL; - btrfs_release_path(root, path); + btrfs_release_path(path); trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return ERR_CAST(trans); @@ -5249,7 +5249,7 @@ not_found_em: em->block_start = EXTENT_MAP_HOLE; set_bit(EXTENT_FLAG_VACANCY, &em->flags); insert: - btrfs_release_path(root, path); + btrfs_release_path(path); if (em->start > start || extent_map_end(em) <= start) { printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " "[%llu %llu]\n", (unsigned long long)em->start, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ffb48d6c5433..d11fc6548e15 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1396,7 +1396,7 @@ static noinline int search_ioctl(struct inode *inode, } ret = copy_to_sk(root, path, &key, sk, args->buf, &sk_offset, &num_found); - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret || num_found >= sk->nr_items) break; @@ -1503,7 +1503,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, if (key.offset == BTRFS_FIRST_FREE_OBJECTID) break; - btrfs_release_path(root, path); + btrfs_release_path(path); key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; @@ -1982,7 +1982,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, datal = btrfs_file_extent_ram_bytes(leaf, extent); } - btrfs_release_path(root, path); + btrfs_release_path(path); if (key.offset + datal <= off || key.offset >= off+len) @@ -2092,7 +2092,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, } btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); inode->i_mtime = inode->i_ctime = CURRENT_TIME; @@ -2113,12 +2113,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_end_transaction(trans, root); } next: - btrfs_release_path(root, path); + btrfs_release_path(path); key.offset++; } ret = 0; out: - btrfs_release_path(root, path); + btrfs_release_path(path); unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); out_unlock: mutex_unlock(&src->i_mutex); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 2097a88f60aa..f7b799b151aa 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -961,7 +961,7 @@ again: lower = upper; upper = NULL; } - btrfs_release_path(root, path2); + btrfs_release_path(path2); next: if (ptr < 
end) { ptr += btrfs_extent_inline_ref_size(key.type); @@ -974,7 +974,7 @@ next: if (ptr >= end) path1->slots[0]++; } - btrfs_release_path(rc->extent_root, path1); + btrfs_release_path(path1); cur->checked = 1; WARN_ON(exist); @@ -1749,7 +1749,7 @@ again: btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]); - btrfs_release_path(src, path); + btrfs_release_path(path); path->lowest_level = level; ret = btrfs_search_slot(trans, src, &key, path, 0, 1); @@ -2496,7 +2496,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, path->locks[upper->level] = 0; slot = path->slots[upper->level]; - btrfs_release_path(NULL, path); + btrfs_release_path(path); } else { ret = btrfs_bin_search(upper->eb, key, upper->level, &slot); @@ -2737,7 +2737,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, } else { path->lowest_level = node->level; ret = btrfs_search_slot(trans, root, key, path, 0, 1); - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret > 0) ret = 0; } @@ -3119,7 +3119,7 @@ static int add_tree_block(struct reloc_control *rc, #endif } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); BUG_ON(level == -1); @@ -3505,7 +3505,7 @@ int add_data_references(struct reloc_control *rc, } path->slots[0]++; } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); if (err) free_block_list(blocks); return err; @@ -3568,7 +3568,7 @@ next: EXTENT_DIRTY); if (ret == 0 && start <= key.objectid) { - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); rc->search_start = end + 1; } else { rc->search_start = key.objectid + key.offset; @@ -3576,7 +3576,7 @@ next: return 0; } } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); return ret; } @@ -3713,7 +3713,7 @@ restart: flags = BTRFS_EXTENT_FLAG_DATA; if (path_change) { - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); path->search_commit_root = 1; path->skip_locking = 1; @@ -3736,7 +3736,7 @@ restart: (flags & BTRFS_EXTENT_FLAG_DATA)) { ret = add_data_references(rc, &key, path, &blocks); } else { - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); ret = 0; } if (ret < 0) { @@ -3799,7 +3799,7 @@ restart: } } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, GFP_NOFS); @@ -3867,7 +3867,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans, btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); out: btrfs_free_path(path); return ret; @@ -4109,7 +4109,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(root->fs_info->tree_root, path); + btrfs_release_path(path); if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || key.type != BTRFS_ROOT_ITEM_KEY) @@ -4141,7 +4141,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) key.offset--; } - btrfs_release_path(root->fs_info->tree_root, path); + btrfs_release_path(path); if (list_empty(&reloc_roots)) goto out; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6928bff62daa..59a94c1d9815 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -57,7 +57,7 @@ again: btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]); if (search_key.type != BTRFS_ROOT_ITEM_KEY) { search_key.offset++; - 
btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } ret = 0; @@ -230,7 +230,7 @@ again: memcpy(&found_key, &key, sizeof(key)); key.offset++; - btrfs_release_path(root, path); + btrfs_release_path(path); dead_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, &found_key); @@ -292,7 +292,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(tree_root, path); + btrfs_release_path(path); if (key.objectid != BTRFS_ORPHAN_OBJECTID || key.type != BTRFS_ORPHAN_ITEM_KEY) @@ -390,7 +390,7 @@ again: err = -ENOENT; if (key.type == BTRFS_ROOT_BACKREF_KEY) { - btrfs_release_path(tree_root, path); + btrfs_release_path(path); key.objectid = ref_id; key.type = BTRFS_ROOT_REF_KEY; key.offset = root_id; @@ -463,7 +463,7 @@ again: btrfs_mark_buffer_dirty(leaf); if (key.type == BTRFS_ROOT_BACKREF_KEY) { - btrfs_release_path(tree_root, path); + btrfs_release_path(path); key.objectid = ref_id; key.type = BTRFS_ROOT_REF_KEY; key.offset = root_id; diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 992ab425599d..3b580ee8ab1d 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, ret = 0; goto out; } - btrfs_release_path(root, path); + btrfs_release_path(path); wret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (wret < 0) { diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d5313d63f967..c599e8c2a53c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -333,13 +333,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, goto insert; if (item_size == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); if (!dst_copy || !src_copy) { - btrfs_release_path(root, path); + btrfs_release_path(path); kfree(dst_copy); kfree(src_copy); return -ENOMEM; @@ -361,13 +361,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, * sync */ if (ret == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } } insert: - btrfs_release_path(root, path); + btrfs_release_path(path); /* try to insert the key into the destination tree */ ret = btrfs_insert_empty_item(trans, root, path, key, item_size); @@ -438,7 +438,7 @@ insert: } no_copy: btrfs_mark_buffer_dirty(path->nodes[0]); - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } @@ -544,11 +544,11 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, * we don't have to do anything */ if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto out; } } - btrfs_release_path(root, path); + btrfs_release_path(path); saved_nbytes = inode_get_bytes(inode); /* drop any overlapping extents */ @@ -600,7 +600,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, key->objectid, offset, &ins); BUG_ON(ret); } - btrfs_release_path(root, path); + btrfs_release_path(path); if (btrfs_file_extent_compression(eb, item)) { csum_start = ins.objectid; @@ -629,7 +629,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, kfree(sums); } } else { - btrfs_release_path(root, path); + btrfs_release_path(path); } } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { /* inline extents are easy, we just overwrite them */ @@ -675,7 +675,7 @@ static noinline int 
drop_one_dir_item(struct btrfs_trans_handle *trans, return -ENOMEM; read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); - btrfs_release_path(root, path); + btrfs_release_path(path); inode = read_one_inode(root, location.objectid); BUG_ON(!inode); @@ -713,7 +713,7 @@ static noinline int inode_in_dir(struct btrfs_root *root, goto out; } else goto out; - btrfs_release_path(root, path); + btrfs_release_path(path); di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); if (di && !IS_ERR(di)) { @@ -724,7 +724,7 @@ static noinline int inode_in_dir(struct btrfs_root *root, goto out; match = 1; out: - btrfs_release_path(root, path); + btrfs_release_path(path); return match; } @@ -884,7 +884,7 @@ again: if (!backref_in_log(log, key, victim_name, victim_name_len)) { btrfs_inc_nlink(inode); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_unlink_inode(trans, root, dir, inode, victim_name, @@ -901,7 +901,7 @@ again: */ search_done = 1; } - btrfs_release_path(root, path); + btrfs_release_path(path); insert: /* insert our name */ @@ -922,7 +922,7 @@ out: BUG_ON(ret); out_nowrite: - btrfs_release_path(root, path); + btrfs_release_path(path); iput(dir); iput(inode); return 0; @@ -999,9 +999,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, if (key.offset == 0) break; key.offset--; - btrfs_release_path(root, path); + btrfs_release_path(path); } - btrfs_release_path(root, path); + btrfs_release_path(path); if (nlink != inode->i_nlink) { inode->i_nlink = nlink; btrfs_update_inode(trans, root, inode); @@ -1052,7 +1052,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); inode = read_one_inode(root, key.offset); BUG_ON(!inode); @@ -1068,7 +1068,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, */ key.offset = (u64)-1; } - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } @@ -1096,7 +1096,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_item(trans, root, path, &key, 0); - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret == 0) { btrfs_inc_nlink(inode); btrfs_update_inode(trans, root, inode); @@ -1192,7 +1192,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, exists = 1; else exists = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); if (key->type == BTRFS_DIR_ITEM_KEY) { dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, @@ -1236,13 +1236,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, if (key->type == BTRFS_DIR_INDEX_KEY) goto insert; out: - btrfs_release_path(root, path); + btrfs_release_path(path); kfree(name); iput(dir); return 0; insert: - btrfs_release_path(root, path); + btrfs_release_path(path); ret = insert_one_name(trans, root, path, key->objectid, key->offset, name, name_len, log_type, &log_key); @@ -1363,7 +1363,7 @@ next: *end_ret = found_end; ret = 0; out: - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } @@ -1428,8 +1428,8 @@ again: } if (IS_ERR_OR_NULL(log_di)) { btrfs_dir_item_key_to_cpu(eb, di, &location); - btrfs_release_path(root, path); - btrfs_release_path(log, log_path); + btrfs_release_path(path); + btrfs_release_path(log_path); inode = read_one_inode(root, location.objectid); BUG_ON(!inode); @@ -1453,7 +1453,7 @@ again: ret = 0; 
goto out; } - btrfs_release_path(log, log_path); + btrfs_release_path(log_path); kfree(name); ptr = (unsigned long)(di + 1); @@ -1461,8 +1461,8 @@ again: } ret = 0; out: - btrfs_release_path(root, path); - btrfs_release_path(log, log_path); + btrfs_release_path(path); + btrfs_release_path(log_path); return ret; } @@ -1550,7 +1550,7 @@ again: break; dir_key.offset = found_key.offset + 1; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (range_end == (u64)-1) break; range_start = range_end + 1; @@ -1561,11 +1561,11 @@ next_type: if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { key_type = BTRFS_DIR_LOG_INDEX_KEY; dir_key.type = BTRFS_DIR_INDEX_KEY; - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } out: - btrfs_release_path(root, path); + btrfs_release_path(path); btrfs_free_path(log_path); iput(dir); return ret; @@ -2225,7 +2225,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, bytes_del += name_len; BUG_ON(ret); } - btrfs_release_path(log, path); + btrfs_release_path(path); di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, index, name, name_len, -1); if (IS_ERR(di)) { @@ -2247,7 +2247,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, key.objectid = dir->i_ino; key.offset = 0; key.type = BTRFS_INODE_ITEM_KEY; - btrfs_release_path(log, path); + btrfs_release_path(path); ret = btrfs_search_slot(trans, log, &key, path, 0, 1); if (ret < 0) { @@ -2269,7 +2269,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(path->nodes[0]); } else ret = 0; - btrfs_release_path(log, path); + btrfs_release_path(path); } fail: btrfs_free_path(path); @@ -2344,7 +2344,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, struct btrfs_dir_log_item); btrfs_set_dir_log_end(path->nodes[0], item, last_offset); btrfs_mark_buffer_dirty(path->nodes[0]); - btrfs_release_path(log, path); + btrfs_release_path(path); return 0; } @@ -2393,10 +2393,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, min_key.objectid = inode->i_ino; min_key.type = key_type; min_key.offset = (u64)-1; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } ret = btrfs_previous_item(root, path, inode->i_ino, key_type); @@ -2432,7 +2432,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, } } } - btrfs_release_path(root, path); + btrfs_release_path(path); /* find the first key from this transaction again */ ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); @@ -2490,8 +2490,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, } } done: - btrfs_release_path(root, path); - btrfs_release_path(log, dst_path); + btrfs_release_path(path); + btrfs_release_path(dst_path); if (err == 0) { *last_offset_ret = last_offset; @@ -2588,9 +2588,9 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, log, path); BUG_ON(ret); - btrfs_release_path(log, path); + btrfs_release_path(path); } - btrfs_release_path(log, path); + btrfs_release_path(path); return ret; } @@ -2696,7 +2696,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, } btrfs_mark_buffer_dirty(dst_path->nodes[0]); - btrfs_release_path(log, dst_path); + btrfs_release_path(dst_path); kfree(ins_data); /* @@ -2845,7 +2845,7 @@ next_slot: } ins_nr = 0; } - 
btrfs_release_path(root, path); + btrfs_release_path(path); if (min_key.offset < (u64)-1) min_key.offset++; @@ -2868,8 +2868,8 @@ next_slot: } WARN_ON(ins_nr); if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { - btrfs_release_path(root, path); - btrfs_release_path(log, dst_path); + btrfs_release_path(path); + btrfs_release_path(dst_path); ret = log_directory_changes(trans, root, inode, path, dst_path); if (ret) { err = ret; @@ -3136,7 +3136,7 @@ again: } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); - btrfs_release_path(log_root_tree, path); + btrfs_release_path(path); if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) break; @@ -3171,7 +3171,7 @@ again: if (found_key.offset == 0) break; } - btrfs_release_path(log_root_tree, path); + btrfs_release_path(path); /* step one is to pin it all, step two is to replay just inodes */ if (wc.pin) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 76acd1d235e4..e21130d3f98a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1475,7 +1475,7 @@ next_slot: goto error; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } @@ -1947,7 +1947,7 @@ again: chunk = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_chunk); chunk_type = btrfs_chunk_type(leaf, chunk); - btrfs_release_path(chunk_root, path); + btrfs_release_path(path); if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_relocate_chunk(chunk_root, chunk_tree, @@ -2065,7 +2065,7 @@ int btrfs_balance(struct btrfs_root *dev_root) if (found_key.offset == 0) break; - btrfs_release_path(chunk_root, path); + btrfs_release_path(path); ret = btrfs_relocate_chunk(chunk_root, chunk_root->root_key.objectid, found_key.objectid, @@ -2137,7 +2137,7 @@ again: goto done; if (ret) { ret = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); break; } @@ -2146,7 +2146,7 @@ again: btrfs_item_key_to_cpu(l, &key, path->slots[0]); if (key.objectid != device->devid) { - btrfs_release_path(root, path); + btrfs_release_path(path); break; } @@ -2154,14 +2154,14 @@ again: length = btrfs_dev_extent_length(l, dev_extent); if (key.offset + length <= new_size) { - btrfs_release_path(root, path); + btrfs_release_path(path); break; } chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, chunk_offset); @@ -3813,7 +3813,7 @@ again: } if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { key.objectid = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } ret = 0; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index cfd660550ded..4ca88d1e18e2 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans, ret = btrfs_delete_one_dir_name(trans, root, path, di); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); /* if we don't have a value then we are removing the xattr */ if (!value) goto out; } else { - btrfs_release_path(root, path); + btrfs_release_path(path); if (flags & XATTR_REPLACE) { /* we couldn't find the attr to replace */ -- cgit v1.2.2 From 8cc33e5c19bf01c7617608669be8c1b4f663eb2f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 2 May 2011 15:29:25 +0200 Subject: btrfs: Document a mutex lock/unlock 
sequence --- fs/btrfs/extent-tree.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a160f11465f8..fba1348cb2a0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -756,6 +756,10 @@ again: btrfs_release_path(path); + /* + * Mutex was contended, block until it's released and try + * again + */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); @@ -2297,6 +2301,10 @@ again: atomic_inc(&ref->refs); spin_unlock(&delayed_refs->lock); + /* + * Mutex was contended, block until it's + * released and try again + */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); @@ -2363,6 +2371,10 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, btrfs_release_path(path); + /* + * Mutex was contended, block until it's released and let + * caller try again + */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); -- cgit v1.2.2 From 621496f4fd56195b7b273521f467c2945165481f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 4 May 2011 12:56:49 +0200 Subject: btrfs: remove unused function prototypes function prototypes without a body Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 12 ------------ fs/btrfs/delayed-ref.h | 5 ----- fs/btrfs/disk-io.h | 11 ----------- fs/btrfs/extent_io.h | 9 --------- fs/btrfs/transaction.h | 3 --- fs/btrfs/tree-log.h | 1 - fs/btrfs/volumes.h | 2 -- 7 files changed, 43 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3f301f05099d..b66216e636c2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2108,12 +2108,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, u64 num_bytes, u64 *refs, u64 *flags); int btrfs_pin_extent(struct btrfs_root *root, u64 bytenr, u64 num, int reserved); -int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct extent_buffer *leaf); int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 offset, u64 bytenr); -int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy); struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr); @@ -2463,15 +2460,10 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 file_start, int contig); -int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, - u64 start, unsigned long len); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, int cow); -int btrfs_csum_truncate(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - u64 isize); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list); /* inode.c */ @@ -2520,7 +2512,6 @@ unsigned long btrfs_force_ra(struct address_space *mapping, int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); int btrfs_readpage(struct file *file, struct page *page); void btrfs_evict_inode(struct inode *inode); -void btrfs_put_inode(struct inode *inode); int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); void btrfs_dirty_inode(struct inode *inode); struct inode *btrfs_alloc_inode(struct super_block *sb); @@ -2531,8 +2522,6 @@ void 
btrfs_destroy_cachep(void); long btrfs_ioctl_trans_end(struct file *file); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root, int *was_new); -int btrfs_commit_write(struct file *file, struct page *page, - unsigned from, unsigned to); struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 end, int create); @@ -2571,7 +2560,6 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); int btrfs_sync_file(struct file *file, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); -int btrfs_check_file(struct btrfs_root *root, struct inode *inode); extern const struct file_operations btrfs_file_operations; int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, u64 start, u64 end, u64 *hint_byte, int drop_cache); diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 50e3cf92fbda..946ed71ab84f 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -167,11 +167,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); -int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, u64 orig_parent, - u64 parent, u64 orig_ref_root, u64 ref_root, - u64 orig_ref_generation, u64 ref_generation, - u64 owner_objectid, int pin); int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head); int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 07b20dc2fd95..758f3ca614ee 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -65,25 +65,14 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, struct btrfs_key *location); int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); -int btrfs_insert_dev_radix(struct btrfs_root *root, - struct block_device *bdev, - u64 device_id, - u64 block_start, - u64 num_blocks); void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); -void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf); int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); int btrfs_set_buffer_uptodate(struct extent_buffer *buf); -int wait_on_tree_block_writeback(struct btrfs_root *root, - struct extent_buffer *buf); int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); void btrfs_csum_final(u32 crc, char *result); -int btrfs_open_device(struct btrfs_device *dev); -int btrfs_verify_block_csum(struct btrfs_root *root, - struct extent_buffer *buf); int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata); int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 3c3be74c934e..d1c5a57c9984 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -215,14 +215,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int clear_extent_dirty(struct 
extent_io_tree *tree, u64 start, u64 end, gfp_t mask); -int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); -int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, - u64 end, gfp_t mask); int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); -int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits); struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, @@ -298,8 +292,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); int set_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); -int test_extent_buffer_dirty(struct extent_io_tree *tree, - struct extent_buffer *eb); int set_extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb); int clear_extent_buffer_uptodate(struct extent_io_tree *tree, @@ -317,7 +309,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, unsigned long *map_start, unsigned long *map_len, int km); void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); -int release_extent_buffer_tail_pages(struct extent_buffer *eb); int extent_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end); int extent_clear_unlock_delalloc(struct inode *inode, diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c584..000a41008c3b 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); -int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, - struct btrfs_root *root); int btrfs_add_dead_root(struct btrfs_root *root); -int btrfs_drop_dead_root(struct btrfs_root *root); int btrfs_defrag_root(struct btrfs_root *root, int cacheonly); int btrfs_clean_old_snapshots(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 3dfae84c8cc8..2270ac58d746 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h @@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, struct inode *inode, u64 dirid); -int btrfs_join_running_log_trans(struct btrfs_root *root); int btrfs_end_log_trans(struct btrfs_root *root); int btrfs_pin_log_trans(struct btrfs_root *root); int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..036b276b4860 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -209,8 +209,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans, int btrfs_rm_device(struct btrfs_root *root, char *device_path); int btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); -int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, - u64 logical, struct page *page); int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 new_size); struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, -- cgit v1.2.2 From 
f2a97a9dbd86eb1ef956bdf20e05c507b32beb96 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Thu, 5 May 2011 12:44:41 +0200
Subject: btrfs: remove all unused functions

Remove static and global declarations and/or definitions. Reduces size of
btrfs.ko by ~3.4kB.

   text    data     bss     dec     hex filename
 402081    7464     200  409745   64091 btrfs.ko.base
 398620    7144     200  405964   631cc btrfs.ko.remove-all

Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 78 --------------- fs/btrfs/delayed-ref.c | 38 -------- fs/btrfs/delayed-ref.h | 1 - fs/btrfs/disk-io.c | 27 ------ fs/btrfs/disk-io.h | 7 -- fs/btrfs/extent_io.c | 227 -------------------------------------------- fs/btrfs/extent_io.h | 21 ---- fs/btrfs/free-space-cache.c | 15 --- fs/btrfs/free-space-cache.h | 1 - fs/btrfs/inode.c | 52 ---------- fs/btrfs/locking.c | 25 ----- fs/btrfs/locking.h | 2 - fs/btrfs/ref-cache.c | 164 -------------------------------- fs/btrfs/ref-cache.h | 24 ----- fs/btrfs/relocation.c | 2 +- fs/btrfs/root-tree.c | 47 --------- fs/btrfs/sysfs.c | 65 ------------- fs/btrfs/volumes.c | 19 ---- fs/btrfs/volumes.h | 3 - 19 files changed, 1 insertion(+), 817 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b66216e636c2..e37d441617d2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1440,26 +1440,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); } -static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb, - struct btrfs_chunk *c, int nr, - u64 val) -{ - btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val); -} - static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, struct btrfs_chunk *c, int nr) { return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); } -static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb, - struct btrfs_chunk *c, int nr, - u64 val) -{ - btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val); -} - /* struct btrfs_block_group_item */ BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, used, 64); @@ -1517,14 +1503,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item) return (struct btrfs_timespec *)ptr; } -static inline struct btrfs_timespec * -btrfs_inode_otime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, otime); - return (struct btrfs_timespec *)ptr; -} - BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); @@ -1875,33 +1853,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) return (u8 *)ptr; } -static inline u8 *btrfs_super_fsid(struct extent_buffer *eb) -{ - unsigned long ptr = offsetof(struct btrfs_super_block, fsid); - return (u8 *)ptr; -} - -static inline u8 *btrfs_header_csum(struct extent_buffer *eb) -{ - unsigned long ptr = offsetof(struct btrfs_header, csum); - return (u8 *)ptr; -} - -static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb) -{ - return NULL; -} - -static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb) -{ - return NULL; -} - -static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb) -{ - return NULL; -} - static inline int btrfs_is_leaf(struct extent_buffer *eb) { return btrfs_header_level(eb) == 0; @@ -2055,22 +2006,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb) return sb->s_fs_info; } -static inline int
btrfs_set_root_name(struct btrfs_root *root, - const char *name, int len) -{ - /* if we already have a name just free it */ - kfree(root->name); - - root->name = kmalloc(len+1, GFP_KERNEL); - if (!root->name) - return -ENOMEM; - - memcpy(root->name, name, len); - root->name[len] = '\0'; - - return 0; -} - static inline u32 btrfs_level_size(struct btrfs_root *root, int level) { if (level == 0) @@ -2304,11 +2239,6 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); -int btrfs_insert_some_items(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - int nr); int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -2354,8 +2284,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *item); int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct btrfs_root_item *item, struct btrfs_key *key); -int btrfs_search_root(struct btrfs_root *root, u64 search_start, - u64 *found_objectid); int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); int btrfs_find_orphan_roots(struct btrfs_root *tree_root); int btrfs_set_root_node(struct btrfs_root_item *item, @@ -2494,8 +2422,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u32 min_type); int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, - int sync); int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, struct extent_state **cached_state); int btrfs_writepages(struct address_space *mapping, @@ -2579,10 +2505,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, /* sysfs.c */ int btrfs_init_sysfs(void); void btrfs_exit_sysfs(void); -int btrfs_sysfs_add_super(struct btrfs_fs_info *fs); -int btrfs_sysfs_add_root(struct btrfs_root *root); -void btrfs_sysfs_del_root(struct btrfs_root *root); -void btrfs_sysfs_del_super(struct btrfs_fs_info *root); /* xattr.c */ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index bce28f653899..cb9b9a431fc9 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -280,44 +280,6 @@ again: return 1; } -/* - * This checks to see if there are any delayed refs in the - * btree for a given bytenr. It returns one if it finds any - * and zero otherwise. - * - * If it only finds a head node, it returns 0. - * - * The idea is to use this when deciding if you can safely delete an - * extent from the extent allocation tree. There may be a pending - * ref in the rbtree that adds or removes references, so as long as this - * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent - * allocation tree. 
- */ -int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr) -{ - struct btrfs_delayed_ref_node *ref; - struct btrfs_delayed_ref_root *delayed_refs; - struct rb_node *prev_node; - int ret = 0; - - delayed_refs = &trans->transaction->delayed_refs; - spin_lock(&delayed_refs->lock); - - ref = find_ref_head(&delayed_refs->root, bytenr, NULL); - if (ref) { - prev_node = rb_prev(&ref->rb_node); - if (!prev_node) - goto out; - ref = rb_entry(prev_node, struct btrfs_delayed_ref_node, - rb_node); - if (ref->bytenr == bytenr) - ret = 1; - } -out: - spin_unlock(&delayed_refs->lock); - return ret; -} - /* * helper function to update an extent delayed ref in the * rbtree. existing and update must both have the same diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 946ed71ab84f..e287e3b0eab0 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -166,7 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); -int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head); int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4084959b36fd..fa287c551ffc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -650,12 +650,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) return 256 * limit; } -int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) -{ - return atomic_read(&info->nr_async_bios) > - btrfs_async_submit_limit(info); -} - static void run_one_async_start(struct btrfs_work *work) { struct async_submit_bio *async; @@ -1283,21 +1277,6 @@ out: return root; } -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, - u64 root_objectid) -{ - struct btrfs_root *root; - - if (root_objectid == BTRFS_ROOT_TREE_OBJECTID) - return fs_info->tree_root; - if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) - return fs_info->extent_root; - - root = radix_tree_lookup(&fs_info->fs_roots_radix, - (unsigned long)root_objectid); - return root; -} - struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, struct btrfs_key *location) { @@ -1369,11 +1348,6 @@ fail: return ERR_PTR(ret); } -struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *location, - const char *name, int namelen) -{ - return btrfs_read_fs_root_no_name(fs_info, location); #if 0 struct btrfs_root *root; int ret; @@ -1402,7 +1376,6 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, root->in_sysfs = 1; return root; #endif -} static int btrfs_congested_fn(void *congested_data, int bdi_bits) { diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 758f3ca614ee..2d75f9e896f6 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -55,11 +55,6 @@ int btrfs_commit_super(struct btrfs_root *root); int btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, - u64 root_objectid); -struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *location, - const char *name, int namelen); struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, struct btrfs_key *location); struct 
btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, @@ -80,8 +75,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done); - -int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); int btrfs_write_tree_block(struct extent_buffer *buf); int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9369289ce771..91208296ff2b 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -941,13 +941,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, NULL, mask); } -static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) -{ - return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, - NULL, mask); -} - int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask) { @@ -963,11 +956,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, cached_state, mask); } -int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) -{ - return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK); -} - /* * either insert or lock state struct between start and end use mask to tell * us if waiting is desired. @@ -1027,25 +1015,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) mask); } -/* - * helper function to set pages and extents in the tree dirty - */ -int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end) -{ - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; - struct page *page; - - while (index <= end_index) { - page = find_get_page(tree->mapping, index); - BUG_ON(!page); - __set_page_dirty_nobuffers(page); - page_cache_release(page); - index++; - } - return 0; -} - /* * helper function to set both pages and extents in the tree writeback */ @@ -1819,46 +1788,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err) bio_put(bio); } -/* - * IO done from prepare_write is pretty simple, we just unlock - * the structs in the extent tree when done, and set the uptodate bits - * as appropriate. 
- */ -static void end_bio_extent_preparewrite(struct bio *bio, int err) -{ - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_io_tree *tree; - u64 start; - u64 end; - - do { - struct page *page = bvec->bv_page; - struct extent_state *cached = NULL; - tree = &BTRFS_I(page->mapping->host)->io_tree; - - start = ((u64)page->index << PAGE_CACHE_SHIFT) + - bvec->bv_offset; - end = start + bvec->bv_len - 1; - - if (--bvec >= bio->bi_io_vec) - prefetchw(&bvec->bv_page->flags); - - if (uptodate) { - set_extent_uptodate(tree, start, end, &cached, - GFP_ATOMIC); - } else { - ClearPageUptodate(page); - SetPageError(page); - } - - unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); - - } while (bvec >= bio->bi_io_vec); - - bio_put(bio); -} - struct bio * btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, gfp_t gfp_flags) @@ -2719,128 +2648,6 @@ int extent_invalidatepage(struct extent_io_tree *tree, return 0; } -/* - * simple commit_write call, set_range_dirty is used to mark both - * the pages and the extent records as dirty - */ -int extent_commit_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to) -{ - loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; - - set_page_extent_mapped(page); - set_page_dirty(page); - - if (pos > inode->i_size) { - i_size_write(inode, pos); - mark_inode_dirty(inode); - } - return 0; -} - -int extent_prepare_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to, get_extent_t *get_extent) -{ - u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; - u64 block_start; - u64 orig_block_start; - u64 block_end; - u64 cur_end; - struct extent_map *em; - unsigned blocksize = 1 << inode->i_blkbits; - size_t pg_offset = 0; - size_t block_off_start; - size_t block_off_end; - int err = 0; - int iocount = 0; - int ret = 0; - int isnew; - - set_page_extent_mapped(page); - - block_start = (page_start + from) & ~((u64)blocksize - 1); - block_end = (page_start + to - 1) | (blocksize - 1); - orig_block_start = block_start; - - lock_extent(tree, page_start, page_end, GFP_NOFS); - while (block_start <= block_end) { - em = get_extent(inode, page, pg_offset, block_start, - block_end - block_start + 1, 1); - if (IS_ERR_OR_NULL(em)) - goto err; - - cur_end = min(block_end, extent_map_end(em) - 1); - block_off_start = block_start & (PAGE_CACHE_SIZE - 1); - block_off_end = block_off_start + blocksize; - isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); - - if (!PageUptodate(page) && isnew && - (block_off_end > to || block_off_start < from)) { - void *kaddr; - - kaddr = kmap_atomic(page, KM_USER0); - if (block_off_end > to) - memset(kaddr + to, 0, block_off_end - to); - if (block_off_start < from) - memset(kaddr + block_off_start, 0, - from - block_off_start); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - } - if ((em->block_start != EXTENT_MAP_HOLE && - em->block_start != EXTENT_MAP_INLINE) && - !isnew && !PageUptodate(page) && - (block_off_end > to || block_off_start < from) && - !test_range_bit(tree, block_start, cur_end, - EXTENT_UPTODATE, 1, NULL)) { - u64 sector; - u64 extent_offset = block_start - em->start; - size_t iosize; - sector = (em->block_start + extent_offset) >> 9; - iosize = (cur_end - block_start + blocksize) & - ~((u64)blocksize - 1); - /* - * we've already got the 
extent locked, but we - * need to split the state such that our end_bio - * handler can clear the lock. - */ - set_extent_bit(tree, block_start, - block_start + iosize - 1, - EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS); - ret = submit_extent_page(READ, tree, page, - sector, iosize, pg_offset, em->bdev, - NULL, 1, - end_bio_extent_preparewrite, 0, - 0, 0); - if (ret && !err) - err = ret; - iocount++; - block_start = block_start + iosize; - } else { - struct extent_state *cached = NULL; - - set_extent_uptodate(tree, block_start, cur_end, &cached, - GFP_NOFS); - unlock_extent_cached(tree, block_start, cur_end, - &cached, GFP_NOFS); - block_start = cur_end + 1; - } - pg_offset = block_start & (PAGE_CACHE_SIZE - 1); - free_extent_map(em); - } - if (iocount) { - wait_extent_bit(tree, orig_block_start, - block_end, EXTENT_LOCKED); - } - check_page_uptodate(tree, page); -err: - /* FIXME, zero out newly allocated blocks on error */ - return err; -} - /* * a helper for releasepage, this tests for areas of the page that * are locked or under IO and drops the related state bits if it is safe @@ -2927,33 +2734,6 @@ int try_release_extent_mapping(struct extent_map_tree *map, return try_release_extent_state(map, tree, page, mask); } -sector_t extent_bmap(struct address_space *mapping, sector_t iblock, - get_extent_t *get_extent) -{ - struct inode *inode = mapping->host; - struct extent_state *cached_state = NULL; - u64 start = iblock << inode->i_blkbits; - sector_t sector = 0; - size_t blksize = (1 << inode->i_blkbits); - struct extent_map *em; - - lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, - 0, &cached_state, GFP_NOFS); - em = get_extent(inode, NULL, 0, start, blksize, 0); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, - start + blksize - 1, &cached_state, GFP_NOFS); - if (IS_ERR_OR_NULL(em)) - return 0; - - if (em->block_start > EXTENT_MAP_LAST_BYTE) - goto out; - - sector = (em->block_start + start - em->start) >> inode->i_blkbits; -out: - free_extent_map(em); - return sector; -} - /* * helper function for fiemap, which doesn't want to see any holes. 
* This maps until we find something past 'last' @@ -3437,13 +3217,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, return 0; } -int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, - struct extent_buffer *eb) -{ - return wait_on_extent_writeback(tree, eb->start, - eb->start + eb->len - 1); -} - int set_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index d1c5a57c9984..4e8445a4757c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -153,15 +153,6 @@ static inline int extent_compress_type(unsigned long bio_flags) struct extent_map_tree; -static inline struct extent_state *extent_state_next(struct extent_state *state) -{ - struct rb_node *node; - node = rb_next(&state->rb_node); - if (!node) - return NULL; - return rb_entry(node, struct extent_state, rb_node); -} - typedef struct extent_map *(get_extent_t)(struct inode *inode, struct page *page, size_t pg_offset, @@ -237,17 +228,8 @@ int extent_readpages(struct extent_io_tree *tree, struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_extent_t get_extent); -int extent_prepare_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to, get_extent_t *get_extent); -int extent_commit_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to); -sector_t extent_bmap(struct address_space *mapping, sector_t iblock, - get_extent_t *get_extent); int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent); -int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end); int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); void set_page_extent_mapped(struct page *page); @@ -284,9 +266,6 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_offset, unsigned long len); void memset_extent_buffer(struct extent_buffer *eb, char c, unsigned long start, unsigned long len); -int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, - struct extent_buffer *eb); -int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end); int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); int clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 48fafcb85b0e..0290b0c7b003 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1685,21 +1685,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) -{ - struct btrfs_free_space *info; - struct rb_node *n; - u64 ret = 0; - - for (n = rb_first(&block_group->free_space_offset); n; - n = rb_next(n)) { - info = rb_entry(n, struct btrfs_free_space, offset_index); - ret += info->bytes; - } - - return ret; -} - /* * for a given cluster, put all of its extents back into the free * space cache. 
If the block group passed doesn't match the block group diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 65c3b935289f..12b2b5165f8a 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -55,7 +55,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_group_cache *block_group, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2840989737b7..57122a5e8473 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7185,58 +7185,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) return 0; } -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, - int sync) -{ - struct btrfs_inode *binode; - struct inode *inode = NULL; - - spin_lock(&root->fs_info->delalloc_lock); - while (!list_empty(&root->fs_info->delalloc_inodes)) { - binode = list_entry(root->fs_info->delalloc_inodes.next, - struct btrfs_inode, delalloc_inodes); - inode = igrab(&binode->vfs_inode); - if (inode) { - list_move_tail(&binode->delalloc_inodes, - &root->fs_info->delalloc_inodes); - break; - } - - list_del_init(&binode->delalloc_inodes); - cond_resched_lock(&root->fs_info->delalloc_lock); - } - spin_unlock(&root->fs_info->delalloc_lock); - - if (inode) { - if (sync) { - filemap_write_and_wait(inode->i_mapping); - /* - * We have to do this because compression doesn't - * actually set PG_writeback until it submits the pages - * for IO, which happens in an async thread, so we could - * race and not actually wait for any writeback pages - * because they've not been submitted yet. Technically - * this could still be the case for the ordered stuff - * since the async thread may not have started to do its - * work yet. If this becomes the case then we need to - * figure out a way to make sure that in writepage we - * wait for any async pages to be submitted before - * returning so that fdatawait does what its supposed to - * do. - */ - btrfs_wait_ordered_range(inode, 0, (u64)-1); - } else { - filemap_flush(inode->i_mapping); - } - if (delay_iput) - btrfs_add_delayed_iput(inode); - else - iput(inode); - return 1; - } - return 0; -} - static int btrfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 6151f2ea38bb..66fa43dc3f0f 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -185,31 +185,6 @@ sleep: return 0; } -/* - * Very quick trylock, this does not spin or schedule. It returns - * 1 with the spinlock held if it was able to take the lock, or it - * returns zero if it was unable to take the lock. - * - * After this call, scheduling is not safe without first calling - * btrfs_set_lock_blocking() - */ -int btrfs_try_tree_lock(struct extent_buffer *eb) -{ - if (spin_trylock(&eb->lock)) { - if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) { - /* - * we've got the spinlock, but the real owner is - * blocking. 
Drop the spinlock and return failure - */ - spin_unlock(&eb->lock); - return 0; - } - return 1; - } - /* someone else has the spinlock giveup */ - return 0; -} - int btrfs_tree_unlock(struct extent_buffer *eb) { /* diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 6c4ce457168c..5c33a560a2f1 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -21,8 +21,6 @@ int btrfs_tree_lock(struct extent_buffer *eb); int btrfs_tree_unlock(struct extent_buffer *eb); - -int btrfs_try_tree_lock(struct extent_buffer *eb); int btrfs_try_spin_lock(struct extent_buffer *eb); void btrfs_set_lock_blocking(struct extent_buffer *eb); diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index a97314cf6bd6..82d569cb6267 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -23,56 +23,6 @@ #include "ref-cache.h" #include "transaction.h" -/* - * leaf refs are used to cache the information about which extents - * a given leaf has references on. This allows us to process that leaf - * in btrfs_drop_snapshot without needing to read it back from disk. - */ - -/* - * kmalloc a leaf reference struct and update the counters for the - * total ref cache size - */ -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, - int nr_extents) -{ - struct btrfs_leaf_ref *ref; - size_t size = btrfs_leaf_ref_size(nr_extents); - - ref = kmalloc(size, GFP_NOFS); - if (ref) { - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size += size; - spin_unlock(&root->fs_info->ref_cache_lock); - - memset(ref, 0, sizeof(*ref)); - atomic_set(&ref->usage, 1); - INIT_LIST_HEAD(&ref->list); - } - return ref; -} - -/* - * free a leaf reference struct and update the counters for the - * total ref cache size - */ -void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) -{ - if (!ref) - return; - WARN_ON(atomic_read(&ref->usage) == 0); - if (atomic_dec_and_test(&ref->usage)) { - size_t size = btrfs_leaf_ref_size(ref->nritems); - - BUG_ON(ref->in_tree); - kfree(ref); - - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size -= size; - spin_unlock(&root->fs_info->ref_cache_lock); - } -} - static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, struct rb_node *node) { @@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) } return NULL; } - -int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, - int shared) -{ - struct btrfs_leaf_ref *ref = NULL; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; - - if (shared) - tree = &root->fs_info->shared_ref_tree; - if (!tree) - return 0; - - spin_lock(&tree->lock); - while (!list_empty(&tree->list)) { - ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); - BUG_ON(ref->tree != tree); - if (ref->root_gen > max_root_gen) - break; - if (!xchg(&ref->in_tree, 0)) { - cond_resched_lock(&tree->lock); - continue; - } - - rb_erase(&ref->rb_node, &tree->root); - list_del_init(&ref->list); - - spin_unlock(&tree->lock); - btrfs_free_leaf_ref(root, ref); - cond_resched(); - spin_lock(&tree->lock); - } - spin_unlock(&tree->lock); - return 0; -} - -/* - * find the leaf ref for a given extent. 
This returns the ref struct with - * a usage reference incremented - */ -struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, - u64 bytenr) -{ - struct rb_node *rb; - struct btrfs_leaf_ref *ref = NULL; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; -again: - if (tree) { - spin_lock(&tree->lock); - rb = tree_search(&tree->root, bytenr); - if (rb) - ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node); - if (ref) - atomic_inc(&ref->usage); - spin_unlock(&tree->lock); - if (ref) - return ref; - } - if (tree != &root->fs_info->shared_ref_tree) { - tree = &root->fs_info->shared_ref_tree; - goto again; - } - return NULL; -} - -/* - * add a fully filled in leaf ref struct - * remove all the refs older than a given root generation - */ -int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, - int shared) -{ - int ret = 0; - struct rb_node *rb; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; - - if (shared) - tree = &root->fs_info->shared_ref_tree; - - spin_lock(&tree->lock); - rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node); - if (rb) { - ret = -EEXIST; - } else { - atomic_inc(&ref->usage); - ref->tree = tree; - ref->in_tree = 1; - list_add_tail(&ref->list, &tree->list); - } - spin_unlock(&tree->lock); - return ret; -} - -/* - * remove a single leaf ref from the tree. This drops the ref held by the tree - * only - */ -int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) -{ - struct btrfs_leaf_ref_tree *tree; - - if (!xchg(&ref->in_tree, 0)) - return 0; - - tree = ref->tree; - spin_lock(&tree->lock); - - rb_erase(&ref->rb_node, &tree->root); - list_del_init(&ref->list); - - spin_unlock(&tree->lock); - - btrfs_free_leaf_ref(root, ref); - return 0; -} diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h index e2a55cb2072b..24f7001f6387 100644 --- a/fs/btrfs/ref-cache.h +++ b/fs/btrfs/ref-cache.h @@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents) return sizeof(struct btrfs_leaf_ref) + sizeof(struct btrfs_extent_info) * nr_extents; } - -static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree) -{ - tree->root = RB_ROOT; - INIT_LIST_HEAD(&tree->list); - spin_lock_init(&tree->lock); -} - -static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree) -{ - return RB_EMPTY_ROOT(&tree->root); -} - -void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree); -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, - int nr_extents); -void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); -struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, - u64 bytenr); -int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, - int shared); -int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, - int shared); -int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); #endif diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f7b799b151aa..f726e72dd362 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -507,6 +507,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans, return 1; } + static int should_ignore_root(struct btrfs_root *root) { struct btrfs_root *reloc_root; @@ -529,7 +530,6 @@ static int should_ignore_root(struct btrfs_root *root) */ return 1; } - /* * find reloc tree by address of tree root */ diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 59a94c1d9815..3bcfe5a7c330 100644 --- 
a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -21,53 +21,6 @@ #include "disk-io.h" #include "print-tree.h" -/* - * search forward for a root, starting with objectid 'search_start' - * if a root key is found, the objectid we find is filled into 'found_objectid' - * and 0 is returned. < 0 is returned on error, 1 if there is nothing - * left in the tree. - */ -int btrfs_search_root(struct btrfs_root *root, u64 search_start, - u64 *found_objectid) -{ - struct btrfs_path *path; - struct btrfs_key search_key; - int ret; - - root = root->fs_info->tree_root; - search_key.objectid = search_start; - search_key.type = (u8)-1; - search_key.offset = (u64)-1; - - path = btrfs_alloc_path(); - BUG_ON(!path); -again: - ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); - if (ret < 0) - goto out; - if (ret == 0) { - ret = 1; - goto out; - } - if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { - ret = btrfs_next_leaf(root, path); - if (ret) - goto out; - } - btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]); - if (search_key.type != BTRFS_ROOT_ITEM_KEY) { - search_key.offset++; - btrfs_release_path(path); - goto again; - } - ret = 0; - *found_objectid = search_key.objectid; - -out: - btrfs_free_path(path); - return ret; -} - /* * lookup the root with the highest offset for a given objectid. The key we do * find is copied into 'key'. If we find something return 0, otherwise 1, < 0 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 4ce16ef702a3..ab9633fd72a4 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -189,71 +189,6 @@ static struct kobj_type btrfs_super_ktype = { /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; -int btrfs_sysfs_add_super(struct btrfs_fs_info *fs) -{ - int error; - char *name; - char c; - int len = strlen(fs->sb->s_id) + 1; - int i; - - name = kmalloc(len, GFP_NOFS); - if (!name) { - error = -ENOMEM; - goto fail; - } - - for (i = 0; i < len; i++) { - c = fs->sb->s_id[i]; - if (c == '/' || c == '\\') - c = '!'; - name[i] = c; - } - name[len] = '\0'; - - fs->super_kobj.kset = btrfs_kset; - error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype, - NULL, "%s", name); - kfree(name); - if (error) - goto fail; - - return 0; - -fail: - printk(KERN_ERR "btrfs: sysfs creation for super failed\n"); - return error; -} - -int btrfs_sysfs_add_root(struct btrfs_root *root) -{ - int error; - - error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype, - &root->fs_info->super_kobj, - "%s", root->name); - if (error) - goto fail; - - return 0; - -fail: - printk(KERN_ERR "btrfs: sysfs creation for root failed\n"); - return error; -} - -void btrfs_sysfs_del_root(struct btrfs_root *root) -{ - kobject_put(&root->root_kobj); - wait_for_completion(&root->kobj_unregister); -} - -void btrfs_sysfs_del_super(struct btrfs_fs_info *fs) -{ - kobject_put(&fs->super_kobj); - wait_for_completion(&fs->kobj_unregister); -} - int btrfs_init_sysfs(void) { btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e21130d3f98a..cd0b31a9ba3d 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -44,16 +44,6 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root); static DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); -void btrfs_lock_volumes(void) -{ - mutex_lock(&uuid_mutex); -} - -void btrfs_unlock_volumes(void) -{ - mutex_unlock(&uuid_mutex); -} - static void lock_chunks(struct btrfs_root *root) { mutex_lock(&root->fs_info->chunk_mutex); @@ -3688,15 +3678,6 @@ 
static int read_one_dev(struct btrfs_root *root, return ret; } -int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf) -{ - struct btrfs_dev_item *dev_item; - - dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block, dev_item); - return read_one_dev(root, buf, dev_item); -} - int btrfs_read_sys_array(struct btrfs_root *root) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 036b276b4860..5669ae8ea1c9 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -196,7 +196,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree); void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, int mirror_num, int async_submit); -int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder); int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, @@ -216,8 +215,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); int btrfs_init_new_device(struct btrfs_root *root, char *path); int btrfs_balance(struct btrfs_root *dev_root); -void btrfs_unlock_volumes(void); -void btrfs_lock_volumes(void); int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, -- cgit v1.2.2
From 182608c8294b5fe90d7bbd4b026c82bf0a24b736 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Thu, 5 May 2011 13:13:16 +0200
Subject: btrfs: remove old unused commented out code

Remove code which has been #if 0'ed out for a very long time and does not
seem to be related to the current codebase anymore.

Signed-off-by: David Sterba --- fs/btrfs/delayed-ref.c | 76 --- fs/btrfs/disk-io.c | 29 - fs/btrfs/extent-tree.c | 1661 +----------------------------------------------- fs/btrfs/inode.c | 172 ----- fs/btrfs/transaction.c | 134 ---- 5 files changed, 1 insertion(+), 2071 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index cb9b9a431fc9..125cf76fcd08 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -709,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr) return btrfs_delayed_node_to_head(ref); return NULL; } - -/* - * add a delayed ref to the tree. This does all of the accounting required - * to make sure the delayed ref is eventually processed before this - * transaction commits. - * - * The main point of this call is to add and remove a backreference in a single - * shot, taking the lock only once, and only searching for the head node once. - * - * It is the same as doing a ref add and delete in two separate calls.
- */ -#if 0 -int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, u64 orig_parent, - u64 parent, u64 orig_ref_root, u64 ref_root, - u64 orig_ref_generation, u64 ref_generation, - u64 owner_objectid, int pin) -{ - struct btrfs_delayed_ref *ref; - struct btrfs_delayed_ref *old_ref; - struct btrfs_delayed_ref_head *head_ref; - struct btrfs_delayed_ref_root *delayed_refs; - int ret; - - ref = kmalloc(sizeof(*ref), GFP_NOFS); - if (!ref) - return -ENOMEM; - - old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS); - if (!old_ref) { - kfree(ref); - return -ENOMEM; - } - - /* - * the parent = 0 case comes from cases where we don't actually - * know the parent yet. It will get updated later via a add/drop - * pair. - */ - if (parent == 0) - parent = bytenr; - if (orig_parent == 0) - orig_parent = bytenr; - - head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS); - if (!head_ref) { - kfree(ref); - kfree(old_ref); - return -ENOMEM; - } - delayed_refs = &trans->transaction->delayed_refs; - spin_lock(&delayed_refs->lock); - - /* - * insert both the head node and the new ref without dropping - * the spin lock - */ - ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes, - (u64)-1, 0, 0, 0, - BTRFS_UPDATE_DELAYED_HEAD, 0); - BUG_ON(ret); - - ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes, - parent, ref_root, ref_generation, - owner_objectid, BTRFS_ADD_DELAYED_REF, 0); - BUG_ON(ret); - - ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes, - orig_parent, orig_ref_root, - orig_ref_generation, owner_objectid, - BTRFS_DROP_DELAYED_REF, pin); - BUG_ON(ret); - spin_unlock(&delayed_refs->lock); - return 0; -} -#endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fa287c551ffc..de7b4770ab17 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1348,35 +1348,6 @@ fail: return ERR_PTR(ret); } -#if 0 - struct btrfs_root *root; - int ret; - - root = btrfs_read_fs_root_no_name(fs_info, location); - if (!root) - return NULL; - - if (root->in_sysfs) - return root; - - ret = btrfs_set_root_name(root, name, namelen); - if (ret) { - free_extent_buffer(root->node); - kfree(root); - return ERR_PTR(ret); - } - - ret = btrfs_sysfs_add_root(root); - if (ret) { - free_extent_buffer(root->node); - kfree(root->name); - kfree(root); - return ERR_PTR(ret); - } - root->in_sysfs = 1; - return root; -#endif - static int btrfs_congested_fn(void *congested_data, int bdi_bits) { struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index fba1348cb2a0..b457f195636e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2522,126 +2522,6 @@ out: return ret; } -#if 0 -int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct extent_buffer *buf, u32 nr_extents) -{ - struct btrfs_key key; - struct btrfs_file_extent_item *fi; - u64 root_gen; - u32 nritems; - int i; - int level; - int ret = 0; - int shared = 0; - - if (!root->ref_cows) - return 0; - - if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { - shared = 0; - root_gen = root->root_key.offset; - } else { - shared = 1; - root_gen = trans->transid - 1; - } - - level = btrfs_header_level(buf); - nritems = btrfs_header_nritems(buf); - - if (level == 0) { - struct btrfs_leaf_ref *ref; - struct btrfs_extent_info *info; - - ref = btrfs_alloc_leaf_ref(root, nr_extents); - if (!ref) { - ret = -ENOMEM; - goto out; - } - - ref->root_gen = root_gen; - ref->bytenr = 
buf->start; - ref->owner = btrfs_header_owner(buf); - ref->generation = btrfs_header_generation(buf); - ref->nritems = nr_extents; - info = ref->extents; - - for (i = 0; nr_extents > 0 && i < nritems; i++) { - u64 disk_bytenr; - btrfs_item_key_to_cpu(buf, &key, i); - if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(buf, i, - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(buf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); - if (disk_bytenr == 0) - continue; - - info->bytenr = disk_bytenr; - info->num_bytes = - btrfs_file_extent_disk_num_bytes(buf, fi); - info->objectid = key.objectid; - info->offset = key.offset; - info++; - } - - ret = btrfs_add_leaf_ref(root, ref, shared); - if (ret == -EEXIST && shared) { - struct btrfs_leaf_ref *old; - old = btrfs_lookup_leaf_ref(root, ref->bytenr); - BUG_ON(!old); - btrfs_remove_leaf_ref(root, old); - btrfs_free_leaf_ref(root, old); - ret = btrfs_add_leaf_ref(root, ref, shared); - } - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } -out: - return ret; -} - -/* when a block goes through cow, we update the reference counts of - * everything that block points to. The internal pointers of the block - * can be in just about any order, and it is likely to have clusters of - * things that are close together and clusters of things that are not. - * - * To help reduce the seeks that come with updating all of these reference - * counts, sort them by byte number before actual updates are done. - * - * struct refsort is used to match byte number to slot in the btree block. - * we sort based on the byte number and then use the slot to actually - * find the item. - * - * struct refsort is smaller than strcut btrfs_item and smaller than - * struct btrfs_key_ptr. Since we're currently limited to the page size - * for a btree block, there's no way for a kmalloc of refsorts for a - * single node to be bigger than a page. - */ -struct refsort { - u64 bytenr; - u32 slot; -}; - -/* - * for passing into sort() - */ -static int refsort_cmp(const void *a_void, const void *b_void) -{ - const struct refsort *a = a_void; - const struct refsort *b = b_void; - - if (a->bytenr < b->bytenr) - return -1; - if (a->bytenr > b->bytenr) - return 1; - return 0; -} -#endif - static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -3223,18 +3103,6 @@ commit_trans: goto again; } -#if 0 /* I hope we never need this code again, just in case */ - printk(KERN_ERR "no space left, need %llu, %llu bytes_used, " - "%llu bytes_reserved, " "%llu bytes_pinned, " - "%llu bytes_readonly, %llu may use %llu total\n", - (unsigned long long)bytes, - (unsigned long long)data_sinfo->bytes_used, - (unsigned long long)data_sinfo->bytes_reserved, - (unsigned long long)data_sinfo->bytes_pinned, - (unsigned long long)data_sinfo->bytes_readonly, - (unsigned long long)data_sinfo->bytes_may_use, - (unsigned long long)data_sinfo->total_bytes); -#endif return -ENOSPC; } data_sinfo->bytes_may_use += bytes; @@ -3867,23 +3735,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) u64 meta_used; u64 data_used; int csum_size = btrfs_super_csum_size(&fs_info->super_copy); -#if 0 - /* - * per tree used space accounting can be inaccuracy, so we - * can't rely on it. 
- */ - spin_lock(&fs_info->extent_root->accounting_lock); - num_bytes = btrfs_root_used(&fs_info->extent_root->root_item); - spin_unlock(&fs_info->extent_root->accounting_lock); - spin_lock(&fs_info->csum_root->accounting_lock); - num_bytes += btrfs_root_used(&fs_info->csum_root->root_item); - spin_unlock(&fs_info->csum_root->accounting_lock); - - spin_lock(&fs_info->tree_root->accounting_lock); - num_bytes += btrfs_root_used(&fs_info->tree_root->root_item); - spin_unlock(&fs_info->tree_root->accounting_lock); -#endif sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); spin_lock(&sinfo->lock); data_used = sinfo->bytes_used; @@ -3936,10 +3788,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->reserved = block_rsv->size; block_rsv->full = 1; } -#if 0 - printk(KERN_INFO"global block rsv size %llu reserved %llu\n", - block_rsv->size, block_rsv->reserved); -#endif + spin_unlock(&sinfo->lock); spin_unlock(&block_rsv->lock); } @@ -6596,1514 +6445,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, return ret; } -#if 0 -static unsigned long calc_ra(unsigned long start, unsigned long last, - unsigned long nr) -{ - return min(last, start + nr - 1); -} - -static noinline int relocate_inode_pages(struct inode *inode, u64 start, - u64 len) -{ - u64 page_start; - u64 page_end; - unsigned long first_index; - unsigned long last_index; - unsigned long i; - struct page *page; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct file_ra_state *ra; - struct btrfs_ordered_extent *ordered; - unsigned int total_read = 0; - unsigned int total_dirty = 0; - int ret = 0; - - ra = kzalloc(sizeof(*ra), GFP_NOFS); - if (!ra) - return -ENOMEM; - - mutex_lock(&inode->i_mutex); - first_index = start >> PAGE_CACHE_SHIFT; - last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; - - /* make sure the dirty trick played by the caller work */ - ret = invalidate_inode_pages2_range(inode->i_mapping, - first_index, last_index); - if (ret) - goto out_unlock; - - file_ra_state_init(ra, inode->i_mapping); - - for (i = first_index ; i <= last_index; i++) { - if (total_read % ra->ra_pages == 0) { - btrfs_force_ra(inode->i_mapping, ra, NULL, i, - calc_ra(i, last_index, ra->ra_pages)); - } - total_read++; -again: - if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode)) - BUG_ON(1); - page = grab_cache_page(inode->i_mapping, i); - if (!page) { - ret = -ENOMEM; - goto out_unlock; - } - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - page_cache_release(page); - ret = -EIO; - goto out_unlock; - } - } - wait_on_page_writeback(page); - - page_start = (u64)page->index << PAGE_CACHE_SHIFT; - page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); - - ordered = btrfs_lookup_ordered_extent(inode, page_start); - if (ordered) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - goto again; - } - set_page_extent_mapped(page); - - if (i == first_index) - set_extent_bits(io_tree, page_start, page_end, - EXTENT_BOUNDARY, GFP_NOFS); - btrfs_set_extent_delalloc(inode, page_start, page_end); - - set_page_dirty(page); - total_dirty++; - - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - } - -out_unlock: - kfree(ra); - mutex_unlock(&inode->i_mutex); - 
balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty); - return ret; -} - -static noinline int relocate_data_extent(struct inode *reloc_inode, - struct btrfs_key *extent_key, - u64 offset) -{ - struct btrfs_root *root = BTRFS_I(reloc_inode)->root; - struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree; - struct extent_map *em; - u64 start = extent_key->objectid - offset; - u64 end = start + extent_key->offset - 1; - - em = alloc_extent_map(); - BUG_ON(!em); - - em->start = start; - em->len = extent_key->offset; - em->block_len = extent_key->offset; - em->block_start = extent_key->objectid; - em->bdev = root->fs_info->fs_devices->latest_bdev; - set_bit(EXTENT_FLAG_PINNED, &em->flags); - - /* setup extent map to cheat btrfs_readpage */ - lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); - while (1) { - int ret; - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(reloc_inode, start, end, 0); - } - unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); - - return relocate_inode_pages(reloc_inode, start, extent_key->offset); -} - -struct btrfs_ref_path { - u64 extent_start; - u64 nodes[BTRFS_MAX_LEVEL]; - u64 root_objectid; - u64 root_generation; - u64 owner_objectid; - u32 num_refs; - int lowest_level; - int current_level; - int shared_level; - - struct btrfs_key node_keys[BTRFS_MAX_LEVEL]; - u64 new_nodes[BTRFS_MAX_LEVEL]; -}; - -struct disk_extent { - u64 ram_bytes; - u64 disk_bytenr; - u64 disk_num_bytes; - u64 offset; - u64 num_bytes; - u8 compression; - u8 encryption; - u16 other_encoding; -}; - -static int is_cowonly_root(u64 root_objectid) -{ - if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || - root_objectid == BTRFS_EXTENT_TREE_OBJECTID || - root_objectid == BTRFS_CHUNK_TREE_OBJECTID || - root_objectid == BTRFS_DEV_TREE_OBJECTID || - root_objectid == BTRFS_TREE_LOG_OBJECTID || - root_objectid == BTRFS_CSUM_TREE_OBJECTID) - return 1; - return 0; -} - -static noinline int __next_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path, - int first_time) -{ - struct extent_buffer *leaf; - struct btrfs_path *path; - struct btrfs_extent_ref *ref; - struct btrfs_key key; - struct btrfs_key found_key; - u64 bytenr; - u32 nritems; - int level; - int ret = 1; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - if (first_time) { - ref_path->lowest_level = -1; - ref_path->current_level = -1; - ref_path->shared_level = -1; - goto walk_up; - } -walk_down: - level = ref_path->current_level - 1; - while (level >= -1) { - u64 parent; - if (level < ref_path->lowest_level) - break; - - if (level >= 0) - bytenr = ref_path->nodes[level]; - else - bytenr = ref_path->extent_start; - BUG_ON(bytenr == 0); - - parent = ref_path->nodes[level + 1]; - ref_path->nodes[level + 1] = 0; - ref_path->current_level = level; - BUG_ON(parent == 0); - - key.objectid = bytenr; - key.offset = parent + 1; - key.type = BTRFS_EXTENT_REF_KEY; - - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); - if (ret < 0) - goto out; - BUG_ON(ret == 0); - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(extent_root, path); - if (ret < 0) - goto out; - if (ret > 0) - goto next; - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid == 
bytenr && - found_key.type == BTRFS_EXTENT_REF_KEY) { - if (level < ref_path->shared_level) - ref_path->shared_level = level; - goto found; - } -next: - level--; - btrfs_release_path(extent_root, path); - cond_resched(); - } - /* reached lowest level */ - ret = 1; - goto out; -walk_up: - level = ref_path->current_level; - while (level < BTRFS_MAX_LEVEL - 1) { - u64 ref_objectid; - - if (level >= 0) - bytenr = ref_path->nodes[level]; - else - bytenr = ref_path->extent_start; - - BUG_ON(bytenr == 0); - - key.objectid = bytenr; - key.offset = 0; - key.type = BTRFS_EXTENT_REF_KEY; - - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); - if (ret < 0) - goto out; - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(extent_root, path); - if (ret < 0) - goto out; - if (ret > 0) { - /* the extent was freed by someone */ - if (ref_path->lowest_level == level) - goto out; - btrfs_release_path(extent_root, path); - goto walk_down; - } - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != bytenr || - found_key.type != BTRFS_EXTENT_REF_KEY) { - /* the extent was freed by someone */ - if (ref_path->lowest_level == level) { - ret = 1; - goto out; - } - btrfs_release_path(extent_root, path); - goto walk_down; - } -found: - ref = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_extent_ref); - ref_objectid = btrfs_ref_objectid(leaf, ref); - if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) { - if (first_time) { - level = (int)ref_objectid; - BUG_ON(level >= BTRFS_MAX_LEVEL); - ref_path->lowest_level = level; - ref_path->current_level = level; - ref_path->nodes[level] = bytenr; - } else { - WARN_ON(ref_objectid != level); - } - } else { - WARN_ON(level != -1); - } - first_time = 0; - - if (ref_path->lowest_level == level) { - ref_path->owner_objectid = ref_objectid; - ref_path->num_refs = btrfs_ref_num_refs(leaf, ref); - } - - /* - * the block is tree root or the block isn't in reference - * counted tree. - */ - if (found_key.objectid == found_key.offset || - is_cowonly_root(btrfs_ref_root(leaf, ref))) { - ref_path->root_objectid = btrfs_ref_root(leaf, ref); - ref_path->root_generation = - btrfs_ref_generation(leaf, ref); - if (level < 0) { - /* special reference from the tree log */ - ref_path->nodes[0] = found_key.offset; - ref_path->current_level = 0; - } - ret = 0; - goto out; - } - - level++; - BUG_ON(ref_path->nodes[level] != 0); - ref_path->nodes[level] = found_key.offset; - ref_path->current_level = level; - - /* - * the reference was created in the running transaction, - * no need to continue walking up. - */ - if (btrfs_ref_generation(leaf, ref) == trans->transid) { - ref_path->root_objectid = btrfs_ref_root(leaf, ref); - ref_path->root_generation = - btrfs_ref_generation(leaf, ref); - ret = 0; - goto out; - } - - btrfs_release_path(extent_root, path); - cond_resched(); - } - /* reached max tree level, but no tree root found. 
*/ - BUG(); -out: - btrfs_free_path(path); - return ret; -} - -static int btrfs_first_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path, - u64 extent_start) -{ - memset(ref_path, 0, sizeof(*ref_path)); - ref_path->extent_start = extent_start; - - return __next_ref_path(trans, extent_root, ref_path, 1); -} - -static int btrfs_next_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path) -{ - return __next_ref_path(trans, extent_root, ref_path, 0); -} - -static noinline int get_new_locations(struct inode *reloc_inode, - struct btrfs_key *extent_key, - u64 offset, int no_fragment, - struct disk_extent **extents, - int *nr_extents) -{ - struct btrfs_root *root = BTRFS_I(reloc_inode)->root; - struct btrfs_path *path; - struct btrfs_file_extent_item *fi; - struct extent_buffer *leaf; - struct disk_extent *exts = *extents; - struct btrfs_key found_key; - u64 cur_pos; - u64 last_byte; - u32 nritems; - int nr = 0; - int max = *nr_extents; - int ret; - - WARN_ON(!no_fragment && *extents); - if (!exts) { - max = 1; - exts = kmalloc(sizeof(*exts) * max, GFP_NOFS); - if (!exts) - return -ENOMEM; - } - - path = btrfs_alloc_path(); - if (!path) { - if (exts != *extents) - kfree(exts); - return -ENOMEM; - } - - cur_pos = extent_key->objectid - offset; - last_byte = extent_key->objectid + extent_key->offset; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, - cur_pos, 0); - if (ret < 0) - goto out; - if (ret > 0) { - ret = -ENOENT; - goto out; - } - - while (1) { - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.offset != cur_pos || - found_key.type != BTRFS_EXTENT_DATA_KEY || - found_key.objectid != reloc_inode->i_ino) - break; - - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) != - BTRFS_FILE_EXTENT_REG || - btrfs_file_extent_disk_bytenr(leaf, fi) == 0) - break; - - if (nr == max) { - struct disk_extent *old = exts; - max *= 2; - exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); - if (!exts) { - ret = -ENOMEM; - goto out; - } - memcpy(exts, old, sizeof(*exts) * nr); - if (old != *extents) - kfree(old); - } - - exts[nr].disk_bytenr = - btrfs_file_extent_disk_bytenr(leaf, fi); - exts[nr].disk_num_bytes = - btrfs_file_extent_disk_num_bytes(leaf, fi); - exts[nr].offset = btrfs_file_extent_offset(leaf, fi); - exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); - exts[nr].compression = btrfs_file_extent_compression(leaf, fi); - exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); - exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, - fi); - BUG_ON(exts[nr].offset > 0); - BUG_ON(exts[nr].compression || exts[nr].encryption); - BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); - - cur_pos += exts[nr].num_bytes; - nr++; - - if (cur_pos + offset >= last_byte) - break; - - if (no_fragment) { - ret = 1; - goto out; - } - path->slots[0]++; - } - - BUG_ON(cur_pos + offset > last_byte); - if (cur_pos + offset < last_byte) { - ret = -ENOENT; - goto out; - } - ret = 0; -out: - btrfs_free_path(path); - if (ret) { - if (exts != *extents) - kfree(exts); - } else { - *extents = exts; - 
*nr_extents = nr; - } - return ret; -} - -static noinline int replace_one_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *extent_key, - struct btrfs_key *leaf_key, - struct btrfs_ref_path *ref_path, - struct disk_extent *new_extents, - int nr_extents) -{ - struct extent_buffer *leaf; - struct btrfs_file_extent_item *fi; - struct inode *inode = NULL; - struct btrfs_key key; - u64 lock_start = 0; - u64 lock_end = 0; - u64 num_bytes; - u64 ext_offset; - u64 search_end = (u64)-1; - u32 nritems; - int nr_scaned = 0; - int extent_locked = 0; - int extent_type; - int ret; - - memcpy(&key, leaf_key, sizeof(key)); - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { - if (key.objectid < ref_path->owner_objectid || - (key.objectid == ref_path->owner_objectid && - key.type < BTRFS_EXTENT_DATA_KEY)) { - key.objectid = ref_path->owner_objectid; - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = 0; - } - } - - while (1) { - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); - if (ret < 0) - goto out; - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); -next: - if (extent_locked && ret > 0) { - /* - * the file extent item was modified by someone - * before the extent got locked. - */ - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - extent_locked = 0; - } - - if (path->slots[0] >= nritems) { - if (++nr_scaned > 2) - break; - - BUG_ON(extent_locked); - ret = btrfs_next_leaf(root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - } - - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { - if ((key.objectid > ref_path->owner_objectid) || - (key.objectid == ref_path->owner_objectid && - key.type > BTRFS_EXTENT_DATA_KEY) || - key.offset >= search_end) - break; - } - - if (inode && key.objectid != inode->i_ino) { - BUG_ON(extent_locked); - btrfs_release_path(root, path); - mutex_unlock(&inode->i_mutex); - iput(inode); - inode = NULL; - continue; - } - - if (key.type != BTRFS_EXTENT_DATA_KEY) { - path->slots[0]++; - ret = 1; - goto next; - } - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - extent_type = btrfs_file_extent_type(leaf, fi); - if ((extent_type != BTRFS_FILE_EXTENT_REG && - extent_type != BTRFS_FILE_EXTENT_PREALLOC) || - (btrfs_file_extent_disk_bytenr(leaf, fi) != - extent_key->objectid)) { - path->slots[0]++; - ret = 1; - goto next; - } - - num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - ext_offset = btrfs_file_extent_offset(leaf, fi); - - if (search_end == (u64)-1) { - search_end = key.offset - ext_offset + - btrfs_file_extent_ram_bytes(leaf, fi); - } - - if (!extent_locked) { - lock_start = key.offset; - lock_end = lock_start + num_bytes - 1; - } else { - if (lock_start > key.offset || - lock_end + 1 < key.offset + num_bytes) { - unlock_extent(&BTRFS_I(inode)->io_tree, - lock_start, lock_end, GFP_NOFS); - extent_locked = 0; - } - } - - if (!inode) { - btrfs_release_path(root, path); - - inode = btrfs_iget_locked(root->fs_info->sb, - key.objectid, root); - if (inode->i_state & I_NEW) { - BTRFS_I(inode)->root = root; - BTRFS_I(inode)->location.objectid = - key.objectid; - BTRFS_I(inode)->location.type = - BTRFS_INODE_ITEM_KEY; - BTRFS_I(inode)->location.offset = 0; - btrfs_read_locked_inode(inode); - unlock_new_inode(inode); - } - /* - * some code call btrfs_commit_transaction while - * holding the 
i_mutex, so we can't use mutex_lock - * here. - */ - if (is_bad_inode(inode) || - !mutex_trylock(&inode->i_mutex)) { - iput(inode); - inode = NULL; - key.offset = (u64)-1; - goto skip; - } - } - - if (!extent_locked) { - struct btrfs_ordered_extent *ordered; - - btrfs_release_path(root, path); - - lock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - ordered = btrfs_lookup_first_ordered_extent(inode, - lock_end); - if (ordered && - ordered->file_offset <= lock_end && - ordered->file_offset + ordered->len > lock_start) { - unlock_extent(&BTRFS_I(inode)->io_tree, - lock_start, lock_end, GFP_NOFS); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - key.offset += num_bytes; - goto skip; - } - if (ordered) - btrfs_put_ordered_extent(ordered); - - extent_locked = 1; - continue; - } - - if (nr_extents == 1) { - /* update extent pointer in place */ - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extents[0].disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extents[0].disk_num_bytes); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, key.offset, - key.offset + num_bytes - 1, 0); - - ret = btrfs_inc_extent_ref(trans, root, - new_extents[0].disk_bytenr, - new_extents[0].disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, - key.objectid); - BUG_ON(ret); - - ret = btrfs_free_extent(trans, root, - extent_key->objectid, - extent_key->offset, - leaf->start, - btrfs_header_owner(leaf), - btrfs_header_generation(leaf), - key.objectid, 0); - BUG_ON(ret); - - btrfs_release_path(root, path); - key.offset += num_bytes; - } else { - BUG_ON(1); -#if 0 - u64 alloc_hint; - u64 extent_len; - int i; - /* - * drop old extent pointer at first, then insert the - * new pointers one bye one - */ - btrfs_release_path(root, path); - ret = btrfs_drop_extents(trans, root, inode, key.offset, - key.offset + num_bytes, - key.offset, &alloc_hint); - BUG_ON(ret); - - for (i = 0; i < nr_extents; i++) { - if (ext_offset >= new_extents[i].num_bytes) { - ext_offset -= new_extents[i].num_bytes; - continue; - } - extent_len = min(new_extents[i].num_bytes - - ext_offset, num_bytes); - - ret = btrfs_insert_empty_item(trans, root, - path, &key, - sizeof(*fi)); - BUG_ON(ret); - - leaf = path->nodes[0]; - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, fi, - trans->transid); - btrfs_set_file_extent_type(leaf, fi, - BTRFS_FILE_EXTENT_REG); - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extents[i].disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extents[i].disk_num_bytes); - btrfs_set_file_extent_ram_bytes(leaf, fi, - new_extents[i].ram_bytes); - - btrfs_set_file_extent_compression(leaf, fi, - new_extents[i].compression); - btrfs_set_file_extent_encryption(leaf, fi, - new_extents[i].encryption); - btrfs_set_file_extent_other_encoding(leaf, fi, - new_extents[i].other_encoding); - - btrfs_set_file_extent_num_bytes(leaf, fi, - extent_len); - ext_offset += new_extents[i].offset; - btrfs_set_file_extent_offset(leaf, fi, - ext_offset); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, key.offset, - key.offset + extent_len - 1, 0); - - ret = btrfs_inc_extent_ref(trans, root, - new_extents[i].disk_bytenr, - new_extents[i].disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, key.objectid); - BUG_ON(ret); - btrfs_release_path(root, path); - - inode_add_bytes(inode, extent_len); - - ext_offset = 0; - 
num_bytes -= extent_len; - key.offset += extent_len; - - if (num_bytes == 0) - break; - } - BUG_ON(i >= nr_extents); -#endif - } - - if (extent_locked) { - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - extent_locked = 0; - } -skip: - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && - key.offset >= search_end) - break; - - cond_resched(); - } - ret = 0; -out: - btrfs_release_path(root, path); - if (inode) { - mutex_unlock(&inode->i_mutex); - if (extent_locked) { - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - } - iput(inode); - } - return ret; -} - -int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *buf, u64 orig_start) -{ - int level; - int ret; - - BUG_ON(btrfs_header_generation(buf) != trans->transid); - BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); - - level = btrfs_header_level(buf); - if (level == 0) { - struct btrfs_leaf_ref *ref; - struct btrfs_leaf_ref *orig_ref; - - orig_ref = btrfs_lookup_leaf_ref(root, orig_start); - if (!orig_ref) - return -ENOENT; - - ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems); - if (!ref) { - btrfs_free_leaf_ref(root, orig_ref); - return -ENOMEM; - } - - ref->nritems = orig_ref->nritems; - memcpy(ref->extents, orig_ref->extents, - sizeof(ref->extents[0]) * ref->nritems); - - btrfs_free_leaf_ref(root, orig_ref); - - ref->root_gen = trans->transid; - ref->bytenr = buf->start; - ref->owner = btrfs_header_owner(buf); - ref->generation = btrfs_header_generation(buf); - - ret = btrfs_add_leaf_ref(root, ref, 0); - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } - return 0; -} - -static noinline int invalidate_extent_cache(struct btrfs_root *root, - struct extent_buffer *leaf, - struct btrfs_block_group_cache *group, - struct btrfs_root *target_root) -{ - struct btrfs_key key; - struct inode *inode = NULL; - struct btrfs_file_extent_item *fi; - struct extent_state *cached_state = NULL; - u64 num_bytes; - u64 skip_objectid = 0; - u32 nritems; - u32 i; - - nritems = btrfs_header_nritems(leaf); - for (i = 0; i < nritems; i++) { - btrfs_item_key_to_cpu(leaf, &key, i); - if (key.objectid == skip_objectid || - key.type != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) - continue; - if (!inode || inode->i_ino != key.objectid) { - iput(inode); - inode = btrfs_ilookup(target_root->fs_info->sb, - key.objectid, target_root, 1); - } - if (!inode) { - skip_objectid = key.objectid; - continue; - } - num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - - lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, 0, &cached_state, - GFP_NOFS); - btrfs_drop_extent_cache(inode, key.offset, - key.offset + num_bytes - 1, 1); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, &cached_state, - GFP_NOFS); - cond_resched(); - } - iput(inode); - return 0; -} - -static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *leaf, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode) -{ - struct btrfs_key key; - struct btrfs_key extent_key; - struct btrfs_file_extent_item *fi; - struct btrfs_leaf_ref *ref; - struct disk_extent *new_extent; - u64 bytenr; - u64 num_bytes; - u32 nritems; - u32 i; - int 
ext_index; - int nr_extent; - int ret; - - new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); - if (!new_extent) - return -ENOMEM; - - ref = btrfs_lookup_leaf_ref(root, leaf->start); - BUG_ON(!ref); - - ext_index = -1; - nritems = btrfs_header_nritems(leaf); - for (i = 0; i < nritems; i++) { - btrfs_item_key_to_cpu(leaf, &key, i); - if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); - num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); - if (bytenr == 0) - continue; - - ext_index++; - if (bytenr >= group->key.objectid + group->key.offset || - bytenr + num_bytes <= group->key.objectid) - continue; - - extent_key.objectid = bytenr; - extent_key.offset = num_bytes; - extent_key.type = BTRFS_EXTENT_ITEM_KEY; - nr_extent = 1; - ret = get_new_locations(reloc_inode, &extent_key, - group->key.objectid, 1, - &new_extent, &nr_extent); - if (ret > 0) - continue; - BUG_ON(ret < 0); - - BUG_ON(ref->extents[ext_index].bytenr != bytenr); - BUG_ON(ref->extents[ext_index].num_bytes != num_bytes); - ref->extents[ext_index].bytenr = new_extent->disk_bytenr; - ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; - - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extent->disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extent->disk_num_bytes); - btrfs_mark_buffer_dirty(leaf); - - ret = btrfs_inc_extent_ref(trans, root, - new_extent->disk_bytenr, - new_extent->disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, key.objectid); - BUG_ON(ret); - - ret = btrfs_free_extent(trans, root, - bytenr, num_bytes, leaf->start, - btrfs_header_owner(leaf), - btrfs_header_generation(leaf), - key.objectid, 0); - BUG_ON(ret); - cond_resched(); - } - kfree(new_extent); - BUG_ON(ext_index + 1 != ref->nritems); - btrfs_free_leaf_ref(root, ref); - return 0; -} - -int btrfs_free_reloc_root(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - int ret; - - if (root->reloc_root) { - reloc_root = root->reloc_root; - root->reloc_root = NULL; - list_add(&reloc_root->dead_list, - &root->fs_info->dead_reloc_roots); - - btrfs_set_root_bytenr(&reloc_root->root_item, - reloc_root->node->start); - btrfs_set_root_level(&root->root_item, - btrfs_header_level(reloc_root->node)); - memset(&reloc_root->root_item.drop_progress, 0, - sizeof(struct btrfs_disk_key)); - reloc_root->root_item.drop_level = 0; - - ret = btrfs_update_root(trans, root->fs_info->tree_root, - &reloc_root->root_key, - &reloc_root->root_item); - BUG_ON(ret); - } - return 0; -} - -int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *reloc_root; - struct btrfs_root *prev_root = NULL; - struct list_head dead_roots; - int ret; - unsigned long nr; - - INIT_LIST_HEAD(&dead_roots); - list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots); - - while (!list_empty(&dead_roots)) { - reloc_root = list_entry(dead_roots.prev, - struct btrfs_root, dead_list); - list_del_init(&reloc_root->dead_list); - - BUG_ON(reloc_root->commit_root != NULL); - while (1) { - trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - - mutex_lock(&root->fs_info->drop_mutex); - ret = btrfs_drop_snapshot(trans, reloc_root); - if (ret != -EAGAIN) - break; - mutex_unlock(&root->fs_info->drop_mutex); - - nr = 
trans->blocks_used; - ret = btrfs_end_transaction(trans, root); - BUG_ON(ret); - btrfs_btree_balance_dirty(root, nr); - } - - free_extent_buffer(reloc_root->node); - - ret = btrfs_del_root(trans, root->fs_info->tree_root, - &reloc_root->root_key); - BUG_ON(ret); - mutex_unlock(&root->fs_info->drop_mutex); - - nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, root); - BUG_ON(ret); - btrfs_btree_balance_dirty(root, nr); - - kfree(prev_root); - prev_root = reloc_root; - } - if (prev_root) { - btrfs_remove_leaf_refs(prev_root, (u64)-1, 0); - kfree(prev_root); - } - return 0; -} - -int btrfs_add_dead_reloc_root(struct btrfs_root *root) -{ - list_add(&root->dead_list, &root->fs_info->dead_reloc_roots); - return 0; -} - -int btrfs_cleanup_reloc_trees(struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - struct btrfs_trans_handle *trans; - struct btrfs_key location; - int found; - int ret; - - mutex_lock(&root->fs_info->tree_reloc_mutex); - ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL); - BUG_ON(ret); - found = !list_empty(&root->fs_info->dead_reloc_roots); - mutex_unlock(&root->fs_info->tree_reloc_mutex); - - if (found) { - trans = btrfs_start_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - ret = btrfs_commit_transaction(trans, root); - BUG_ON(ret); - } - - location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID; - location.offset = (u64)-1; - location.type = BTRFS_ROOT_ITEM_KEY; - - reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); - BUG_ON(!reloc_root); - ret = btrfs_orphan_cleanup(reloc_root); - BUG_ON(ret); - return 0; -} - -static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - struct extent_buffer *eb; - struct btrfs_root_item *root_item; - struct btrfs_key root_key; - int ret; - - BUG_ON(!root->ref_cows); - if (root->reloc_root) - return 0; - - root_item = kmalloc(sizeof(*root_item), GFP_NOFS); - if (!root_item) - return -ENOMEM; - - ret = btrfs_copy_root(trans, root, root->commit_root, - &eb, BTRFS_TREE_RELOC_OBJECTID); - BUG_ON(ret); - - root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; - root_key.offset = root->root_key.objectid; - root_key.type = BTRFS_ROOT_ITEM_KEY; - - memcpy(root_item, &root->root_item, sizeof(root_item)); - btrfs_set_root_refs(root_item, 0); - btrfs_set_root_bytenr(root_item, eb->start); - btrfs_set_root_level(root_item, btrfs_header_level(eb)); - btrfs_set_root_generation(root_item, trans->transid); - - btrfs_tree_unlock(eb); - free_extent_buffer(eb); - - ret = btrfs_insert_root(trans, root->fs_info->tree_root, - &root_key, root_item); - BUG_ON(ret); - kfree(root_item); - - reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, - &root_key); - BUG_ON(IS_ERR(reloc_root)); - reloc_root->last_trans = trans->transid; - reloc_root->commit_root = NULL; - reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; - - root->reloc_root = reloc_root; - return 0; -} - -/* - * Core function of space balance. - * - * The idea is using reloc trees to relocate tree blocks in reference - * counted roots. There is one reloc tree for each subvol, and all - * reloc trees share same root key objectid. Reloc trees are snapshots - * of the latest committed roots of subvols (root->commit_root). - * - * To relocate a tree block referenced by a subvol, there are two steps. - * COW the block through subvol's reloc tree, then update block pointer - * in the subvol to point to the new block. 
Since all reloc trees share - * same root key objectid, doing special handing for tree blocks owned - * by them is easy. Once a tree block has been COWed in one reloc tree, - * we can use the resulting new block directly when the same block is - * required to COW again through other reloc trees. By this way, relocated - * tree blocks are shared between reloc trees, so they are also shared - * between subvols. - */ -static noinline int relocate_one_path(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *first_key, - struct btrfs_ref_path *ref_path, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode) -{ - struct btrfs_root *reloc_root; - struct extent_buffer *eb = NULL; - struct btrfs_key *keys; - u64 *nodes; - int level; - int shared_level; - int lowest_level = 0; - int ret; - - if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) - lowest_level = ref_path->owner_objectid; - - if (!root->ref_cows) { - path->lowest_level = lowest_level; - ret = btrfs_search_slot(trans, root, first_key, path, 0, 1); - BUG_ON(ret < 0); - path->lowest_level = 0; - btrfs_release_path(root, path); - return 0; - } - - mutex_lock(&root->fs_info->tree_reloc_mutex); - ret = init_reloc_tree(trans, root); - BUG_ON(ret); - reloc_root = root->reloc_root; - - shared_level = ref_path->shared_level; - ref_path->shared_level = BTRFS_MAX_LEVEL - 1; - - keys = ref_path->node_keys; - nodes = ref_path->new_nodes; - memset(&keys[shared_level + 1], 0, - sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1)); - memset(&nodes[shared_level + 1], 0, - sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1)); - - if (nodes[lowest_level] == 0) { - path->lowest_level = lowest_level; - ret = btrfs_search_slot(trans, reloc_root, first_key, path, - 0, 1); - BUG_ON(ret); - for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) { - eb = path->nodes[level]; - if (!eb || eb == reloc_root->node) - break; - nodes[level] = eb->start; - if (level == 0) - btrfs_item_key_to_cpu(eb, &keys[level], 0); - else - btrfs_node_key_to_cpu(eb, &keys[level], 0); - } - if (nodes[0] && - ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - eb = path->nodes[0]; - ret = replace_extents_in_leaf(trans, reloc_root, eb, - group, reloc_inode); - BUG_ON(ret); - } - btrfs_release_path(reloc_root, path); - } else { - ret = btrfs_merge_path(trans, reloc_root, keys, nodes, - lowest_level); - BUG_ON(ret); - } - - /* - * replace tree blocks in the fs tree with tree blocks in - * the reloc tree. 
- */ - ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level); - BUG_ON(ret < 0); - - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - ret = btrfs_search_slot(trans, reloc_root, first_key, path, - 0, 0); - BUG_ON(ret); - extent_buffer_get(path->nodes[0]); - eb = path->nodes[0]; - btrfs_release_path(reloc_root, path); - ret = invalidate_extent_cache(reloc_root, eb, group, root); - BUG_ON(ret); - free_extent_buffer(eb); - } - - mutex_unlock(&root->fs_info->tree_reloc_mutex); - path->lowest_level = 0; - return 0; -} - -static noinline int relocate_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *first_key, - struct btrfs_ref_path *ref_path) -{ - int ret; - - ret = relocate_one_path(trans, root, path, first_key, - ref_path, NULL, NULL); - BUG_ON(ret); - - return 0; -} - -static noinline int del_extent_zero(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_path *path, - struct btrfs_key *extent_key) -{ - int ret; - - ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); - if (ret) - goto out; - ret = btrfs_del_item(trans, extent_root, path); -out: - btrfs_release_path(extent_root, path); - return ret; -} - -static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info, - struct btrfs_ref_path *ref_path) -{ - struct btrfs_key root_key; - - root_key.objectid = ref_path->root_objectid; - root_key.type = BTRFS_ROOT_ITEM_KEY; - if (is_cowonly_root(ref_path->root_objectid)) - root_key.offset = 0; - else - root_key.offset = (u64)-1; - - return btrfs_read_fs_root_no_name(fs_info, &root_key); -} - -static noinline int relocate_one_extent(struct btrfs_root *extent_root, - struct btrfs_path *path, - struct btrfs_key *extent_key, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode, int pass) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *found_root; - struct btrfs_ref_path *ref_path = NULL; - struct disk_extent *new_extents = NULL; - int nr_extents = 0; - int loops; - int ret; - int level; - struct btrfs_key first_key; - u64 prev_block = 0; - - - trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(IS_ERR(trans)); - - if (extent_key->objectid == 0) { - ret = del_extent_zero(trans, extent_root, path, extent_key); - goto out; - } - - ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); - if (!ref_path) { - ret = -ENOMEM; - goto out; - } - - for (loops = 0; ; loops++) { - if (loops == 0) { - ret = btrfs_first_ref_path(trans, extent_root, ref_path, - extent_key->objectid); - } else { - ret = btrfs_next_ref_path(trans, extent_root, ref_path); - } - if (ret < 0) - goto out; - if (ret > 0) - break; - - if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID || - ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID) - continue; - - found_root = read_ref_root(extent_root->fs_info, ref_path); - BUG_ON(!found_root); - /* - * for reference counted tree, only process reference paths - * rooted at the latest committed root. 
- */ - if (found_root->ref_cows && - ref_path->root_generation != found_root->root_key.offset) - continue; - - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - if (pass == 0) { - /* - * copy data extents to new locations - */ - u64 group_start = group->key.objectid; - ret = relocate_data_extent(reloc_inode, - extent_key, - group_start); - if (ret < 0) - goto out; - break; - } - level = 0; - } else { - level = ref_path->owner_objectid; - } - - if (prev_block != ref_path->nodes[level]) { - struct extent_buffer *eb; - u64 block_start = ref_path->nodes[level]; - u64 block_size = btrfs_level_size(found_root, level); - - eb = read_tree_block(found_root, block_start, - block_size, 0); - if (!eb) { - ret = -EIO; - goto out; - } - btrfs_tree_lock(eb); - BUG_ON(level != btrfs_header_level(eb)); - - if (level == 0) - btrfs_item_key_to_cpu(eb, &first_key, 0); - else - btrfs_node_key_to_cpu(eb, &first_key, 0); - - btrfs_tree_unlock(eb); - free_extent_buffer(eb); - prev_block = block_start; - } - - mutex_lock(&extent_root->fs_info->trans_mutex); - btrfs_record_root_in_trans(found_root); - mutex_unlock(&extent_root->fs_info->trans_mutex); - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - /* - * try to update data extent references while - * keeping metadata shared between snapshots. - */ - if (pass == 1) { - ret = relocate_one_path(trans, found_root, - path, &first_key, ref_path, - group, reloc_inode); - if (ret < 0) - goto out; - continue; - } - /* - * use fallback method to process the remaining - * references. - */ - if (!new_extents) { - u64 group_start = group->key.objectid; - new_extents = kmalloc(sizeof(*new_extents), - GFP_NOFS); - if (!new_extents) { - ret = -ENOMEM; - goto out; - } - nr_extents = 1; - ret = get_new_locations(reloc_inode, - extent_key, - group_start, 1, - &new_extents, - &nr_extents); - if (ret) - goto out; - } - ret = replace_one_extent(trans, found_root, - path, extent_key, - &first_key, ref_path, - new_extents, nr_extents); - } else { - ret = relocate_tree_block(trans, found_root, path, - &first_key, ref_path); - } - if (ret < 0) - goto out; - } - ret = 0; -out: - btrfs_end_transaction(trans, extent_root); - kfree(new_extents); - kfree(ref_path); - return ret; -} -#endif - static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) { u64 num_devices; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 57122a5e8473..5ff52b644a60 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3093,178 +3093,6 @@ out: return err; } -#if 0 -/* - * when truncating bytes in a file, it is possible to avoid reading - * the leaves that contain only checksum items. This can be the - * majority of the IO required to delete a large file, but it must - * be done carefully. - * - * The keys in the level just above the leaves are checked to make sure - * the lowest key in a given leaf is a csum key, and starts at an offset - * after the new size. - * - * Then the key for the next leaf is checked to make sure it also has - * a checksum item for the same file. If it does, we know our target leaf - * contains only checksum items, and it can be safely freed without reading - * it. - * - * This is just an optimization targeted at large files. It may do - * nothing. It will return 0 unless things went badly. 
- */ -static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct inode *inode, u64 new_size) -{ - struct btrfs_key key; - int ret; - int nritems; - struct btrfs_key found_key; - struct btrfs_key other_key; - struct btrfs_leaf_ref *ref; - u64 leaf_gen; - u64 leaf_start; - - path->lowest_level = 1; - key.objectid = inode->i_ino; - key.type = BTRFS_CSUM_ITEM_KEY; - key.offset = new_size; -again: - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) - goto out; - - if (path->nodes[1] == NULL) { - ret = 0; - goto out; - } - ret = 0; - btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]); - nritems = btrfs_header_nritems(path->nodes[1]); - - if (!nritems) - goto out; - - if (path->slots[1] >= nritems) - goto next_node; - - /* did we find a key greater than anything we want to delete? */ - if (found_key.objectid > inode->i_ino || - (found_key.objectid == inode->i_ino && found_key.type > key.type)) - goto out; - - /* we check the next key in the node to make sure the leave contains - * only checksum items. This comparison doesn't work if our - * leaf is the last one in the node - */ - if (path->slots[1] + 1 >= nritems) { -next_node: - /* search forward from the last key in the node, this - * will bring us into the next node in the tree - */ - btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1); - - /* unlikely, but we inc below, so check to be safe */ - if (found_key.offset == (u64)-1) - goto out; - - /* search_forward needs a path with locks held, do the - * search again for the original key. It is possible - * this will race with a balance and return a path that - * we could modify, but this drop is just an optimization - * and is allowed to miss some leaves. - */ - btrfs_release_path(root, path); - found_key.offset++; - - /* setup a max key for search_forward */ - other_key.offset = (u64)-1; - other_key.type = key.type; - other_key.objectid = key.objectid; - - path->keep_locks = 1; - ret = btrfs_search_forward(root, &found_key, &other_key, - path, 0, 0); - path->keep_locks = 0; - if (ret || found_key.objectid != key.objectid || - found_key.type != key.type) { - ret = 0; - goto out; - } - - key.offset = found_key.offset; - btrfs_release_path(root, path); - cond_resched(); - goto again; - } - - /* we know there's one more slot after us in the tree, - * read that key so we can verify it is also a checksum item - */ - btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1); - - if (found_key.objectid < inode->i_ino) - goto next_key; - - if (found_key.type != key.type || found_key.offset < new_size) - goto next_key; - - /* - * if the key for the next leaf isn't a csum key from this objectid, - * we can't be sure there aren't good items inside this leaf. 
- * Bail out - */ - if (other_key.objectid != inode->i_ino || other_key.type != key.type) - goto out; - - leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]); - leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]); - /* - * it is safe to delete this leaf, it contains only - * csum items from this inode at an offset >= new_size - */ - ret = btrfs_del_leaf(trans, root, path, leaf_start); - BUG_ON(ret); - - if (root->ref_cows && leaf_gen < trans->transid) { - ref = btrfs_alloc_leaf_ref(root, 0); - if (ref) { - ref->root_gen = root->root_key.offset; - ref->bytenr = leaf_start; - ref->owner = 0; - ref->generation = leaf_gen; - ref->nritems = 0; - - btrfs_sort_leaf_ref(ref); - - ret = btrfs_add_leaf_ref(root, ref, 0); - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } else { - WARN_ON(1); - } - } -next_key: - btrfs_release_path(root, path); - - if (other_key.objectid == inode->i_ino && - other_key.type == key.type && other_key.offset > key.offset) { - key.offset = other_key.offset; - cond_resched(); - goto again; - } - ret = 0; -out: - /* fixup any changes we've made to the path */ - path->lowest_level = 0; - path->keep_locks = 0; - btrfs_release_path(root, path); - return ret; -} - -#endif - /* * this can truncate away extent items, csum items and directory items. * It starts at a high offset and removes keys until it can't find diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 955f76eb0fa8..211aceeb9ea0 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -346,49 +346,6 @@ out_unlock: return ret; } -#if 0 -/* - * rate limit against the drop_snapshot code. This helps to slow down new - * operations if the drop_snapshot code isn't able to keep up. - */ -static void throttle_on_drops(struct btrfs_root *root) -{ - struct btrfs_fs_info *info = root->fs_info; - int harder_count = 0; - -harder: - if (atomic_read(&info->throttles)) { - DEFINE_WAIT(wait); - int thr; - thr = atomic_read(&info->throttle_gen); - - do { - prepare_to_wait(&info->transaction_throttle, - &wait, TASK_UNINTERRUPTIBLE); - if (!atomic_read(&info->throttles)) { - finish_wait(&info->transaction_throttle, &wait); - break; - } - schedule(); - finish_wait(&info->transaction_throttle, &wait); - } while (thr == atomic_read(&info->throttle_gen)); - harder_count++; - - if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 && - harder_count < 2) - goto harder; - - if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 && - harder_count < 10) - goto harder; - - if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 && - harder_count < 20) - goto harder; - } -} -#endif - void btrfs_throttle(struct btrfs_root *root) { mutex_lock(&root->fs_info->trans_mutex); @@ -808,97 +765,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) return ret; } -#if 0 -/* - * when dropping snapshots, we generate a ton of delayed refs, and it makes - * sense not to join the transaction while it is trying to flush the current - * queue of delayed refs out. 
- * - * This is used by the drop snapshot code only - */ -static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info) -{ - DEFINE_WAIT(wait); - - mutex_lock(&info->trans_mutex); - while (info->running_transaction && - info->running_transaction->delayed_refs.flushing) { - prepare_to_wait(&info->transaction_wait, &wait, - TASK_UNINTERRUPTIBLE); - mutex_unlock(&info->trans_mutex); - - schedule(); - - mutex_lock(&info->trans_mutex); - finish_wait(&info->transaction_wait, &wait); - } - mutex_unlock(&info->trans_mutex); - return 0; -} - -/* - * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on - * all of them - */ -int btrfs_drop_dead_root(struct btrfs_root *root) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *tree_root = root->fs_info->tree_root; - unsigned long nr; - int ret; - - while (1) { - /* - * we don't want to jump in and create a bunch of - * delayed refs if the transaction is starting to close - */ - wait_transaction_pre_flush(tree_root->fs_info); - trans = btrfs_start_transaction(tree_root, 1); - - /* - * we've joined a transaction, make sure it isn't - * closing right now - */ - if (trans->transaction->delayed_refs.flushing) { - btrfs_end_transaction(trans, tree_root); - continue; - } - - ret = btrfs_drop_snapshot(trans, root); - if (ret != -EAGAIN) - break; - - ret = btrfs_update_root(trans, tree_root, - &root->root_key, - &root->root_item); - if (ret) - break; - - nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, tree_root); - BUG_ON(ret); - - btrfs_btree_balance_dirty(tree_root, nr); - cond_resched(); - } - BUG_ON(ret); - - ret = btrfs_del_root(trans, tree_root, &root->root_key); - BUG_ON(ret); - - nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, tree_root); - BUG_ON(ret); - - free_extent_buffer(root->node); - free_extent_buffer(root->commit_root); - kfree(root); - - btrfs_btree_balance_dirty(tree_root, nr); - return ret; -} -#endif - /* * new snapshots need to be created at a very specific time in the * transaction commit. This does the actual creation -- cgit v1.2.2 From 70f23fd66bc821a0e99647f70a809e277cc93c4c Mon Sep 17 00:00:00 2001 From: "Justin P. Mattock" Date: Tue, 10 May 2011 10:16:21 +0200 Subject: treewide: fix a few typos in comments - kenrel -> kernel - whetehr -> whether - ttt -> tt - sss -> ss Signed-off-by: Justin P. Mattock Signed-off-by: Jiri Kosina --- fs/btrfs/relocation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 199a80134312..f340f7c99d09 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -709,7 +709,7 @@ again: WARN_ON(cur->checked); if (!list_empty(&cur->upper)) { /* - * the backref was added previously when processsing + * the backref was added previously when processing * backref of type BTRFS_TREE_BLOCK_REF_KEY */ BUG_ON(!list_is_singular(&cur->upper)); -- cgit v1.2.2 From a2de733c78fa7af51ba9670482fa7d392aa67c57 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 8 Mar 2011 14:14:00 +0100 Subject: btrfs: scrub This adds an initial implementation for scrub. It works quite straightforwardly. The usermode issues an ioctl for each device in the fs. For each device, it enumerates the allocated device chunks. For each chunk, the contained extents are enumerated and the data checksums fetched. The extents are read sequentially and the checksums verified. If an error occurs (checksum or EIO), a good copy is searched for. If one is found, the bad copy will be rewritten.
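To make the verify-and-repair cycle just described concrete, the same strategy can be modelled in a few lines of self-contained C. This is only an illustrative toy, not code from this patch: it uses a plain CRC-32 over in-memory buffers, whereas the real scrub computes crc32c over bio pages and locates the mirrors via btrfs_map_block().

        /*
         * Toy model of the scrub repair strategy: every block has a
         * recorded checksum; each mirror's copy is verified, and a copy
         * that fails verification is rewritten from the first mirror
         * whose copy still matches the recorded checksum.
         */
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define BLOCK_SIZE 4096
        #define NMIRRORS   2

        static uint32_t crc32(const uint8_t *p, size_t len)
        {
                uint32_t crc = ~0u;

                while (len--) {
                        crc ^= *p++;
                        for (int k = 0; k < 8; k++)
                                crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
                }
                return ~crc;
        }

        /* returns 1 if the block is uncorrectable, 0 otherwise */
        static int scrub_block(uint8_t mirror[NMIRRORS][BLOCK_SIZE], uint32_t want)
        {
                int good = -1;

                for (int i = 0; i < NMIRRORS; i++) {
                        if (crc32(mirror[i], BLOCK_SIZE) == want) {
                                good = i;
                                break;
                        }
                }
                if (good < 0)
                        return 1;       /* no intact copy left */

                for (int i = 0; i < NMIRRORS; i++) {
                        if (crc32(mirror[i], BLOCK_SIZE) == want)
                                continue;
                        /* rewrite the bad copy from the good one */
                        memcpy(mirror[i], mirror[good], BLOCK_SIZE);
                        printf("fixed up mirror %d\n", i);
                }
                return 0;
        }

        int main(void)
        {
                uint8_t mirror[NMIRRORS][BLOCK_SIZE];

                memset(mirror, 0xaa, sizeof(mirror));
                uint32_t want = crc32(mirror[0], BLOCK_SIZE);
                mirror[1][17] ^= 0xff;  /* simulate a corrupted copy */
                return scrub_block(mirror, want);
        }

As in the patch, an error is correctable exactly when at least one mirror still matches the recorded checksum; otherwise it is counted as uncorrectable.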
All enumerations happen from the commit roots. During a transaction commit, the scrubs get paused and afterwards continue from the new roots. This commit is based on the series originally posted to linux-btrfs with some improvements that resulted from comments from David Sterba, Ilya Dryomov and Jan Schmidt. Signed-off-by: Arne Jansen --- fs/btrfs/Makefile | 2 +- fs/btrfs/ctree.h | 37 +- fs/btrfs/disk-io.c | 12 + fs/btrfs/file-item.c | 8 +- fs/btrfs/inode.c | 2 +- fs/btrfs/ioctl.h | 37 ++ fs/btrfs/relocation.c | 2 +- fs/btrfs/scrub.c | 1492 ++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/transaction.c | 3 + fs/btrfs/tree-log.c | 6 +- fs/btrfs/volumes.c | 4 +- fs/btrfs/volumes.h | 6 + 12 files changed, 1600 insertions(+), 11 deletions(-) create mode 100644 fs/btrfs/scrub.c (limited to 'fs/btrfs') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73aec..8fda3133c1b8 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ - compression.o delayed-ref.o relocation.o + compression.o delayed-ref.o relocation.o scrub.o diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2e61fe1b6b8c..31141ba6072d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" +#include "ioctl.h" struct btrfs_trans_handle; struct btrfs_transaction; @@ -510,6 +512,12 @@ struct btrfs_extent_item_v0 { /* use full backrefs for extent pointers in the block */ #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) +/* + * this flag is only used internally by scrub and may be changed at any time + * it is only declared here to avoid collisions + */ +#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) + struct btrfs_tree_block_info { struct btrfs_disk_key key; u8 level; @@ -1077,6 +1085,17 @@ struct btrfs_fs_info { void *bdev_holder; + /* private scrub information */ + struct mutex scrub_lock; + atomic_t scrubs_running; + atomic_t scrub_pause_req; + atomic_t scrubs_paused; + atomic_t scrub_cancel_req; + wait_queue_head_t scrub_pause_wait; + struct rw_semaphore scrub_super_lock; + int scrub_workers_refcnt; + struct btrfs_workers scrub_workers; + /* filesystem state */ u64 fs_state; }; @@ -2472,8 +2491,8 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); -int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, - u64 end, struct list_head *list); +int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, + struct list_head *list, int search_commit); /* inode.c */ /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ @@ -2637,4 +2656,18 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, u64 *bytes_to_reserve); void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending); + +/* scrub.c */ +int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, + struct btrfs_scrub_progress *progress); +int btrfs_scrub_pause(struct btrfs_root *root); +int btrfs_scrub_pause_super(struct btrfs_root *root); +int 
btrfs_scrub_continue(struct btrfs_root *root); +int btrfs_scrub_continue_super(struct btrfs_root *root); +int btrfs_scrub_cancel(struct btrfs_root *root); +int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev); +int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); +int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, + struct btrfs_scrub_progress *progress); + #endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fe5aec9b3924..e48e8095c61f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1773,6 +1773,17 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); + mutex_init(&fs_info->scrub_lock); + atomic_set(&fs_info->scrubs_running, 0); + atomic_set(&fs_info->scrub_pause_req, 0); + atomic_set(&fs_info->scrubs_paused, 0); + atomic_set(&fs_info->scrub_cancel_req, 0); + init_waitqueue_head(&fs_info->scrub_pause_wait); + init_rwsem(&fs_info->scrub_super_lock); + fs_info->scrub_workers_refcnt = 0; + btrfs_init_workers(&fs_info->scrub_workers, "scrub", + fs_info->thread_pool_size, &fs_info->generic_worker); + sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); sb->s_bdi = &fs_info->bdi; @@ -2599,6 +2610,7 @@ int close_ctree(struct btrfs_root *root) fs_info->closing = 1; smp_mb(); + btrfs_scrub_cancel(root); btrfs_put_block_group_cache(fs_info); /* diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..39ca7c1250e7 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -266,7 +266,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, - struct list_head *list) + struct list_head *list, int search_commit) { struct btrfs_key key; struct btrfs_path *path; @@ -283,6 +283,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, path = btrfs_alloc_path(); BUG_ON(!path); + if (search_commit) { + path->skip_locking = 1; + path->reada = 2; + path->search_commit_root = 1; + } + key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = start; key.type = BTRFS_EXTENT_CSUM_KEY; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 870869aab0b8..27142446b30a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1007,7 +1007,7 @@ static noinline int csum_exist_in_range(struct btrfs_root *root, LIST_HEAD(list); ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, - bytenr + num_bytes - 1, &list); + bytenr + num_bytes - 1, &list, 0); if (ret == 0 && list_empty(&list)) return 0; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 8fb382167b13..37ac030d64b4 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -42,6 +42,43 @@ struct btrfs_ioctl_vol_args_v2 { char name[BTRFS_SUBVOL_NAME_MAX + 1]; }; +/* + * structure to report errors and progress to userspace, either as a + * result of a finished scrub, a canceled scrub or a progress inquiry + */ +struct btrfs_scrub_progress { + __u64 data_extents_scrubbed; /* # of data extents scrubbed */ + __u64 tree_extents_scrubbed; /* # of tree extents scrubbed */ + __u64 data_bytes_scrubbed; /* # of data bytes scrubbed */ + __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */ + __u64 read_errors; /* # of read errors encountered (EIO) */ + __u64 csum_errors; /* # of failed csum checks */ + __u64 verify_errors; /* # of occurences, where the metadata + * of a tree block did not match the + * expected values, like generation or + * 
logical */ + __u64 no_csum; /* # of 4k data block for which no csum + * is present, probably the result of + * data written with nodatasum */ + __u64 csum_discards; /* # of csum for which no data was found + * in the extent tree. */ + __u64 super_errors; /* # of bad super blocks encountered */ + __u64 malloc_errors; /* # of internal kmalloc errors. These + * will likely cause an incomplete + * scrub */ + __u64 uncorrectable_errors; /* # of errors where either no intact + * copy was found or the writeback + * failed */ + __u64 corrected_errors; /* # of errors corrected */ + __u64 last_physical; /* last physical address scrubbed. In + * case a scrub was aborted, this can + * be used to restart the scrub */ + __u64 unverified_errors; /* # of occurences where a read for a + * full (64k) bio failed, but the re- + * check succeeded for each 4k piece. + * Intermittent error. */ +}; + #define BTRFS_INO_LOOKUP_PATH_MAX 4080 struct btrfs_ioctl_ino_lookup_args { __u64 treeid; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 58250e09eb05..db1dffa9952b 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4242,7 +4242,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, - disk_bytenr + len - 1, &list); + disk_bytenr + len - 1, &list, 0); while (!list_empty(&list)) { sums = list_entry(list.next, struct btrfs_ordered_sum, list); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c new file mode 100644 index 000000000000..70f9fa772ee9 --- /dev/null +++ b/fs/btrfs/scrub.c @@ -0,0 +1,1492 @@ +/* + * Copyright (C) 2011 STRATO. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ctree.h" +#include "volumes.h" +#include "disk-io.h" +#include "ordered-data.h" + +/* + * This is only the first step towards a full-features scrub. It reads all + * extent and super block and verifies the checksums. In case a bad checksum + * is found or the extent cannot be read, good data will be written back if + * any can be found. + * + * Future enhancements: + * - To enhance the performance, better read-ahead strategies for the + * extent-tree can be employed. 
+ * - In case an unrepairable extent is encountered, track which files are + * affected and report them + * - In case of a read error on files with nodatasum, map the file and read + * the extent to trigger a writeback of the good copy + * - track and record media errors, throw out bad devices + * - add a readonly mode + * - add a mode to also read unallocated space + * - make the prefetch cancellable + */ + +struct scrub_bio; +struct scrub_page; +struct scrub_dev; +struct scrub_fixup; +static void scrub_bio_end_io(struct bio *bio, int err); +static void scrub_checksum(struct btrfs_work *work); +static int scrub_checksum_data(struct scrub_dev *sdev, + struct scrub_page *spag, void *buffer); +static int scrub_checksum_tree_block(struct scrub_dev *sdev, + struct scrub_page *spag, u64 logical, + void *buffer); +static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer); +static void scrub_recheck_end_io(struct bio *bio, int err); +static void scrub_fixup_worker(struct btrfs_work *work); +static void scrub_fixup(struct scrub_fixup *fixup); + +#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */ +#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */ + +struct scrub_page { + u64 flags; /* extent flags */ + u64 generation; + u64 mirror_num; + int have_csum; + u8 csum[BTRFS_CSUM_SIZE]; +}; + +struct scrub_bio { + int index; + struct scrub_dev *sdev; + struct bio *bio; + int err; + u64 logical; + u64 physical; + struct scrub_page spag[SCRUB_PAGES_PER_BIO]; + u64 count; + int next_free; + struct btrfs_work work; +}; + +struct scrub_dev { + struct scrub_bio *bios[SCRUB_BIOS_PER_DEV]; + struct btrfs_device *dev; + int first_free; + int curr; + atomic_t in_flight; + spinlock_t list_lock; + wait_queue_head_t list_wait; + u16 csum_size; + struct list_head csum_list; + atomic_t cancel_req; + /* + * statistics + */ + struct btrfs_scrub_progress stat; + spinlock_t stat_lock; +}; + +struct scrub_fixup { + struct scrub_dev *sdev; + struct bio *bio; + u64 logical; + u64 physical; + struct scrub_page spag; + struct btrfs_work work; + int err; + int recheck; +}; + +static void scrub_free_csums(struct scrub_dev *sdev) +{ + while (!list_empty(&sdev->csum_list)) { + struct btrfs_ordered_sum *sum; + sum = list_first_entry(&sdev->csum_list, + struct btrfs_ordered_sum, list); + list_del(&sum->list); + kfree(sum); + } +} + +static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) +{ + int i; + int j; + struct page *last_page; + + if (!sdev) + return; + + for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { + struct scrub_bio *sbio = sdev->bios[i]; + struct bio *bio; + + if (!sbio) + break; + + bio = sbio->bio; + if (bio) { + last_page = NULL; + for (j = 0; j < bio->bi_vcnt; ++j) { + if (bio->bi_io_vec[j].bv_page == last_page) + continue; + last_page = bio->bi_io_vec[j].bv_page; + __free_page(last_page); + } + bio_put(bio); + } + kfree(sbio); + } + + scrub_free_csums(sdev); + kfree(sdev); +} + +static noinline_for_stack +struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) +{ + struct scrub_dev *sdev; + int i; + int j; + int ret; + struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; + + sdev = kzalloc(sizeof(*sdev), GFP_NOFS); + if (!sdev) + goto nomem; + sdev->dev = dev; + for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { + struct bio *bio; + struct scrub_bio *sbio; + + sbio = kzalloc(sizeof(*sbio), GFP_NOFS); + if (!sbio) + goto nomem; + sdev->bios[i] = sbio; + + bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); + if (!bio) + goto nomem; + + sbio->index = i; + sbio->sdev = sdev; + sbio->bio = 
bio; + sbio->count = 0; + sbio->work.func = scrub_checksum; + bio->bi_private = sdev->bios[i]; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_sector = 0; + bio->bi_bdev = dev->bdev; + bio->bi_size = 0; + + for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) { + struct page *page; + page = alloc_page(GFP_NOFS); + if (!page) + goto nomem; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) + goto nomem; + } + WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO); + + if (i != SCRUB_BIOS_PER_DEV-1) + sdev->bios[i]->next_free = i + 1; + else + sdev->bios[i]->next_free = -1; + } + sdev->first_free = 0; + sdev->curr = -1; + atomic_set(&sdev->in_flight, 0); + atomic_set(&sdev->cancel_req, 0); + sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy); + INIT_LIST_HEAD(&sdev->csum_list); + + spin_lock_init(&sdev->list_lock); + spin_lock_init(&sdev->stat_lock); + init_waitqueue_head(&sdev->list_wait); + return sdev; + +nomem: + scrub_free_dev(sdev); + return ERR_PTR(-ENOMEM); +} + +/* + * scrub_recheck_error gets called when either verification of the page + * failed or the bio failed to read, e.g. with EIO. In the latter case, + * recheck_error gets called for every page in the bio, even though only + * one may be bad + */ +static void scrub_recheck_error(struct scrub_bio *sbio, int ix) +{ + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct bio *bio = NULL; + struct page *page = NULL; + struct scrub_fixup *fixup = NULL; + int ret; + + /* + * while we're in here we do not want the transaction to commit. + * To prevent it, we increment scrubs_running. scrub_pause will + * have to wait until we're finished + * we can safely increment scrubs_running here, because we're + * in the context of the original bio which is still marked in_flight + */ + atomic_inc(&fs_info->scrubs_running); + + fixup = kzalloc(sizeof(*fixup), GFP_NOFS); + if (!fixup) + goto malloc_error; + + fixup->logical = sbio->logical + ix * PAGE_SIZE; + fixup->physical = sbio->physical + ix * PAGE_SIZE; + fixup->spag = sbio->spag[ix]; + fixup->sdev = sdev; + + bio = bio_alloc(GFP_NOFS, 1); + if (!bio) + goto malloc_error; + bio->bi_private = fixup; + bio->bi_size = 0; + bio->bi_bdev = sdev->dev->bdev; + fixup->bio = bio; + fixup->recheck = 0; + + page = alloc_page(GFP_NOFS); + if (!page) + goto malloc_error; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) + goto malloc_error; + + if (!sbio->err) { + /* + * shorter path: just a checksum error, go ahead and correct it + */ + scrub_fixup_worker(&fixup->work); + return; + } + + /* + * an I/O-error occured for one of the blocks in the bio, not + * necessarily for this one, so first try to read it separately + */ + fixup->work.func = scrub_fixup_worker; + fixup->recheck = 1; + bio->bi_end_io = scrub_recheck_end_io; + bio->bi_sector = fixup->physical >> 9; + bio->bi_bdev = sdev->dev->bdev; + submit_bio(0, bio); + + return; + +malloc_error: + if (bio) + bio_put(bio); + if (page) + __free_page(page); + kfree(fixup); + spin_lock(&sdev->stat_lock); + ++sdev->stat.malloc_errors; + spin_unlock(&sdev->stat_lock); + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); +} + +static void scrub_recheck_end_io(struct bio *bio, int err) +{ + struct scrub_fixup *fixup = bio->bi_private; + struct btrfs_fs_info *fs_info = fixup->sdev->dev->dev_root->fs_info; + + fixup->err = err; + btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work); +} + +static int scrub_fixup_check(struct scrub_fixup *fixup) +{ + int ret = 1; + 
struct page *page; + void *buffer; + u64 flags = fixup->spag.flags; + + page = fixup->bio->bi_io_vec[0].bv_page; + buffer = kmap_atomic(page, KM_USER0); + if (flags & BTRFS_EXTENT_FLAG_DATA) { + ret = scrub_checksum_data(fixup->sdev, + &fixup->spag, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + ret = scrub_checksum_tree_block(fixup->sdev, + &fixup->spag, + fixup->logical, + buffer); + } else { + WARN_ON(1); + } + kunmap_atomic(buffer, KM_USER0); + + return ret; +} + +static void scrub_fixup_worker(struct btrfs_work *work) +{ + struct scrub_fixup *fixup; + struct btrfs_fs_info *fs_info; + u64 flags; + int ret = 1; + + fixup = container_of(work, struct scrub_fixup, work); + fs_info = fixup->sdev->dev->dev_root->fs_info; + flags = fixup->spag.flags; + + if (fixup->recheck && fixup->err == 0) + ret = scrub_fixup_check(fixup); + + if (ret || fixup->err) + scrub_fixup(fixup); + + __free_page(fixup->bio->bi_io_vec[0].bv_page); + bio_put(fixup->bio); + + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); + + kfree(fixup); +} + +static void scrub_fixup_end_io(struct bio *bio, int err) +{ + complete((struct completion *)bio->bi_private); +} + +static void scrub_fixup(struct scrub_fixup *fixup) +{ + struct scrub_dev *sdev = fixup->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; + struct btrfs_multi_bio *multi = NULL; + struct bio *bio = fixup->bio; + u64 length; + int i; + int ret; + DECLARE_COMPLETION_ONSTACK(complete); + + if ((fixup->spag.flags & BTRFS_EXTENT_FLAG_DATA) && + (fixup->spag.have_csum == 0)) { + /* + * nodatasum, don't try to fix anything + * FIXME: we can do better, open the inode and trigger a + * writeback + */ + goto uncorrectable; + } + + length = PAGE_SIZE; + ret = btrfs_map_block(map_tree, REQ_WRITE, fixup->logical, &length, + &multi, 0); + if (ret || !multi || length < PAGE_SIZE) { + printk(KERN_ERR + "scrub_fixup: btrfs_map_block failed us for %llu\n", + (unsigned long long)fixup->logical); + WARN_ON(1); + return; + } + + if (multi->num_stripes == 1) { + /* there aren't any replicas */ + goto uncorrectable; + } + + /* + * first find a good copy + */ + for (i = 0; i < multi->num_stripes; ++i) { + if (i == fixup->spag.mirror_num) + continue; + + bio->bi_sector = multi->stripes[i].physical >> 9; + bio->bi_bdev = multi->stripes[i].dev->bdev; + bio->bi_size = PAGE_SIZE; + bio->bi_next = NULL; + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_comp_cpu = -1; + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + + submit_bio(0, bio); + + wait_for_completion(&complete); + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + /* I/O-error, this is not a good copy */ + continue; + + ret = scrub_fixup_check(fixup); + if (ret == 0) + break; + } + if (i == multi->num_stripes) + goto uncorrectable; + + /* + * the bio now contains good data, write it back + */ + bio->bi_sector = fixup->physical >> 9; + bio->bi_bdev = sdev->dev->bdev; + bio->bi_size = PAGE_SIZE; + bio->bi_next = NULL; + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_comp_cpu = -1; + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + + submit_bio(REQ_WRITE, bio); + + wait_for_completion(&complete); + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + /* I/O-error, writeback failed, give up */ + goto uncorrectable; + + kfree(multi); + spin_lock(&sdev->stat_lock); + ++sdev->stat.corrected_errors; + spin_unlock(&sdev->stat_lock); + + if (printk_ratelimit()) + printk(KERN_ERR 
"btrfs: fixed up at %llu\n", + (unsigned long long)fixup->logical); + return; + +uncorrectable: + kfree(multi); + spin_lock(&sdev->stat_lock); + ++sdev->stat.uncorrectable_errors; + spin_unlock(&sdev->stat_lock); + + if (printk_ratelimit()) + printk(KERN_ERR "btrfs: unable to fixup at %llu\n", + (unsigned long long)fixup->logical); +} + +static void scrub_bio_end_io(struct bio *bio, int err) +{ + struct scrub_bio *sbio = bio->bi_private; + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + + sbio->err = err; + + btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); +} + +static void scrub_checksum(struct btrfs_work *work) +{ + struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); + struct scrub_dev *sdev = sbio->sdev; + struct page *page; + void *buffer; + int i; + u64 flags; + u64 logical; + int ret; + + if (sbio->err) { + struct bio *bio; + struct bio *old_bio; + + for (i = 0; i < sbio->count; ++i) + scrub_recheck_error(sbio, i); + spin_lock(&sdev->stat_lock); + ++sdev->stat.read_errors; + spin_unlock(&sdev->stat_lock); + + /* + * FIXME: allocate a new bio after a media error. I haven't + * figured out how to reuse this one + */ + old_bio = sbio->bio; + bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); + if (!bio) { + /* + * alloc failed. cancel the scrub and don't requeue + * this sbio + */ + printk(KERN_ERR "btrfs scrub: allocation failure, " + "cancelling scrub\n"); + atomic_inc(&sdev->dev->dev_root->fs_info-> + scrub_cancel_req); + goto out_no_enqueue; + } + sbio->bio = bio; + bio->bi_private = sbio; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_sector = 0; + bio->bi_bdev = sbio->sdev->dev->bdev; + bio->bi_size = 0; + for (i = 0; i < SCRUB_PAGES_PER_BIO; ++i) { + struct page *page; + page = old_bio->bi_io_vec[i].bv_page; + bio_add_page(bio, page, PAGE_SIZE, 0); + } + bio_put(old_bio); + goto out; + } + for (i = 0; i < sbio->count; ++i) { + page = sbio->bio->bi_io_vec[i].bv_page; + buffer = kmap_atomic(page, KM_USER0); + flags = sbio->spag[i].flags; + logical = sbio->logical + i * PAGE_SIZE; + ret = 0; + if (flags & BTRFS_EXTENT_FLAG_DATA) { + ret = scrub_checksum_data(sdev, sbio->spag + i, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + ret = scrub_checksum_tree_block(sdev, sbio->spag + i, + logical, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_SUPER) { + BUG_ON(i); + (void)scrub_checksum_super(sbio, buffer); + } else { + WARN_ON(1); + } + kunmap_atomic(buffer, KM_USER0); + if (ret) + scrub_recheck_error(sbio, i); + } + +out: + spin_lock(&sdev->list_lock); + sbio->next_free = sdev->first_free; + sdev->first_free = sbio->index; + spin_unlock(&sdev->list_lock); +out_no_enqueue: + atomic_dec(&sdev->in_flight); + wake_up(&sdev->list_wait); +} + +static int scrub_checksum_data(struct scrub_dev *sdev, + struct scrub_page *spag, void *buffer) +{ + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + struct btrfs_root *root = sdev->dev->dev_root; + + if (!spag->have_csum) + return 0; + + crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, spag->csum, sdev->csum_size)) + fail = 1; + + spin_lock(&sdev->stat_lock); + ++sdev->stat.data_extents_scrubbed; + sdev->stat.data_bytes_scrubbed += PAGE_SIZE; + if (fail) + ++sdev->stat.csum_errors; + spin_unlock(&sdev->stat_lock); + + return fail; +} + +static int scrub_checksum_tree_block(struct scrub_dev *sdev, + struct scrub_page *spag, u64 logical, + void *buffer) +{ + struct btrfs_header *h; + 
struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + int crc_fail = 0; + + /* + * we don't use the getter functions here, as we + * a) don't have an extent buffer and + * b) the page is already kmapped + */ + h = (struct btrfs_header *)buffer; + + if (logical != le64_to_cpu(h->bytenr)) + ++fail; + + if (spag->generation != le64_to_cpu(h->generation)) + ++fail; + + if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) + ++fail; + + if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, + BTRFS_UUID_SIZE)) + ++fail; + + crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, + PAGE_SIZE - BTRFS_CSUM_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, h->csum, sdev->csum_size)) + ++crc_fail; + + spin_lock(&sdev->stat_lock); + ++sdev->stat.tree_extents_scrubbed; + sdev->stat.tree_bytes_scrubbed += PAGE_SIZE; + if (crc_fail) + ++sdev->stat.csum_errors; + if (fail) + ++sdev->stat.verify_errors; + spin_unlock(&sdev->stat_lock); + + return fail || crc_fail; +} + +static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) +{ + struct btrfs_super_block *s; + u64 logical; + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + + s = (struct btrfs_super_block *)buffer; + logical = sbio->logical; + + if (logical != le64_to_cpu(s->bytenr)) + ++fail; + + if (sbio->spag[0].generation != le64_to_cpu(s->generation)) + ++fail; + + if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) + ++fail; + + crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, + PAGE_SIZE - BTRFS_CSUM_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, s->csum, sbio->sdev->csum_size)) + ++fail; + + if (fail) { + /* + * if we find an error in a super block, we just report it. 
+ * They will get written with the next transaction commit + * anyway + */ + spin_lock(&sdev->stat_lock); + ++sdev->stat.super_errors; + spin_unlock(&sdev->stat_lock); + } + + return fail; +} + +static int scrub_submit(struct scrub_dev *sdev) +{ + struct scrub_bio *sbio; + + if (sdev->curr == -1) + return 0; + + sbio = sdev->bios[sdev->curr]; + + sbio->bio->bi_sector = sbio->physical >> 9; + sbio->bio->bi_size = sbio->count * PAGE_SIZE; + sbio->bio->bi_next = NULL; + sbio->bio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bio->bi_comp_cpu = -1; + sbio->bio->bi_bdev = sdev->dev->bdev; + sbio->err = 0; + sdev->curr = -1; + atomic_inc(&sdev->in_flight); + + submit_bio(0, sbio->bio); + + return 0; +} + +static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, + u64 physical, u64 flags, u64 gen, u64 mirror_num, + u8 *csum, int force) +{ + struct scrub_bio *sbio; + +again: + /* + * grab a fresh bio or wait for one to become available + */ + while (sdev->curr == -1) { + spin_lock(&sdev->list_lock); + sdev->curr = sdev->first_free; + if (sdev->curr != -1) { + sdev->first_free = sdev->bios[sdev->curr]->next_free; + sdev->bios[sdev->curr]->next_free = -1; + sdev->bios[sdev->curr]->count = 0; + spin_unlock(&sdev->list_lock); + } else { + spin_unlock(&sdev->list_lock); + wait_event(sdev->list_wait, sdev->first_free != -1); + } + } + sbio = sdev->bios[sdev->curr]; + if (sbio->count == 0) { + sbio->physical = physical; + sbio->logical = logical; + } else if (sbio->physical + sbio->count * PAGE_SIZE != physical) { + scrub_submit(sdev); + goto again; + } + sbio->spag[sbio->count].flags = flags; + sbio->spag[sbio->count].generation = gen; + sbio->spag[sbio->count].have_csum = 0; + sbio->spag[sbio->count].mirror_num = mirror_num; + if (csum) { + sbio->spag[sbio->count].have_csum = 1; + memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); + } + ++sbio->count; + if (sbio->count == SCRUB_PAGES_PER_BIO || force) + scrub_submit(sdev); + + return 0; +} + +static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len, + u8 *csum) +{ + struct btrfs_ordered_sum *sum = NULL; + int ret = 0; + unsigned long i; + unsigned long num_sectors; + u32 sectorsize = sdev->dev->dev_root->sectorsize; + + while (!list_empty(&sdev->csum_list)) { + sum = list_first_entry(&sdev->csum_list, + struct btrfs_ordered_sum, list); + if (sum->bytenr > logical) + return 0; + if (sum->bytenr + sum->len > logical) + break; + + ++sdev->stat.csum_discards; + list_del(&sum->list); + kfree(sum); + sum = NULL; + } + if (!sum) + return 0; + + num_sectors = sum->len / sectorsize; + for (i = 0; i < num_sectors; ++i) { + if (sum->sums[i].bytenr == logical) { + memcpy(csum, &sum->sums[i].sum, sdev->csum_size); + ret = 1; + break; + } + } + if (ret && i == num_sectors - 1) { + list_del(&sum->list); + kfree(sum); + } + return ret; +} + +/* scrub extent tries to collect up to 64 kB for each bio */ +static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len, + u64 physical, u64 flags, u64 gen, u64 mirror_num) +{ + int ret; + u8 csum[BTRFS_CSUM_SIZE]; + + while (len) { + u64 l = min_t(u64, len, PAGE_SIZE); + int have_csum = 0; + + if (flags & BTRFS_EXTENT_FLAG_DATA) { + /* push csums to sbio */ + have_csum = scrub_find_csum(sdev, logical, l, csum); + if (have_csum == 0) + ++sdev->stat.no_csum; + } + ret = scrub_page(sdev, logical, l, physical, flags, gen, + mirror_num, have_csum ? 
csum : NULL, 0); + if (ret) + return ret; + len -= l; + logical += l; + physical += l; + } + return 0; +} + +static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, + struct map_lookup *map, int num, u64 base, u64 length) +{ + struct btrfs_path *path; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct btrfs_root *root = fs_info->extent_root; + struct btrfs_root *csum_root = fs_info->csum_root; + struct btrfs_extent_item *extent; + u64 flags; + int ret; + int slot; + int i; + u64 nstripes; + int start_stripe; + struct extent_buffer *l; + struct btrfs_key key; + u64 physical; + u64 logical; + u64 generation; + u64 mirror_num; + + u64 increment = map->stripe_len; + u64 offset; + + nstripes = length; + offset = 0; + do_div(nstripes, map->stripe_len); + if (map->type & BTRFS_BLOCK_GROUP_RAID0) { + offset = map->stripe_len * num; + increment = map->stripe_len * map->num_stripes; + mirror_num = 0; + } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { + int factor = map->num_stripes / map->sub_stripes; + offset = map->stripe_len * (num / map->sub_stripes); + increment = map->stripe_len * factor; + mirror_num = num % map->sub_stripes; + } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { + increment = map->stripe_len; + mirror_num = num % map->num_stripes; + } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { + increment = map->stripe_len; + mirror_num = num % map->num_stripes; + } else { + increment = map->stripe_len; + mirror_num = 0; + } + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + path->reada = 2; + path->search_commit_root = 1; + path->skip_locking = 1; + + /* + * find all extents for each stripe and just read them to get + * them into the page cache + * FIXME: we can do better. build a more intelligent prefetching + */ + logical = base + offset; + physical = map->stripes[num].physical; + ret = 0; + for (i = 0; i < nstripes; ++i) { + key.objectid = logical; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + l = path->nodes[0]; + slot = path->slots[0]; + btrfs_item_key_to_cpu(l, &key, slot); + if (key.objectid != logical) { + ret = btrfs_previous_item(root, path, 0, + BTRFS_EXTENT_ITEM_KEY); + if (ret < 0) + goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid >= logical + map->stripe_len) + break; + + path->slots[0]++; + } + btrfs_release_path(root, path); + logical += increment; + physical += map->stripe_len; + cond_resched(); + } + + /* + * collect all data csums for the stripe to avoid seeking during + * the scrub. This might currently (crc32) end up to be about 1MB + */ + start_stripe = 0; +again: + logical = base + offset + start_stripe * increment; + for (i = start_stripe; i < nstripes; ++i) { + ret = btrfs_lookup_csums_range(csum_root, logical, + logical + map->stripe_len - 1, + &sdev->csum_list, 1); + if (ret) + goto out; + + logical += increment; + cond_resched(); + } + /* + * now find all extents for each stripe and scrub them + */ + logical = base + offset + start_stripe * increment; + physical = map->stripes[num].physical + start_stripe * map->stripe_len; + ret = 0; + for (i = start_stripe; i < nstripes; ++i) { + /* + * canceled? 
+ */ + if (atomic_read(&fs_info->scrub_cancel_req) || + atomic_read(&sdev->cancel_req)) { + ret = -ECANCELED; + goto out; + } + /* + * check to see if we have to pause + */ + if (atomic_read(&fs_info->scrub_pause_req)) { + /* push queued extents */ + scrub_submit(sdev); + wait_event(sdev->list_wait, + atomic_read(&sdev->in_flight) == 0); + atomic_inc(&fs_info->scrubs_paused); + wake_up(&fs_info->scrub_pause_wait); + mutex_lock(&fs_info->scrub_lock); + while (atomic_read(&fs_info->scrub_pause_req)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrub_pause_req) == 0); + mutex_lock(&fs_info->scrub_lock); + } + atomic_dec(&fs_info->scrubs_paused); + mutex_unlock(&fs_info->scrub_lock); + wake_up(&fs_info->scrub_pause_wait); + scrub_free_csums(sdev); + start_stripe = i; + goto again; + } + + key.objectid = logical; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + l = path->nodes[0]; + slot = path->slots[0]; + btrfs_item_key_to_cpu(l, &key, slot); + if (key.objectid != logical) { + ret = btrfs_previous_item(root, path, 0, + BTRFS_EXTENT_ITEM_KEY); + if (ret < 0) + goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid + key.offset <= logical) + goto next; + + if (key.objectid >= logical + map->stripe_len) + break; + + if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) + goto next; + + extent = btrfs_item_ptr(l, slot, + struct btrfs_extent_item); + flags = btrfs_extent_flags(l, extent); + generation = btrfs_extent_generation(l, extent); + + if (key.objectid < logical && + (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) { + printk(KERN_ERR + "btrfs scrub: tree block %llu spanning " + "stripes, ignored. logical=%llu\n", + (unsigned long long)key.objectid, + (unsigned long long)logical); + goto next; + } + + /* + * trim extent to this stripe + */ + if (key.objectid < logical) { + key.offset -= logical - key.objectid; + key.objectid = logical; + } + if (key.objectid + key.offset > + logical + map->stripe_len) { + key.offset = logical + map->stripe_len - + key.objectid; + } + + ret = scrub_extent(sdev, key.objectid, key.offset, + key.objectid - logical + physical, + flags, generation, mirror_num); + if (ret) + goto out; + +next: + path->slots[0]++; + } + btrfs_release_path(root, path); + logical += increment; + physical += map->stripe_len; + spin_lock(&sdev->stat_lock); + sdev->stat.last_physical = physical; + spin_unlock(&sdev->stat_lock); + } + /* push queued extents */ + scrub_submit(sdev); + +out: + btrfs_free_path(path); + return ret < 0 ? 
ret : 0; +} + +static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, + u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length) +{ + struct btrfs_mapping_tree *map_tree = + &sdev->dev->dev_root->fs_info->mapping_tree; + struct map_lookup *map; + struct extent_map *em; + int i; + int ret = -EINVAL; + + read_lock(&map_tree->map_tree.lock); + em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); + read_unlock(&map_tree->map_tree.lock); + + if (!em) + return -EINVAL; + + map = (struct map_lookup *)em->bdev; + if (em->start != chunk_offset) + goto out; + + if (em->len < length) + goto out; + + for (i = 0; i < map->num_stripes; ++i) { + if (map->stripes[i].dev == sdev->dev) { + ret = scrub_stripe(sdev, map, i, chunk_offset, length); + if (ret) + goto out; + } + } +out: + free_extent_map(em); + + return ret; +} + +static noinline_for_stack +int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) +{ + struct btrfs_dev_extent *dev_extent = NULL; + struct btrfs_path *path; + struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u64 length; + u64 chunk_tree; + u64 chunk_objectid; + u64 chunk_offset; + int ret; + int slot; + struct extent_buffer *l; + struct btrfs_key key; + struct btrfs_key found_key; + struct btrfs_block_group_cache *cache; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + path->reada = 2; + path->search_commit_root = 1; + path->skip_locking = 1; + + key.objectid = sdev->dev->devid; + key.offset = 0ull; + key.type = BTRFS_DEV_EXTENT_KEY; + + + while (1) { + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + ret = 0; + + l = path->nodes[0]; + slot = path->slots[0]; + + btrfs_item_key_to_cpu(l, &found_key, slot); + + if (found_key.objectid != sdev->dev->devid) + break; + + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + break; + + if (found_key.offset >= end) + break; + + if (found_key.offset < key.offset) + break; + + dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); + length = btrfs_dev_extent_length(l, dev_extent); + + if (found_key.offset + length <= start) { + key.offset = found_key.offset + length; + btrfs_release_path(root, path); + continue; + } + + chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); + chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); + chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); + + /* + * get a reference on the corresponding block group to prevent + * the chunk from going away while we scrub it + */ + cache = btrfs_lookup_block_group(fs_info, chunk_offset); + if (!cache) { + ret = -ENOENT; + goto out; + } + ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, + chunk_offset, length); + btrfs_put_block_group(cache); + if (ret) + break; + + key.offset = found_key.offset + length; + btrfs_release_path(root, path); + } + +out: + btrfs_free_path(path); + return ret; +} + +static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) +{ + int i; + u64 bytenr; + u64 gen; + int ret; + struct btrfs_device *device = sdev->dev; + struct btrfs_root *root = device->dev_root; + + gen = root->fs_info->last_trans_committed; + + for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { + bytenr = btrfs_sb_offset(i); + if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) + break; + + ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr, + BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1); + if (ret) + return ret; + } + wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); + + return 0; 
+} + +/* + * get a reference count on fs_info->scrub_workers. start worker if necessary + */ +static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (fs_info->scrub_workers_refcnt == 0) + btrfs_start_workers(&fs_info->scrub_workers, 1); + ++fs_info->scrub_workers_refcnt; + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (--fs_info->scrub_workers_refcnt == 0) + btrfs_stop_workers(&fs_info->scrub_workers); + WARN_ON(fs_info->scrub_workers_refcnt < 0); + mutex_unlock(&fs_info->scrub_lock); +} + + +int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, + struct btrfs_scrub_progress *progress) +{ + struct scrub_dev *sdev; + struct btrfs_fs_info *fs_info = root->fs_info; + int ret; + struct btrfs_device *dev; + + if (root->fs_info->closing) + return -EINVAL; + + /* + * check some assumptions + */ + if (root->sectorsize != PAGE_SIZE || + root->sectorsize != root->leafsize || + root->sectorsize != root->nodesize) { + printk(KERN_ERR "btrfs_scrub: size assumptions fail\n"); + return -EINVAL; + } + + ret = scrub_workers_get(root); + if (ret) + return ret; + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (!dev || dev->missing) { + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -ENODEV; + } + mutex_lock(&fs_info->scrub_lock); + + if (!dev->in_fs_metadata) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -ENODEV; + } + + if (dev->scrub_device) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -EINPROGRESS; + } + sdev = scrub_setup_dev(dev); + if (IS_ERR(sdev)) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return PTR_ERR(sdev); + } + dev->scrub_device = sdev; + + atomic_inc(&fs_info->scrubs_running); + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + + down_read(&fs_info->scrub_super_lock); + ret = scrub_supers(sdev); + up_read(&fs_info->scrub_super_lock); + + if (!ret) + ret = scrub_enumerate_chunks(sdev, start, end); + + wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); + + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); + + if (progress) + memcpy(progress, &sdev->stat, sizeof(*progress)); + + mutex_lock(&fs_info->scrub_lock); + dev->scrub_device = NULL; + mutex_unlock(&fs_info->scrub_lock); + + scrub_free_dev(sdev); + scrub_workers_put(root); + + return ret; +} + +int btrfs_scrub_pause(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + atomic_inc(&fs_info->scrub_pause_req); + while (atomic_read(&fs_info->scrubs_paused) != + atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrubs_paused) == + atomic_read(&fs_info->scrubs_running)); + mutex_lock(&fs_info->scrub_lock); + } + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +int btrfs_scrub_continue(struct 
btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + atomic_dec(&fs_info->scrub_pause_req); + wake_up(&fs_info->scrub_pause_wait); + return 0; +} + +int btrfs_scrub_pause_super(struct btrfs_root *root) +{ + down_write(&root->fs_info->scrub_super_lock); + return 0; +} + +int btrfs_scrub_continue_super(struct btrfs_root *root) +{ + up_write(&root->fs_info->scrub_super_lock); + return 0; +} + +int btrfs_scrub_cancel(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (!atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + return -ENOTCONN; + } + + atomic_inc(&fs_info->scrub_cancel_req); + while (atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrubs_running) == 0); + mutex_lock(&fs_info->scrub_lock); + } + atomic_dec(&fs_info->scrub_cancel_req); + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct scrub_dev *sdev; + + mutex_lock(&fs_info->scrub_lock); + sdev = dev->scrub_device; + if (!sdev) { + mutex_unlock(&fs_info->scrub_lock); + return -ENOTCONN; + } + atomic_inc(&sdev->cancel_req); + while (dev->scrub_device) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + dev->scrub_device == NULL); + mutex_lock(&fs_info->scrub_lock); + } + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} +int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_device *dev; + int ret; + + /* + * we have to hold the device_list_mutex here so the device + * does not go away in cancel_dev. FIXME: find a better solution + */ + mutex_lock(&fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (!dev) { + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + return -ENODEV; + } + ret = btrfs_scrub_cancel_dev(root, dev); + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + + return ret; +} + +int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, + struct btrfs_scrub_progress *progress) +{ + struct btrfs_device *dev; + struct scrub_dev *sdev = NULL; + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (dev) + sdev = dev->scrub_device; + if (sdev) + memcpy(progress, &sdev->stat, sizeof(*progress)); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + + return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV; +} diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..37c2302a08d4 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1321,6 +1321,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, WARN_ON(cur_trans != trans->transaction); + btrfs_scrub_pause(root); /* btrfs_commit_tree_roots is responsible for getting the * various roots consistent with each other. 
Every pointer * in the tree of tree roots has to point to the most up to date @@ -1405,6 +1406,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); + btrfs_scrub_continue(root); + if (current->journal_info == trans) current->journal_info = NULL; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..f1a0726da5f5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -614,7 +614,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ret = btrfs_lookup_csums_range(root->log_root, csum_start, csum_end - 1, - &ordered_sums); + &ordered_sums, 0); BUG_ON(ret); while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums; @@ -2093,7 +2093,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, * the running transaction open, so a full commit can't hop * in and cause problems either. */ + btrfs_scrub_pause_super(root); write_ctree_super(trans, root->fs_info->tree_root, 1); + btrfs_scrub_continue_super(root); ret = 0; mutex_lock(&root->log_mutex); @@ -2689,7 +2691,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ret = btrfs_lookup_csums_range( log->fs_info->csum_root, ds + cs, ds + cs + cl - 1, - &ordered_sums); + &ordered_sums, 0); BUG_ON(ret); } } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8b9fb8c7683d..89ca8f110b6e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -38,9 +38,6 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_device *device); static int btrfs_relocate_sys_chunks(struct btrfs_root *root); -#define map_lookup_size(n) (sizeof(struct map_lookup) + \ - (sizeof(struct btrfs_bio_stripe) * (n))) - static DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); @@ -1334,6 +1331,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto error_undo; device->in_fs_metadata = 0; + btrfs_scrub_cancel_dev(root, device); /* * the device list mutex makes sure that we don't change diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..f7c20123a1fe 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -85,6 +85,9 @@ struct btrfs_device { /* physical drive uuid (or lvm uuid) */ u8 uuid[BTRFS_UUID_SIZE]; + /* per-device scrub information */ + struct scrub_dev *scrub_device; + struct btrfs_work work; }; @@ -157,6 +160,9 @@ struct map_lookup { struct btrfs_bio_stripe stripes[]; }; +#define map_lookup_size(n) (sizeof(struct map_lookup) + \ + (sizeof(struct btrfs_bio_stripe) * (n))) + /* Used to sort the devices by max_avail(descending sort) */ int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); -- cgit v1.2.2 From 475f63874d739d7842a56da94687f18d583ae654 Mon Sep 17 00:00:00 2001 From: Jan Schmidt Date: Fri, 11 Mar 2011 15:41:01 +0100 Subject: btrfs: new ioctls for scrub Adds the ioctls necessary to start and cancel scrubs, to get the current progress and to get info about devices to be scrubbed. Note that the scrub is done per-device and that the ioctl only returns after the scrub for this device is finished or has been canceled.
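For illustration, a minimal userspace sketch of driving the new interface (not part of this patch): it assumes the structures and BTRFS_IOC_* numbers from the ioctl.h hunk below have been copied into a userspace header, and that struct btrfs_scrub_progress carries the counters copied from sdev->stat in scrub.c (csum_errors and friends). Any open file or directory on the filesystem serves as the ioctl target; the call blocks until the scrub of the chosen device finishes or is canceled.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/types.h>	/* __u64 for the copied header */
	#include "ioctl.h"		/* copied btrfs ioctl.h: scrub args, BTRFS_IOC_SCRUB */

	int main(int argc, char **argv)
	{
		struct btrfs_ioctl_scrub_args sa;
		int fd;

		if (argc != 2)
			return 1;
		fd = open(argv[1], O_RDONLY);	/* any path on the btrfs mount */
		if (fd < 0)
			return 1;

		memset(&sa, 0, sizeof(sa));
		sa.devid = 1;		/* device to scrub, e.g. found via BTRFS_IOC_DEV_INFO */
		sa.start = 0;
		sa.end = (__u64)-1;	/* scrub the whole device */

		if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) < 0)	/* blocks until done */
			perror("BTRFS_IOC_SCRUB");
		else
			printf("csum errors: %llu\n",
			       (unsigned long long)sa.progress.csum_errors);
		close(fd);
		return 0;
	}

While this process is blocked, a second one can poll BTRFS_IOC_SCRUB_PROGRESS with the same devid, or abort all running scrubs with BTRFS_IOC_SCRUB_CANCEL.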
Signed-off-by: Arne Jansen --- fs/btrfs/ctree.h | 2 - fs/btrfs/ioctl.c | 131 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ioctl.h | 38 ++++++++++++++++ 3 files changed, 169 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 31141ba6072d..b7373b14e4cd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -189,7 +189,6 @@ struct btrfs_mapping_tree { struct extent_map_tree map_tree; }; -#define BTRFS_UUID_SIZE 16 struct btrfs_dev_item { /* the internal btrfs device id */ __le64 devid; @@ -296,7 +295,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) sizeof(struct btrfs_stripe) * (num_stripes - 1); } -#define BTRFS_FSID_SIZE 16 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f580a3a5d2fc..205cd011d2f3 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1803,6 +1803,75 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg) return ret; } +static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) +{ + struct btrfs_ioctl_fs_info_args fi_args; + struct btrfs_device *device; + struct btrfs_device *next; + struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + fi_args.num_devices = fs_devices->num_devices; + fi_args.max_id = 0; + memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid)); + + mutex_lock(&fs_devices->device_list_mutex); + list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { + if (device->devid > fi_args.max_id) + fi_args.max_id = device->devid; + } + mutex_unlock(&fs_devices->device_list_mutex); + + if (copy_to_user(arg, &fi_args, sizeof(fi_args))) + return -EFAULT; + + return 0; +} + +static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) +{ + struct btrfs_ioctl_dev_info_args *di_args; + struct btrfs_device *dev; + struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; + int ret = 0; + char *s_uuid = NULL; + char empty_uuid[BTRFS_UUID_SIZE] = {0}; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + di_args = memdup_user(arg, sizeof(*di_args)); + if (IS_ERR(di_args)) + return PTR_ERR(di_args); + + if (memcmp(empty_uuid, di_args->uuid, BTRFS_UUID_SIZE) != 0) + s_uuid = di_args->uuid; + + mutex_lock(&fs_devices->device_list_mutex); + dev = btrfs_find_device(root, di_args->devid, s_uuid, NULL); + mutex_unlock(&fs_devices->device_list_mutex); + + if (!dev) { + ret = -ENODEV; + goto out; + } + + di_args->devid = dev->devid; + di_args->bytes_used = dev->bytes_used; + di_args->total_bytes = dev->total_bytes; + memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); + strncpy(di_args->path, dev->name, sizeof(di_args->path)); + +out: + if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) + ret = -EFAULT; + + kfree(di_args); + return ret; +} + static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, u64 off, u64 olen, u64 destoff) { @@ -2465,6 +2534,58 @@ static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp) return btrfs_wait_for_commit(root, transid); } +static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg) +{ + int ret; + struct btrfs_ioctl_scrub_args *sa; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + sa = memdup_user(arg, sizeof(*sa)); + if (IS_ERR(sa)) + return PTR_ERR(sa); + + ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end, + 
&sa->progress); + + if (copy_to_user(arg, sa, sizeof(*sa))) + ret = -EFAULT; + + kfree(sa); + return ret; +} + +static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return btrfs_scrub_cancel(root); +} + +static long btrfs_ioctl_scrub_progress(struct btrfs_root *root, + void __user *arg) +{ + struct btrfs_ioctl_scrub_args *sa; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + sa = memdup_user(arg, sizeof(*sa)); + if (IS_ERR(sa)) + return PTR_ERR(sa); + + ret = btrfs_scrub_progress(root, sa->devid, &sa->progress); + + if (copy_to_user(arg, sa, sizeof(*sa))) + ret = -EFAULT; + + kfree(sa); + return ret; +} + long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2504,6 +2625,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_add_dev(root, argp); case BTRFS_IOC_RM_DEV: return btrfs_ioctl_rm_dev(root, argp); + case BTRFS_IOC_FS_INFO: + return btrfs_ioctl_fs_info(root, argp); + case BTRFS_IOC_DEV_INFO: + return btrfs_ioctl_dev_info(root, argp); case BTRFS_IOC_BALANCE: return btrfs_balance(root->fs_info->dev_root); case BTRFS_IOC_CLONE: @@ -2527,6 +2652,12 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_start_sync(file, argp); case BTRFS_IOC_WAIT_SYNC: return btrfs_ioctl_wait_sync(file, argp); + case BTRFS_IOC_SCRUB: + return btrfs_ioctl_scrub(root, argp); + case BTRFS_IOC_SCRUB_CANCEL: + return btrfs_ioctl_scrub_cancel(root, argp); + case BTRFS_IOC_SCRUB_PROGRESS: + return btrfs_ioctl_scrub_progress(root, argp); } return -ENOTTY; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 37ac030d64b4..1a638ceeead8 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -32,6 +32,8 @@ struct btrfs_ioctl_vol_args { #define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) #define BTRFS_SUBVOL_RDONLY (1ULL << 1) +#define BTRFS_FSID_SIZE 16 +#define BTRFS_UUID_SIZE 16 #define BTRFS_SUBVOL_NAME_MAX 4039 struct btrfs_ioctl_vol_args_v2 { @@ -79,6 +81,33 @@ struct btrfs_scrub_progress { * Intermittent error. 
*/ }; +struct btrfs_ioctl_scrub_args { + __u64 devid; /* in */ + __u64 start; /* in */ + __u64 end; /* in */ + __u64 flags; /* in */ + struct btrfs_scrub_progress progress; /* out */ + /* pad to 1k */ + __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8]; +}; + +#define BTRFS_DEVICE_PATH_NAME_MAX 1024 +struct btrfs_ioctl_dev_info_args { + __u64 devid; /* in/out */ + __u8 uuid[BTRFS_UUID_SIZE]; /* in/out */ + __u64 bytes_used; /* out */ + __u64 total_bytes; /* out */ + __u64 unused[379]; /* pad to 4k */ + __u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */ +}; + +struct btrfs_ioctl_fs_info_args { + __u64 max_id; /* out */ + __u64 num_devices; /* out */ + __u8 fsid[BTRFS_FSID_SIZE]; /* out */ + __u64 reserved[124]; /* pad to 1k */ +}; + #define BTRFS_INO_LOOKUP_PATH_MAX 4080 struct btrfs_ioctl_ino_lookup_args { __u64 treeid; @@ -240,4 +269,13 @@ struct btrfs_ioctl_space_args { struct btrfs_ioctl_vol_args_v2) #define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64) #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64) +#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \ + struct btrfs_ioctl_scrub_args) +#define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28) +#define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \ + struct btrfs_ioctl_scrub_args) +#define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \ + struct btrfs_ioctl_dev_info_args) +#define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \ + struct btrfs_ioctl_fs_info_args) #endif -- cgit v1.2.2 From 96e369208e65a7d017a52361fd572df41fde8472 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sat, 9 Apr 2011 14:27:01 +0300 Subject: btrfs scrub: make fixups sync btrfs scrub - make fixups sync, don't reuse fixup bios Fixups are already sync for csum failures; this patch makes them sync for the EIO case as well. Fixups now share pages with the parent sbio - instead of allocating a separate page to do a fixup, we grab the page from the sbio buffer. Fixup bios are no longer reused. struct fixup is no longer needed; instead we pass [sbio pointer, index]. Originally this was added to look at the possibility of sharing the code between drive swap and scrub, but it actually fixes a serious bug in the scrub code where errors that could be corrected were ignored and reported as uncorrectable. btrfs scrub - restore bios properly after media errors The current code reallocates a bio after a media error. This is a temporary measure introduced in v3 after a serious problem related to bio reuse was found in v2 of the scrub patchset. Basically, we did not reset the bv_offset and bv_len fields of the bio_vec structure. They are changed in case an I/O error happens, for example, at offset 512 or 1024 into the page. Also, the bi_flags field wasn't properly set up before reusing the bio.
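To spell out what "restore bios properly" means here, a sketch of the reset, distilled from the scrub_checksum() hunk applied below (the patch open-codes it there rather than adding a helper); field names are those of struct bio and struct bio_vec in this kernel generation:

	#include <linux/bio.h>

	/* illustrative helper only, not part of the patch */
	static void scrub_reset_bio_for_reuse(struct bio *bio, int count)
	{
		int i;

		/* clear the status bits but keep the allocation/pool bits */
		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_phys_segments = 0;
		bio->bi_idx = 0;	/* rewind to the first bio_vec */

		for (i = 0; i < count; i++) {
			/* a failed read may leave these advanced into the
			 * page, e.g. by 512 or 1024 bytes */
			bio->bi_io_vec[i].bv_offset = 0;
			bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		}
	}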
Signed-off-by: Arne Jansen --- fs/btrfs/scrub.c | 287 ++++++++++++++++--------------------------------------- 1 file changed, 80 insertions(+), 207 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 70f9fa772ee9..6a50801ecfa0 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -50,7 +50,6 @@ struct scrub_bio; struct scrub_page; struct scrub_dev; -struct scrub_fixup; static void scrub_bio_end_io(struct bio *bio, int err); static void scrub_checksum(struct btrfs_work *work); static int scrub_checksum_data(struct scrub_dev *sdev, @@ -59,9 +58,11 @@ static int scrub_checksum_tree_block(struct scrub_dev *sdev, struct scrub_page *spag, u64 logical, void *buffer); static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer); -static void scrub_recheck_end_io(struct bio *bio, int err); -static void scrub_fixup_worker(struct btrfs_work *work); -static void scrub_fixup(struct scrub_fixup *fixup); +static int scrub_fixup_check(struct scrub_bio *sbio, int ix); +static void scrub_fixup_end_io(struct bio *bio, int err); +static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, + struct page *page); +static void scrub_fixup(struct scrub_bio *sbio, int ix); #define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */ #define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */ @@ -105,17 +106,6 @@ struct scrub_dev { spinlock_t stat_lock; }; -struct scrub_fixup { - struct scrub_dev *sdev; - struct bio *bio; - u64 logical; - u64 physical; - struct scrub_page spag; - struct btrfs_work work; - int err; - int recheck; -}; - static void scrub_free_csums(struct scrub_dev *sdev) { while (!list_empty(&sdev->csum_list)) { @@ -240,107 +230,34 @@ nomem: */ static void scrub_recheck_error(struct scrub_bio *sbio, int ix) { - struct scrub_dev *sdev = sbio->sdev; - struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; - struct bio *bio = NULL; - struct page *page = NULL; - struct scrub_fixup *fixup = NULL; - int ret; - - /* - * while we're in here we do not want the transaction to commit. - * To prevent it, we increment scrubs_running. 
scrub_pause will - * have to wait until we're finished - * we can safely increment scrubs_running here, because we're - * in the context of the original bio which is still marked in_flight - */ - atomic_inc(&fs_info->scrubs_running); - - fixup = kzalloc(sizeof(*fixup), GFP_NOFS); - if (!fixup) - goto malloc_error; - - fixup->logical = sbio->logical + ix * PAGE_SIZE; - fixup->physical = sbio->physical + ix * PAGE_SIZE; - fixup->spag = sbio->spag[ix]; - fixup->sdev = sdev; - - bio = bio_alloc(GFP_NOFS, 1); - if (!bio) - goto malloc_error; - bio->bi_private = fixup; - bio->bi_size = 0; - bio->bi_bdev = sdev->dev->bdev; - fixup->bio = bio; - fixup->recheck = 0; - - page = alloc_page(GFP_NOFS); - if (!page) - goto malloc_error; - - ret = bio_add_page(bio, page, PAGE_SIZE, 0); - if (!ret) - goto malloc_error; - - if (!sbio->err) { - /* - * shorter path: just a checksum error, go ahead and correct it - */ - scrub_fixup_worker(&fixup->work); - return; + if (sbio->err) { + if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, + (sbio->physical + ix * PAGE_SIZE) >> 9, + sbio->bio->bi_io_vec[ix].bv_page) == 0) { + if (scrub_fixup_check(sbio, ix) == 0) + return; + } } - /* - * an I/O-error occured for one of the blocks in the bio, not - * necessarily for this one, so first try to read it separately - */ - fixup->work.func = scrub_fixup_worker; - fixup->recheck = 1; - bio->bi_end_io = scrub_recheck_end_io; - bio->bi_sector = fixup->physical >> 9; - bio->bi_bdev = sdev->dev->bdev; - submit_bio(0, bio); - - return; - -malloc_error: - if (bio) - bio_put(bio); - if (page) - __free_page(page); - kfree(fixup); - spin_lock(&sdev->stat_lock); - ++sdev->stat.malloc_errors; - spin_unlock(&sdev->stat_lock); - atomic_dec(&fs_info->scrubs_running); - wake_up(&fs_info->scrub_pause_wait); + scrub_fixup(sbio, ix); } -static void scrub_recheck_end_io(struct bio *bio, int err) -{ - struct scrub_fixup *fixup = bio->bi_private; - struct btrfs_fs_info *fs_info = fixup->sdev->dev->dev_root->fs_info; - - fixup->err = err; - btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work); -} - -static int scrub_fixup_check(struct scrub_fixup *fixup) +static int scrub_fixup_check(struct scrub_bio *sbio, int ix) { int ret = 1; struct page *page; void *buffer; - u64 flags = fixup->spag.flags; + u64 flags = sbio->spag[ix].flags; - page = fixup->bio->bi_io_vec[0].bv_page; + page = sbio->bio->bi_io_vec[ix].bv_page; buffer = kmap_atomic(page, KM_USER0); if (flags & BTRFS_EXTENT_FLAG_DATA) { - ret = scrub_checksum_data(fixup->sdev, - &fixup->spag, buffer); + ret = scrub_checksum_data(sbio->sdev, + sbio->spag + ix, buffer); } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { - ret = scrub_checksum_tree_block(fixup->sdev, - &fixup->spag, - fixup->logical, + ret = scrub_checksum_tree_block(sbio->sdev, + sbio->spag + ix, + sbio->logical + ix * PAGE_SIZE, buffer); } else { WARN_ON(1); @@ -350,51 +267,25 @@ static int scrub_fixup_check(struct scrub_fixup *fixup) return ret; } -static void scrub_fixup_worker(struct btrfs_work *work) -{ - struct scrub_fixup *fixup; - struct btrfs_fs_info *fs_info; - u64 flags; - int ret = 1; - - fixup = container_of(work, struct scrub_fixup, work); - fs_info = fixup->sdev->dev->dev_root->fs_info; - flags = fixup->spag.flags; - - if (fixup->recheck && fixup->err == 0) - ret = scrub_fixup_check(fixup); - - if (ret || fixup->err) - scrub_fixup(fixup); - - __free_page(fixup->bio->bi_io_vec[0].bv_page); - bio_put(fixup->bio); - - atomic_dec(&fs_info->scrubs_running); - wake_up(&fs_info->scrub_pause_wait); - - kfree(fixup); -} 
- static void scrub_fixup_end_io(struct bio *bio, int err) { complete((struct completion *)bio->bi_private); } -static void scrub_fixup(struct scrub_fixup *fixup) +static void scrub_fixup(struct scrub_bio *sbio, int ix) { - struct scrub_dev *sdev = fixup->sdev; + struct scrub_dev *sdev = sbio->sdev; struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct btrfs_multi_bio *multi = NULL; - struct bio *bio = fixup->bio; + u64 logical = sbio->logical + ix * PAGE_SIZE; u64 length; int i; int ret; DECLARE_COMPLETION_ONSTACK(complete); - if ((fixup->spag.flags & BTRFS_EXTENT_FLAG_DATA) && - (fixup->spag.have_csum == 0)) { + if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) && + (sbio->spag[ix].have_csum == 0)) { /* * nodatasum, don't try to fix anything * FIXME: we can do better, open the inode and trigger a @@ -404,71 +295,49 @@ static void scrub_fixup(struct scrub_fixup *fixup) } length = PAGE_SIZE; - ret = btrfs_map_block(map_tree, REQ_WRITE, fixup->logical, &length, + ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &multi, 0); if (ret || !multi || length < PAGE_SIZE) { printk(KERN_ERR "scrub_fixup: btrfs_map_block failed us for %llu\n", - (unsigned long long)fixup->logical); + (unsigned long long)logical); WARN_ON(1); return; } - if (multi->num_stripes == 1) { + if (multi->num_stripes == 1) /* there aren't any replicas */ goto uncorrectable; - } /* * first find a good copy */ for (i = 0; i < multi->num_stripes; ++i) { - if (i == fixup->spag.mirror_num) + if (i == sbio->spag[ix].mirror_num) continue; - bio->bi_sector = multi->stripes[i].physical >> 9; - bio->bi_bdev = multi->stripes[i].dev->bdev; - bio->bi_size = PAGE_SIZE; - bio->bi_next = NULL; - bio->bi_flags |= 1 << BIO_UPTODATE; - bio->bi_comp_cpu = -1; - bio->bi_end_io = scrub_fixup_end_io; - bio->bi_private = &complete; - - submit_bio(0, bio); - - wait_for_completion(&complete); - - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev, + multi->stripes[i].physical >> 9, + sbio->bio->bi_io_vec[ix].bv_page)) { /* I/O-error, this is not a good copy */ continue; + } - ret = scrub_fixup_check(fixup); - if (ret == 0) + if (scrub_fixup_check(sbio, ix) == 0) break; } if (i == multi->num_stripes) goto uncorrectable; /* - * the bio now contains good data, write it back + * bi_io_vec[ix].bv_page now contains good data, write it back */ - bio->bi_sector = fixup->physical >> 9; - bio->bi_bdev = sdev->dev->bdev; - bio->bi_size = PAGE_SIZE; - bio->bi_next = NULL; - bio->bi_flags |= 1 << BIO_UPTODATE; - bio->bi_comp_cpu = -1; - bio->bi_end_io = scrub_fixup_end_io; - bio->bi_private = &complete; - - submit_bio(REQ_WRITE, bio); - - wait_for_completion(&complete); - - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (scrub_fixup_io(WRITE, sdev->dev->bdev, + (sbio->physical + ix * PAGE_SIZE) >> 9, + sbio->bio->bi_io_vec[ix].bv_page)) { /* I/O-error, writeback failed, give up */ goto uncorrectable; + } kfree(multi); spin_lock(&sdev->stat_lock); @@ -477,7 +346,7 @@ static void scrub_fixup(struct scrub_fixup *fixup) if (printk_ratelimit()) printk(KERN_ERR "btrfs: fixed up at %llu\n", - (unsigned long long)fixup->logical); + (unsigned long long)logical); return; uncorrectable: @@ -488,7 +357,32 @@ uncorrectable: if (printk_ratelimit()) printk(KERN_ERR "btrfs: unable to fixup at %llu\n", - (unsigned long long)fixup->logical); + (unsigned long long)logical); +} + +static int scrub_fixup_io(int rw, struct block_device *bdev, 
sector_t sector, + struct page *page) +{ + struct bio *bio = NULL; + int ret; + DECLARE_COMPLETION_ONSTACK(complete); + + /* we are going to wait on this IO */ + rw |= REQ_SYNC | REQ_UNPLUG; + + bio = bio_alloc(GFP_NOFS, 1); + bio->bi_bdev = bdev; + bio->bi_sector = sector; + bio_add_page(bio, page, PAGE_SIZE, 0); + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + submit_bio(rw, bio); + + wait_for_completion(&complete); + + ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); + bio_put(bio); + return ret; } static void scrub_bio_end_io(struct bio *bio, int err) @@ -514,44 +408,24 @@ static void scrub_checksum(struct btrfs_work *work) int ret; if (sbio->err) { - struct bio *bio; - struct bio *old_bio; - for (i = 0; i < sbio->count; ++i) scrub_recheck_error(sbio, i); + + sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1); + sbio->bio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bio->bi_phys_segments = 0; + sbio->bio->bi_idx = 0; + + for (i = 0; i < sbio->count; i++) { + struct bio_vec *bi; + bi = &sbio->bio->bi_io_vec[i]; + bi->bv_offset = 0; + bi->bv_len = PAGE_SIZE; + } + spin_lock(&sdev->stat_lock); ++sdev->stat.read_errors; spin_unlock(&sdev->stat_lock); - - /* - * FIXME: allocate a new bio after a media error. I haven't - * figured out how to reuse this one - */ - old_bio = sbio->bio; - bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); - if (!bio) { - /* - * alloc failed. cancel the scrub and don't requeue - * this sbio - */ - printk(KERN_ERR "btrfs scrub: allocation failure, " - "cancelling scrub\n"); - atomic_inc(&sdev->dev->dev_root->fs_info-> - scrub_cancel_req); - goto out_no_enqueue; - } - sbio->bio = bio; - bio->bi_private = sbio; - bio->bi_end_io = scrub_bio_end_io; - bio->bi_sector = 0; - bio->bi_bdev = sbio->sdev->dev->bdev; - bio->bi_size = 0; - for (i = 0; i < SCRUB_PAGES_PER_BIO; ++i) { - struct page *page; - page = old_bio->bi_io_vec[i].bv_page; - bio_add_page(bio, page, PAGE_SIZE, 0); - } - bio_put(old_bio); goto out; } for (i = 0; i < sbio->count; ++i) { @@ -581,7 +455,6 @@ out: sbio->next_free = sdev->first_free; sdev->first_free = sbio->index; spin_unlock(&sdev->list_lock); -out_no_enqueue: atomic_dec(&sdev->in_flight); wake_up(&sdev->list_wait); } -- cgit v1.2.2 From 8628764e1a5e1998a42b9713e9edea7753653d01 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Wed, 23 Mar 2011 16:34:19 +0100 Subject: btrfs: add readonly flag setting the readonly flag prevents writes in case an error is detected Signed-off-by: Arne Jansen --- fs/btrfs/ctree.h | 2 +- fs/btrfs/ioctl.c | 2 +- fs/btrfs/ioctl.h | 1 + fs/btrfs/scrub.c | 23 +++++++++++++---------- 4 files changed, 16 insertions(+), 12 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b7373b14e4cd..ee904666b766 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2657,7 +2657,7 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, /* scrub.c */ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, - struct btrfs_scrub_progress *progress); + struct btrfs_scrub_progress *progress, int readonly); int btrfs_scrub_pause(struct btrfs_root *root); int btrfs_scrub_pause_super(struct btrfs_root *root); int btrfs_scrub_continue(struct btrfs_root *root); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 205cd011d2f3..f0a74f014748 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2547,7 +2547,7 @@ static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg) return PTR_ERR(sa); ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end, - 
&sa->progress); + &sa->progress, sa->flags & BTRFS_SCRUB_READONLY); if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 1a638ceeead8..e5e0ee2cad4e 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -81,6 +81,7 @@ struct btrfs_scrub_progress { * Intermittent error. */ }; +#define BTRFS_SCRUB_READONLY 1 struct btrfs_ioctl_scrub_args { __u64 devid; /* in */ __u64 start; /* in */ diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 6a50801ecfa0..a31f2a9bd2e2 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -42,7 +42,6 @@ * - In case of a read error on files with nodatasum, map the file and read * the extent to trigger a writeback of the good copy * - track and record media errors, throw out bad devices - * - add a readonly mode * - add a mode to also read unallocated space * - make the prefetch cancellable */ @@ -99,6 +98,7 @@ struct scrub_dev { u16 csum_size; struct list_head csum_list; atomic_t cancel_req; + int readonly; /* * statistics */ @@ -329,14 +329,16 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix) if (i == multi->num_stripes) goto uncorrectable; - /* - * bi_io_vec[ix].bv_page now contains good data, write it back - */ - if (scrub_fixup_io(WRITE, sdev->dev->bdev, - (sbio->physical + ix * PAGE_SIZE) >> 9, - sbio->bio->bi_io_vec[ix].bv_page)) { - /* I/O-error, writeback failed, give up */ - goto uncorrectable; + if (!sdev->readonly) { + /* + * bi_io_vec[ix].bv_page now contains good data, write it back + */ + if (scrub_fixup_io(WRITE, sdev->dev->bdev, + (sbio->physical + ix * PAGE_SIZE) >> 9, + sbio->bio->bi_io_vec[ix].bv_page)) { + /* I/O-error, writeback failed, give up */ + goto uncorrectable; + } } kfree(multi); @@ -1156,7 +1158,7 @@ static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, - struct btrfs_scrub_progress *progress) + struct btrfs_scrub_progress *progress, int readonly) { struct scrub_dev *sdev; struct btrfs_fs_info *fs_info = root->fs_info; @@ -1209,6 +1211,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, scrub_workers_put(root); return PTR_ERR(sdev); } + sdev->readonly = readonly; dev->scrub_device = sdev; atomic_inc(&fs_info->scrubs_running); -- cgit v1.2.2 From 7a36ddec1003a4e84e79f28ee714a142ed6bc529 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 6 May 2011 15:33:15 +0200 Subject: btrfs: use printk_ratelimited instead of printk_ratelimit As per printk_ratelimit comment, it should not be used. 
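The conversion is mechanical. Schematically, with a made-up message (this exact hunk is not in the patch):

	/* before: all printk_ratelimit() callers share one global ratelimit state */
	if (printk_ratelimit())
		printk(KERN_INFO "btrfs: lost device %s\n", name);

	/* after: the macro keeps its own per-callsite ratelimit state */
	printk_ratelimited(KERN_INFO "btrfs: lost device %s\n", name);

printk_ratelimited() is provided by <linux/ratelimit.h>, which is why the hunks below also add that include to disk-io.c and inode.c.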
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 21 ++++++--------------- fs/btrfs/inode.c | 13 ++++--------- 2 files changed, 10 insertions(+), 24 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index de7b4770ab17..cb9d1b8bfe74 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -29,6 +29,7 @@ #include #include #include +#include <linux/ratelimit.h> #include #include "compat.h" #include "ctree.h" @@ -254,14 +255,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs: %s checksum verify " + printk_ratelimited(KERN_INFO "btrfs: %s checksum verify " "failed on %llu wanted %X found %X " "level %d\n", root->fs_info->sb->s_id, (unsigned long long)buf->start, val, found, btrfs_header_level(buf)); - } if (result != (char *)&inline_result) kfree(result); return 1; @@ -296,13 +295,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, ret = 0; goto out; } - if (printk_ratelimit()) { - printk("parent transid verify failed on %llu wanted %llu " + printk_ratelimited("parent transid verify failed on %llu wanted %llu " "found %llu\n", (unsigned long long)eb->start, (unsigned long long)parent_transid, (unsigned long long)btrfs_header_generation(eb)); - } ret = 1; clear_extent_buffer_uptodate(io_tree, eb, &cached_state); out: @@ -533,12 +530,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, found_start = btrfs_header_bytenr(eb); if (found_start != start) { - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs bad tree block start " + printk_ratelimited(KERN_INFO "btrfs bad tree block start " "%llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); - } ret = -EIO; goto err; } @@ -550,10 +545,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, goto err; } if (check_tree_block_fsid(root, eb)) { - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs bad fsid on block %llu\n", + printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n", (unsigned long long)eb->start); - } ret = -EIO; goto err; } @@ -2108,11 +2101,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) if (uptodate) { set_buffer_uptodate(bh); } else { - if (printk_ratelimit()) { - printk(KERN_WARNING "lost page write due to " + printk_ratelimited(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); - } /* note, we dont' set_buffer_write_io_error because we have * our own ways of dealing with the IO errors */ diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5ff52b644a60..1d1017f91558 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -37,6 +37,7 @@ #include #include #include +#include <linux/ratelimit.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -2004,12 +2005,10 @@ good: return 0; zeroit: - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " + printk_ratelimited(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " "private %llu\n", page->mapping->host->i_ino, (unsigned long long)start, csum, (unsigned long long)private); - } memset(kaddr + offset, 1, end - start + 1); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); @@ -4243,22 +4242,18 @@ void btrfs_dirty_inode(struct inode *inode) btrfs_end_transaction(trans, root); trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { - if (printk_ratelimit()) { - printk(KERN_ERR 
"btrfs: fail to " + printk_ratelimited(KERN_ERR "btrfs: fail to " "dirty inode %lu error %ld\n", inode->i_ino, PTR_ERR(trans)); - } return; } btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret) { - if (printk_ratelimit()) { - printk(KERN_ERR "btrfs: fail to " + printk_ratelimited(KERN_ERR "btrfs: fail to " "dirty inode %lu error %d\n", inode->i_ino, ret); - } } } btrfs_end_transaction(trans, root); -- cgit v1.2.2 From 4ea028859bbdad34b84c9951fbb832ae10c6a96c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 12 May 2011 18:13:12 +0200 Subject: btrfs: use unsigned type for single bit bitfield Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e37d441617d2..343304dec6d1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -740,12 +740,12 @@ struct btrfs_space_info { */ unsigned long reservation_progress; - int full:1; /* indicates that we cannot allocate any more + unsigned int full:1; /* indicates that we cannot allocate any more chunks for this space */ - int chunk_alloc:1; /* set if we are allocating a chunk */ + unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ - int force_alloc; /* set if we need to force a chunk alloc for - this space */ + unsigned int force_alloc; /* set if we need to force a chunk + alloc for this space */ struct list_head list; -- cgit v1.2.2 From bcd53741cc2af4342ac3ff6983bddc4a1b63b9b4 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 12 Apr 2011 10:43:21 +0200 Subject: btrfs: move btrfs_cmp_device_free_bytes to super.c this function won't be used here anymore, so move it super.c where it is used for df-calculation --- fs/btrfs/super.c | 26 ++++++++++++++++++++++++++ fs/btrfs/volumes.c | 13 ------------- fs/btrfs/volumes.h | 15 --------------- 3 files changed, 26 insertions(+), 28 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..32fe8b33cc1c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -913,6 +913,32 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) return 0; } +/* Used to sort the devices by max_avail(descending sort) */ +static int btrfs_cmp_device_free_bytes(const void *dev_info1, + const void *dev_info2) +{ + if (((struct btrfs_device_info *)dev_info1)->max_avail > + ((struct btrfs_device_info *)dev_info2)->max_avail) + return -1; + else if (((struct btrfs_device_info *)dev_info1)->max_avail < + ((struct btrfs_device_info *)dev_info2)->max_avail) + return 1; + else + return 0; +} + +/* + * sort the devices by max_avail, in which max free extent size of each device + * is stored.(Descending Sort) + */ +static inline void btrfs_descending_sort_devices( + struct btrfs_device_info *devices, + size_t nr_devices) +{ + sort(devices, nr_devices, sizeof(struct btrfs_device_info), + btrfs_cmp_device_free_bytes, NULL); +} + /* * The helper to calc the free space on the devices that can be used to store * file data. 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8b9fb8c7683d..a9f1fc23278b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2282,19 +2282,6 @@ static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, return calc_size * num_stripes; } -/* Used to sort the devices by max_avail(descending sort) */ -int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) -{ - if (((struct btrfs_device_info *)dev_info1)->max_avail > - ((struct btrfs_device_info *)dev_info2)->max_avail) - return -1; - else if (((struct btrfs_device_info *)dev_info1)->max_avail < - ((struct btrfs_device_info *)dev_info2)->max_avail) - return 1; - else - return 0; -} - static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, int *num_stripes, int *min_stripes, int *sub_stripes) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..b502f01f79ed 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -157,21 +157,6 @@ struct map_lookup { struct btrfs_bio_stripe stripes[]; }; -/* Used to sort the devices by max_avail(descending sort) */ -int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); - -/* - * sort the devices by max_avail, in which max free extent size of each device - * is stored.(Descending Sort) - */ -static inline void btrfs_descending_sort_devices( - struct btrfs_device_info *devices, - size_t nr_devices) -{ - sort(devices, nr_devices, sizeof(struct btrfs_device_info), - btrfs_cmp_device_free_bytes, NULL); -} - int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, u64 end, u64 *length); -- cgit v1.2.2 From a9c9bf68276c36898e23db770a65bd9b75bfac58 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 12 Apr 2011 11:01:20 +0200 Subject: btrfs: heed alloc_start Currently alloc_start is disregarded if the requested chunk size is bigger than (device size - alloc_start), but smaller than the device size. The only situation where I see this could have made sense was when a chunk equal to the size of the device has been requested. This was possible as the allocator failed to take alloc_start into account when calculating the requested chunk size. As this gets fixed by this patch, the workaround is not necessary anymore. --- fs/btrfs/volumes.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a9f1fc23278b..45c592a7335e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -849,10 +849,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans, /* we don't want to overwrite the superblock on the drive, * so we make sure to start at an offset of at least 1MB */ - search_start = 1024 * 1024; - - if (root->fs_info->alloc_start + num_bytes <= search_end) - search_start = max(root->fs_info->alloc_start, search_start); + search_start = max(root->fs_info->alloc_start, 1024ull * 1024); max_hole_start = search_start; max_hole_size = 0; -- cgit v1.2.2 From 73c5de0051533cbdf2bb656586c3eb21a475aa7d Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 12 Apr 2011 12:07:57 +0200 Subject: btrfs: quasi-round-robin for chunk allocation In a multi-device setup, the chunk allocator currently always allocates chunks on the devices in the same order. This leads to a very uneven distribution, especially with RAID1 or RAID10 and an uneven number of devices. 
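An illustrative example, not from the commit message: with RAID1 over three 1 GB devices, every chunk needs stripes on two distinct devices. An allocator that always walks the device list in the same order keeps pairing devices 1 and 2; once they are full, no valid pair remains, and only about 1 GB of the theoretical 1.5 GB (3 GB raw, halved by mirroring) is usable. Sorting by available space before each allocation rotates the pairings - (1,2), (3,1), (2,3), and so on - and approaches the full 1.5 GB.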
This patch always sorts the devices before allocating, and allocates the stripes on the devices with the most available space, as long as there is enough space available. In a low space situation, it first tries to maximize striping. The patch also simplifies the allocator and reduces the checks for corner cases. The simplification is done by several means. First, it defines the properties of each RAID type upfront. These properties are used afterwards instead of differentiating cases in several places. Second, the old allocator defined a minimum stripe size for each block group type, tried to find a large enough chunk, and if this failed, just allocated a smaller one. This is now done in one step. The largest possible chunk (up to max_chunk_size) is searched for and allocated. Because we now have only one pass, the allocation of the map (struct map_lookup) is moved down to the point where the number of stripes is already known. This way we avoid reallocation of the map. We still avoid allocating stripes that are not a multiple of STRIPE_SIZE. --- fs/btrfs/volumes.c | 481 ++++++++++++++++++++--------------------------------- fs/btrfs/volumes.h | 1 + 2 files changed, 177 insertions(+), 305 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 45c592a7335e..ab55bfc31a06 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2268,262 +2268,204 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, return 0; } -static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, - int num_stripes, int sub_stripes) +/* + * sort the devices in descending order by max_avail, total_avail + */ +static int btrfs_cmp_device_info(const void *a, const void *b) { - if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) - return calc_size; - else if (type & BTRFS_BLOCK_GROUP_RAID10) - return calc_size * (num_stripes / sub_stripes); - else - return calc_size * num_stripes; + const struct btrfs_device_info *di_a = a; + const struct btrfs_device_info *di_b = b; + + if (di_a->max_avail > di_b->max_avail) + return -1; + if (di_a->max_avail < di_b->max_avail) + return 1; + if (di_a->total_avail > di_b->total_avail) + return -1; + if (di_a->total_avail < di_b->total_avail) + return 1; + return 0; } -static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, - int *num_stripes, int *min_stripes, - int *sub_stripes) +static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, + struct map_lookup **map_ret, + u64 *num_bytes_out, u64 *stripe_size_out, + u64 start, u64 type) { - *num_stripes = 1; - *min_stripes = 1; - *sub_stripes = 0; + struct btrfs_fs_info *info = extent_root->fs_info; + struct btrfs_fs_devices *fs_devices = info->fs_devices; + struct list_head *cur; + struct map_lookup *map = NULL; + struct extent_map_tree *em_tree; + struct extent_map *em; + struct btrfs_device_info *devices_info = NULL; + u64 total_avail; + int num_stripes; /* total number of stripes to allocate */ + int sub_stripes; /* sub_stripes info for map */ + int dev_stripes; /* stripes per dev */ + int devs_max; /* max devs to use */ + int devs_min; /* min devs needed */ + int devs_increment; /* ndevs has to be a multiple of this */ + int ncopies; /* how many copies to data has */ + int ret; + u64 max_stripe_size; + u64 max_chunk_size; + u64 stripe_size; + u64 num_bytes; + int ndevs; + int i; + int j; - if (type & (BTRFS_BLOCK_GROUP_RAID0)) { - *num_stripes = fs_devices->rw_devices; - *min_stripes = 2; - } - if (type
& (BTRFS_BLOCK_GROUP_DUP)) { - *num_stripes = 2; - *min_stripes = 2; - } - if (type & (BTRFS_BLOCK_GROUP_RAID1)) { - if (fs_devices->rw_devices < 2) - return -ENOSPC; - *num_stripes = 2; - *min_stripes = 2; - } - if (type & (BTRFS_BLOCK_GROUP_RAID10)) { - *num_stripes = fs_devices->rw_devices; - if (*num_stripes < 4) - return -ENOSPC; - *num_stripes &= ~(u32)1; - *sub_stripes = 2; - *min_stripes = 4; + if ((type & BTRFS_BLOCK_GROUP_RAID1) && + (type & BTRFS_BLOCK_GROUP_DUP)) { + WARN_ON(1); + type &= ~BTRFS_BLOCK_GROUP_DUP; } - return 0; -} + if (list_empty(&fs_devices->alloc_list)) + return -ENOSPC; -static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, - u64 proposed_size, u64 type, - int num_stripes, int small_stripe) -{ - int min_stripe_size = 1 * 1024 * 1024; - u64 calc_size = proposed_size; - u64 max_chunk_size = calc_size; - int ncopies = 1; + sub_stripes = 1; + dev_stripes = 1; + devs_increment = 1; + ncopies = 1; + devs_max = 0; /* 0 == as many as possible */ + devs_min = 1; - if (type & (BTRFS_BLOCK_GROUP_RAID1 | - BTRFS_BLOCK_GROUP_DUP | - BTRFS_BLOCK_GROUP_RAID10)) + /* + * define the properties of each RAID type. + * FIXME: move this to a global table and use it in all RAID + * calculation code + */ + if (type & (BTRFS_BLOCK_GROUP_DUP)) { + dev_stripes = 2; ncopies = 2; + devs_max = 1; + } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) { + devs_min = 2; + } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) { + devs_increment = 2; + ncopies = 2; + devs_max = 2; + devs_min = 2; + } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) { + sub_stripes = 2; + devs_increment = 2; + ncopies = 2; + devs_min = 4; + } else { + devs_max = 1; + } if (type & BTRFS_BLOCK_GROUP_DATA) { - max_chunk_size = 10 * calc_size; - min_stripe_size = 64 * 1024 * 1024; + max_stripe_size = 1024 * 1024 * 1024; + max_chunk_size = 10 * max_stripe_size; } else if (type & BTRFS_BLOCK_GROUP_METADATA) { - max_chunk_size = 256 * 1024 * 1024; - min_stripe_size = 32 * 1024 * 1024; + max_stripe_size = 256 * 1024 * 1024; + max_chunk_size = max_stripe_size; } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { - calc_size = 8 * 1024 * 1024; - max_chunk_size = calc_size * 2; - min_stripe_size = 1 * 1024 * 1024; + max_stripe_size = 8 * 1024 * 1024; + max_chunk_size = 2 * max_stripe_size; + } else { + printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n", + type); + BUG_ON(1); } /* we don't want a chunk larger than 10% of writeable space */ max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), max_chunk_size); - if (calc_size * num_stripes > max_chunk_size * ncopies) { - calc_size = max_chunk_size * ncopies; - do_div(calc_size, num_stripes); - do_div(calc_size, BTRFS_STRIPE_LEN); - calc_size *= BTRFS_STRIPE_LEN; - } + devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, + GFP_NOFS); + if (!devices_info) + return -ENOMEM; - /* we don't want tiny stripes */ - if (!small_stripe) - calc_size = max_t(u64, min_stripe_size, calc_size); + cur = fs_devices->alloc_list.next; /* - * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure - * we end up with something bigger than a stripe + * in the first pass through the devices list, we gather information + * about the available holes on each device. 
*/ - calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); - - do_div(calc_size, BTRFS_STRIPE_LEN); - calc_size *= BTRFS_STRIPE_LEN; - - return calc_size; -} + ndevs = 0; + while (cur != &fs_devices->alloc_list) { + struct btrfs_device *device; + u64 max_avail; + u64 dev_offset; -static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, - int num_stripes) -{ - struct map_lookup *new; - size_t len = map_lookup_size(num_stripes); - - BUG_ON(map->num_stripes < num_stripes); - - if (map->num_stripes == num_stripes) - return map; - - new = kmalloc(len, GFP_NOFS); - if (!new) { - /* just change map->num_stripes */ - map->num_stripes = num_stripes; - return map; - } - - memcpy(new, map, len); - new->num_stripes = num_stripes; - kfree(map); - return new; -} - -/* - * helper to allocate device space from btrfs_device_info, in which we stored - * max free space information of every device. It is used when we can not - * allocate chunks by default size. - * - * By this helper, we can allocate a new chunk as larger as possible. - */ -static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, - struct btrfs_fs_devices *fs_devices, - struct btrfs_device_info *devices, - int nr_device, u64 type, - struct map_lookup **map_lookup, - int min_stripes, u64 *stripe_size) -{ - int i, index, sort_again = 0; - int min_devices = min_stripes; - u64 max_avail, min_free; - struct map_lookup *map = *map_lookup; - int ret; + device = list_entry(cur, struct btrfs_device, dev_alloc_list); - if (nr_device < min_stripes) - return -ENOSPC; + cur = cur->next; - btrfs_descending_sort_devices(devices, nr_device); + if (!device->writeable) { + printk(KERN_ERR + "btrfs: read-only device in alloc_list\n"); + WARN_ON(1); + continue; + } - max_avail = devices[0].max_avail; - if (!max_avail) - return -ENOSPC; + if (!device->in_fs_metadata) + continue; - for (i = 0; i < nr_device; i++) { - /* - * if dev_offset = 0, it means the free space of this device - * is less than what we need, and we didn't search max avail - * extent on this device, so do it now. 
+ if (device->total_bytes > device->bytes_used) + total_avail = device->total_bytes - device->bytes_used; + else + total_avail = 0; + /* avail is off by max(alloc_start, 1MB), but that is the same + * for all devices, so it doesn't hurt the sorting later on */ - if (!devices[i].dev_offset) { - ret = find_free_dev_extent(trans, devices[i].dev, - max_avail, - &devices[i].dev_offset, - &devices[i].max_avail); - if (ret != 0 && ret != -ENOSPC) - return ret; - sort_again = 1; - } - } - /* we update the max avail free extent of each devices, sort again */ - if (sort_again) - btrfs_descending_sort_devices(devices, nr_device); - - if (type & BTRFS_BLOCK_GROUP_DUP) - min_devices = 1; + ret = find_free_dev_extent(trans, device, + max_stripe_size * dev_stripes, + &dev_offset, &max_avail); + if (ret && ret != -ENOSPC) + goto error; - if (!devices[min_devices - 1].max_avail) - return -ENOSPC; + if (ret == 0) + max_avail = max_stripe_size * dev_stripes; - max_avail = devices[min_devices - 1].max_avail; - if (type & BTRFS_BLOCK_GROUP_DUP) - do_div(max_avail, 2); + if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) + continue; - max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, - min_stripes, 1); - if (type & BTRFS_BLOCK_GROUP_DUP) - min_free = max_avail * 2; - else - min_free = max_avail; + devices_info[ndevs].dev_offset = dev_offset; + devices_info[ndevs].max_avail = max_avail; + devices_info[ndevs].total_avail = total_avail; + devices_info[ndevs].dev = device; + ++ndevs; + } - if (min_free > devices[min_devices - 1].max_avail) - return -ENOSPC; + /* + * now sort the devices by hole size / available space + */ + sort(devices_info, ndevs, sizeof(struct btrfs_device_info), + btrfs_cmp_device_info, NULL); - map = __shrink_map_lookup_stripes(map, min_stripes); - *stripe_size = max_avail; + /* round down to number of usable stripes */ + ndevs -= ndevs % devs_increment; - index = 0; - for (i = 0; i < min_stripes; i++) { - map->stripes[i].dev = devices[index].dev; - map->stripes[i].physical = devices[index].dev_offset; - if (type & BTRFS_BLOCK_GROUP_DUP) { - i++; - map->stripes[i].dev = devices[index].dev; - map->stripes[i].physical = devices[index].dev_offset + - max_avail; - } - index++; + if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) { + ret = -ENOSPC; + goto error; } - *map_lookup = map; - - return 0; -} -static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct map_lookup **map_ret, - u64 *num_bytes, u64 *stripe_size, - u64 start, u64 type) -{ - struct btrfs_fs_info *info = extent_root->fs_info; - struct btrfs_device *device = NULL; - struct btrfs_fs_devices *fs_devices = info->fs_devices; - struct list_head *cur; - struct map_lookup *map; - struct extent_map_tree *em_tree; - struct extent_map *em; - struct btrfs_device_info *devices_info; - struct list_head private_devs; - u64 calc_size = 1024 * 1024 * 1024; - u64 min_free; - u64 avail; - u64 dev_offset; - int num_stripes; - int min_stripes; - int sub_stripes; - int min_devices; /* the min number of devices we need */ - int i; - int ret; - int index; + if (devs_max && ndevs > devs_max) + ndevs = devs_max; + /* + * the primary goal is to maximize the number of stripes, so use as many + * devices as possible, even if the stripes are not maximum sized. 
+ */ + stripe_size = devices_info[ndevs-1].max_avail; + num_stripes = ndevs * dev_stripes; - if ((type & BTRFS_BLOCK_GROUP_RAID1) && - (type & BTRFS_BLOCK_GROUP_DUP)) { - WARN_ON(1); - type &= ~BTRFS_BLOCK_GROUP_DUP; + if (stripe_size * num_stripes > max_chunk_size * ncopies) { + stripe_size = max_chunk_size * ncopies; + do_div(stripe_size, num_stripes); } - if (list_empty(&fs_devices->alloc_list)) - return -ENOSPC; - - ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, - &min_stripes, &sub_stripes); - if (ret) - return ret; - devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, - GFP_NOFS); - if (!devices_info) - return -ENOMEM; + do_div(stripe_size, dev_stripes); + do_div(stripe_size, BTRFS_STRIPE_LEN); + stripe_size *= BTRFS_STRIPE_LEN; map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { @@ -2532,85 +2474,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, } map->num_stripes = num_stripes; - cur = fs_devices->alloc_list.next; - index = 0; - i = 0; - - calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, - num_stripes, 0); - - if (type & BTRFS_BLOCK_GROUP_DUP) { - min_free = calc_size * 2; - min_devices = 1; - } else { - min_free = calc_size; - min_devices = min_stripes; - } - - INIT_LIST_HEAD(&private_devs); - while (index < num_stripes) { - device = list_entry(cur, struct btrfs_device, dev_alloc_list); - BUG_ON(!device->writeable); - if (device->total_bytes > device->bytes_used) - avail = device->total_bytes - device->bytes_used; - else - avail = 0; - cur = cur->next; - - if (device->in_fs_metadata && avail >= min_free) { - ret = find_free_dev_extent(trans, device, min_free, - &devices_info[i].dev_offset, - &devices_info[i].max_avail); - if (ret == 0) { - list_move_tail(&device->dev_alloc_list, - &private_devs); - map->stripes[index].dev = device; - map->stripes[index].physical = - devices_info[i].dev_offset; - index++; - if (type & BTRFS_BLOCK_GROUP_DUP) { - map->stripes[index].dev = device; - map->stripes[index].physical = - devices_info[i].dev_offset + - calc_size; - index++; - } - } else if (ret != -ENOSPC) - goto error; - - devices_info[i].dev = device; - i++; - } else if (device->in_fs_metadata && - avail >= BTRFS_STRIPE_LEN) { - devices_info[i].dev = device; - devices_info[i].max_avail = avail; - i++; - } - - if (cur == &fs_devices->alloc_list) - break; - } - - list_splice(&private_devs, &fs_devices->alloc_list); - if (index < num_stripes) { - if (index >= min_stripes) { - num_stripes = index; - if (type & (BTRFS_BLOCK_GROUP_RAID10)) { - num_stripes /= sub_stripes; - num_stripes *= sub_stripes; - } - - map = __shrink_map_lookup_stripes(map, num_stripes); - } else if (i >= min_devices) { - ret = __btrfs_alloc_tiny_space(trans, fs_devices, - devices_info, i, type, - &map, min_stripes, - &calc_size); - if (ret) - goto error; - } else { - ret = -ENOSPC; - goto error; + for (i = 0; i < ndevs; ++i) { + for (j = 0; j < dev_stripes; ++j) { + int s = i * dev_stripes + j; + map->stripes[s].dev = devices_info[i].dev; + map->stripes[s].physical = devices_info[i].dev_offset + + j * stripe_size; } } map->sector_size = extent_root->sectorsize; @@ -2621,11 +2490,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, map->sub_stripes = sub_stripes; *map_ret = map; - *stripe_size = calc_size; - *num_bytes = chunk_bytes_by_type(type, calc_size, - map->num_stripes, sub_stripes); + num_bytes = stripe_size * (num_stripes / ncopies); + + *stripe_size_out = stripe_size; + *num_bytes_out = num_bytes; - 
trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); + trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes); em = alloc_extent_map(GFP_NOFS); if (!em) { @@ -2634,7 +2504,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, } em->bdev = (struct block_device *)map; em->start = start; - em->len = *num_bytes; + em->len = num_bytes; em->block_start = 0; em->block_len = em->len; @@ -2647,20 +2517,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, ret = btrfs_make_block_group(trans, extent_root, 0, type, BTRFS_FIRST_CHUNK_TREE_OBJECTID, - start, *num_bytes); + start, num_bytes); BUG_ON(ret); - index = 0; - while (index < map->num_stripes) { - device = map->stripes[index].dev; - dev_offset = map->stripes[index].physical; + for (i = 0; i < map->num_stripes; ++i) { + struct btrfs_device *device; + u64 dev_offset; + + device = map->stripes[i].dev; + dev_offset = map->stripes[i].physical; ret = btrfs_alloc_dev_extent(trans, device, info->chunk_root->root_key.objectid, BTRFS_FIRST_CHUNK_TREE_OBJECTID, - start, dev_offset, calc_size); + start, dev_offset, stripe_size); BUG_ON(ret); - index++; } kfree(devices_info); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index b502f01f79ed..37ae6e2126a1 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -144,6 +144,7 @@ struct btrfs_device_info { struct btrfs_device *dev; u64 dev_offset; u64 max_avail; + u64 total_avail; }; struct map_lookup { -- cgit v1.2.2 From f5de93914983bf04b92a786d1d205286fc53b49b Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Tue, 3 May 2011 16:44:13 +0000 Subject: Prevent oopsing in posix_acl_valid() If posix_acl_from_xattr() returns an error code, a negative address is dereferenced, causing an oops; fix by checking for an error code first. Signed-off-by: Daniel J Blueman Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/acl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index a892bc27f13a..827be9a6ca33 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, if (value) { acl = posix_acl_from_xattr(value, size); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl) { ret = posix_acl_valid(acl); if (ret) goto out; - } else if (IS_ERR(acl)) { - return PTR_ERR(acl); } } -- cgit v1.2.2 From 1aba86d67f340a8001d67183ec32e8a62e3ec658 Mon Sep 17 00:00:00 2001 From: liubo Date: Fri, 8 Apr 2011 08:44:37 +0000 Subject: Btrfs: fix easily get into ENOSPC in mixed case When a btrfs disk is created with the mixed data & metadata option, it will have no pure data or pure metadata space info. In btrfs's for-linus branch, commit 78b1ea13838039cd88afdd62519b40b344d6c920 (Btrfs: fix OOPS of empty filesystem after balance) initializes space infos at the very beginning. The problem is that this initialization does not take the mixed case into account, which causes btrfs to easily get into ENOSPC in the mixed case.
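For illustration only (this is not part of the patch), a minimal sketch of the mixed-group detection that the fix adds to btrfs_init_space_info(); the helper name btrfs_fs_is_mixed is made up here, but the flag and the accessors are the ones used in the diff below:

/*
 * Sketch: a filesystem created with mixed data & metadata carries the
 * MIXED_GROUPS incompat bit in its superblock, so data and metadata
 * must share a single space info instead of getting one each.
 */
static int btrfs_fs_is_mixed(struct btrfs_fs_info *fs_info)
{
        u64 features = btrfs_super_incompat_flags(&fs_info->super_copy);

        return (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) != 0;
}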
Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd52f7f556ef..9ee6bd55e16c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8856,23 +8856,38 @@ out: int btrfs_init_space_info(struct btrfs_fs_info *fs_info) { struct btrfs_space_info *space_info; + struct btrfs_super_block *disk_super; + u64 features; + u64 flags; + int mixed = 0; int ret; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, - &space_info); - if (ret) - return ret; + disk_super = &fs_info->super_copy; + if (!btrfs_super_root(disk_super)) + return 1; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, - &space_info); - if (ret) - return ret; + features = btrfs_super_incompat_flags(disk_super); + if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) + mixed = 1; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, - &space_info); + flags = BTRFS_BLOCK_GROUP_SYSTEM; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); if (ret) - return ret; + goto out; + if (mixed) { + flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + } else { + flags = BTRFS_BLOCK_GROUP_METADATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + if (ret) + goto out; + + flags = BTRFS_BLOCK_GROUP_DATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + } +out: return ret; } -- cgit v1.2.2 From e1e8fb6a1ff3f9487e03a4cbf85b81d1316068ce Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 15 Apr 2011 03:02:49 +0000 Subject: fs: remove FS_COW_FL FS_COW_FL and FS_NOCOW_FL were newly introduced to control per-file COW in btrfs, but FS_NOCOW_FL is sufficient. The fact is we don't have a corresponding BTRFS_INODE_COW flag. COW is the default, and FS_NOCOW_FL can be used to switch off COW for a single file. If we mount btrfs with nodatacow, a newly created file will be set with the FS_NOCOW_FL flag. So to turn on COW for it, we can just clear the FS_NOCOW_FL flag.
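To make the resulting interface concrete, a small userspace sketch (not part of the patch); it assumes FS_NOCOW_FL is exported through linux/fs.h, as this series arranges, and the helper name enable_cow is made up for illustration:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Switch COW back on for an open file by clearing FS_NOCOW_FL. */
static int enable_cow(int fd)
{
        unsigned int flags;

        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                return -1;
        flags &= ~FS_NOCOW_FL;
        return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}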
Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f580a3a5d2fc..3240dd90da42 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -144,16 +144,13 @@ static int check_flags(unsigned int flags) if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ FS_SYNC_FL | FS_DIRSYNC_FL | \ - FS_NOCOMP_FL | FS_COMPR_FL | \ - FS_NOCOW_FL | FS_COW_FL)) + FS_NOCOMP_FL | FS_COMPR_FL | + FS_NOCOW_FL)) return -EOPNOTSUPP; if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; - if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) - return -EINVAL; - return 0; } @@ -218,6 +215,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags |= BTRFS_INODE_DIRSYNC; else ip->flags &= ~BTRFS_INODE_DIRSYNC; + if (flags & FS_NOCOW_FL) + ip->flags |= BTRFS_INODE_NODATACOW; + else + ip->flags &= ~BTRFS_INODE_NODATACOW; /* * The COMPRESS flag can only be changed by users, while the NOCOMPRESS @@ -231,10 +232,6 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags |= BTRFS_INODE_COMPRESS; ip->flags &= ~BTRFS_INODE_NOCOMPRESS; } - if (flags & FS_NOCOW_FL) - ip->flags |= BTRFS_INODE_NODATACOW; - else if (flags & FS_COW_FL) - ip->flags &= ~BTRFS_INODE_NODATACOW; trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); -- cgit v1.2.2 From d0092bdda819914b8725da76a8c33eb06eb0bd21 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 15 Apr 2011 03:03:06 +0000 Subject: Btrfs: fix FS_IOC_GETFLAGS ioctl As we've added per-file compression/COW support, report the corresponding flags as well. Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 3240dd90da42..aeabf6b6ccc8 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -81,6 +81,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) iflags |= FS_NOATIME_FL; if (flags & BTRFS_INODE_DIRSYNC) iflags |= FS_DIRSYNC_FL; + if (flags & BTRFS_INODE_NODATACOW) + iflags |= FS_NOCOW_FL; + + if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) + iflags |= FS_COMPR_FL; + else if (flags & BTRFS_INODE_NOCOMPRESS) + iflags |= FS_NOCOMP_FL; return iflags; } -- cgit v1.2.2 From ebcb904dfe31644857422e3bb62e50f76fe86255 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 15 Apr 2011 03:03:17 +0000 Subject: Btrfs: fix FS_IOC_SETFLAGS ioctl Steps to reproduce the bug:
- Call FS_IOC_SETFLAGS ioctl with flags=FS_COMPR_FL
- Call FS_IOC_SETFLAGS ioctl with flags=0
- Call FS_IOC_GETFLAGS ioctl, and you'll see FS_COMPR_FL is still set!
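The steps above translate directly into a short repro program; a sketch (not part of the patch), with error handling trimmed for brevity:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
        unsigned int flags = FS_COMPR_FL;
        int fd = open(argv[1], O_RDONLY);

        if (fd < 0)
                return 1;
        ioctl(fd, FS_IOC_SETFLAGS, &flags);     /* set compression */
        flags = 0;
        ioctl(fd, FS_IOC_SETFLAGS, &flags);     /* try to clear all flags */
        ioctl(fd, FS_IOC_GETFLAGS, &flags);
        /* without this fix, FS_COMPR_FL is still reported as set here */
        printf("FS_COMPR_FL is %s\n",
               (flags & FS_COMPR_FL) ? "still set" : "cleared");
        return 0;
}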
Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index aeabf6b6ccc8..3e7031d32eef 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -238,6 +238,8 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) } else if (flags & FS_COMPR_FL) { ip->flags |= BTRFS_INODE_COMPRESS; ip->flags &= ~BTRFS_INODE_NOCOMPRESS; + } else { + ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } trans = btrfs_join_transaction(root, 1); -- cgit v1.2.2 From 268bb0ce3e87872cb9290c322b0d35bce230d88f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 20 May 2011 12:50:29 -0700 Subject: sanitize <linux/prefetch.h> usage Commit e66eed651fd1 ("list: remove prefetching from regular list iterators") removed the include of prefetch.h from list.h, which uncovered several cases that had apparently relied on that rather obscure header file dependency. So this fixes things up a bit, using grep -L linux/prefetch.h $(git grep -l '[^a-z_]prefetchw*(' -- '*.[ch]') grep -L 'prefetchw*(' $(git grep -l 'linux/prefetch.h' -- '*.[ch]') to guide us in finding files that either need inclusion, or have it despite not needing it. There are more of them around (mostly network drivers), but this gets many core ones. Reported-by: Stephen Rothwell Signed-off-by: Linus Torvalds --- fs/btrfs/extent_io.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..96fcfa522dab 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -10,6 +10,7 @@ #include <linux/swap.h> #include <linux/writeback.h> #include <linux/pagevec.h> +#include <linux/prefetch.h> #include "extent_io.h" #include "extent_map.h" #include "compat.h" -- cgit v1.2.2 From 16cdcec736cd214350cdb591bf1091f8beedefa0 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Fri, 22 Apr 2011 18:12:22 +0800 Subject: btrfs: implement delayed inode items operation

Changelog V5 -> V6:
- Fix oom when the memory load is high, by storing the delayed nodes into the root's radix tree, and letting btrfs inodes go.

Changelog V4 -> V5:
- Fix the race on adding the delayed node to the inode, which was spotted by Chris Mason.
- Merge Chris Mason's incremental patch into this patch.
- Fix the deadlock between readdir() and memory fault, which was reported by Itaru Kitayama.

Changelog V3 -> V4:
- Fix the nested lock, which was reported by Itaru Kitayama, by updating the space cache inode in time.

Changelog V2 -> V3:
- Fix the race between the delayed worker and the task which does the delayed items balance, which was reported by Tsutomu Itoh.
- Modify the patch to address David Sterba's comment.
- Fix the bug of the cpu recursion spinlock, reported by Chris Mason.

Changelog V1 -> V2:
- break up the global rb-tree, use a list to manage the delayed nodes, which are created for every directory and file, and used to manage the delayed directory name index items and the delayed inode item.
- introduce a worker to deal with the delayed nodes.

Compared with Ext3/4, the performance of file creation and deletion on btrfs is very poor. The reason is that btrfs must do a lot of b+ tree insertions, such as inode item, directory name item, directory name index and so on. If we can do some delayed b+ tree insertion or deletion, we can improve the performance, so we made this patch, which implements delayed directory name index insertion/deletion and delayed inode update.
Implementation:
- introduce a delayed root object into the filesystem, which uses two lists to manage the delayed nodes that are created for every file/directory. One is used to manage all the delayed nodes that have delayed items. The other is used to manage the delayed nodes that are waiting to be dealt with by the work thread.
- Every delayed node has two rb-trees: one is used to manage the directory name indexes that are going to be inserted into the b+ tree, and the other is used to manage the directory name indexes that are going to be deleted from the b+ tree.
- introduce a worker to deal with the delayed operations. This worker handles the insertion and deletion of the delayed directory name index items and the delayed inode updates. When the number of delayed items is beyond the lower limit, we create works for some delayed nodes and insert them into the work queue of the worker, and then go back. When the number of delayed items is beyond the upper bound, we create works for all the delayed nodes that haven't been dealt with, insert them into the work queue of the worker, and then wait until the number of untreated items is below some threshold value.
- When we want to insert a directory name index into the b+ tree, we just add the information into the delayed insertion rb-tree. Then we check the number of delayed items and do the delayed items balance. (The balance policy is above.)
- When we want to delete a directory name index from the b+ tree, we first search for it in the insertion rb-tree. If we find it, we just drop it. If not, we add its key into the delayed deletion rb-tree. As with the delayed insertion rb-tree, we then check the number of delayed items and do the delayed items balance. (The same as for the insertion manipulation.)
- When we want to update the metadata of some inode, we cache the data of the inode in the delayed node. The worker will flush it into the b+ tree after dealing with the delayed insertions and deletions.
- We move the delayed node to the tail of the list after we access it. This way, we can cache more delayed items and merge more inode updates.
- If we want to commit the transaction, we deal with all the delayed nodes.
- The delayed node will be freed when we free the btrfs inode.
- Before we log the inode items, we commit all the directory name index items and the delayed inode update.

I did a quick test with the benchmark tool[1] and found we can improve the performance of file creation by ~15%, and of file deletion by ~20%.

Before applying this patch:
Create files: Total files: 50000 Total time: 1.096108 Average time: 0.000022
Delete files: Total files: 50000 Total time: 1.510403 Average time: 0.000030

After applying this patch:
Create files: Total files: 50000 Total time: 0.932899 Average time: 0.000019
Delete files: Total files: 50000 Total time: 1.215732 Average time: 0.000024

[1] http://marc.info/?l=linux-btrfs&m=128212635122920&q=p3

Many thanks for Kitayama-san's help!
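To make the description above concrete, a heavily pared-down sketch of the bookkeeping this patch introduces (illustrative only; the *_sketch names are made up, the field names follow the patch, and locking, reservation and refcounting details are omitted):

#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <asm/atomic.h>

/* One per filesystem: tracks every delayed node and the balance state. */
struct delayed_root_sketch {
        struct list_head node_list;     /* all nodes that carry delayed items */
        struct list_head prepare_list;  /* nodes queued for the work thread */
        atomic_t items;                 /* total delayed items, drives balancing */
        wait_queue_head_t wait;         /* throttled tasks wait here */
};

/* One per inode: the delayed work pending for that inode. */
struct delayed_node_sketch {
        struct rb_root ins_root;        /* dir index items to insert into the b+ tree */
        struct rb_root del_root;        /* dir index keys to delete from the b+ tree */
        int inode_dirty;                /* cached inode item needs flushing */
        struct mutex mutex;
};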
Signed-off-by: Miao Xie Reviewed-by: David Sterba Tested-by: Tsutomu Itoh Tested-by: Itaru Kitayama Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/btrfs_inode.h | 5 + fs/btrfs/ctree.c | 14 +- fs/btrfs/ctree.h | 29 +- fs/btrfs/delayed-inode.c | 1694 ++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/delayed-inode.h | 141 ++++ fs/btrfs/dir-item.c | 34 +- fs/btrfs/disk-io.c | 50 +- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 18 +- fs/btrfs/inode.c | 111 ++- fs/btrfs/ioctl.c | 2 +- fs/btrfs/super.c | 10 +- fs/btrfs/transaction.c | 45 +- fs/btrfs/transaction.h | 2 + fs/btrfs/tree-log.c | 7 + 16 files changed, 2074 insertions(+), 91 deletions(-) create mode 100644 fs/btrfs/delayed-inode.c create mode 100644 fs/btrfs/delayed-inode.h (limited to 'fs/btrfs') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73aec..a8411c22313d 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ - compression.o delayed-ref.o relocation.o + compression.o delayed-ref.o relocation.o delayed-inode.o diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..beefafd91f22 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -22,6 +22,7 @@ #include "extent_map.h" #include "extent_io.h" #include "ordered-data.h" +#include "delayed-inode.h" /* in memory btrfs inode */ struct btrfs_inode { @@ -158,9 +159,13 @@ struct btrfs_inode { */ unsigned force_compress:4; + struct btrfs_delayed_node *delayed_node; + struct inode vfs_inode; }; +extern unsigned char btrfs_filetype_table[]; + static inline struct btrfs_inode *BTRFS_I(struct inode *inode) { return container_of(inode, struct btrfs_inode, vfs_inode); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..2736b6b2ff5f 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans, struct extent_buffer *src_buf); static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot); -static int setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr); - struct btrfs_path *btrfs_alloc_path(void) { @@ -3559,11 +3554,10 @@ out: * to save stack depth by doing the bulk of the work in a function * that doesn't call btrfs_search_slot */ -static noinline_for_stack int -setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr) +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..5d25129d0116 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -869,6 +869,7 @@ struct btrfs_block_group_cache { struct reloc_control; struct btrfs_device; struct btrfs_fs_devices; +struct btrfs_delayed_root; struct btrfs_fs_info { u8 fsid[BTRFS_FSID_SIZE]; u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 
@@ -895,7 +896,10 @@ struct btrfs_fs_info { /* logical->physical extent mapping */ struct btrfs_mapping_tree mapping_tree; - /* block reservation for extent, checksum and root tree */ + /* + * block reservation for extent, checksum, root tree and + * delayed dir index item + */ struct btrfs_block_rsv global_block_rsv; /* block reservation for delay allocation */ struct btrfs_block_rsv delalloc_block_rsv; @@ -1022,6 +1026,7 @@ struct btrfs_fs_info { * for the sys_munmap function call path */ struct btrfs_workers fixup_workers; + struct btrfs_workers delayed_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; int thread_pool_size; @@ -1079,6 +1084,8 @@ struct btrfs_fs_info { /* filesystem state */ u64 fs_state; + + struct btrfs_delayed_root *delayed_root; }; /* @@ -1161,6 +1168,11 @@ struct btrfs_root { /* red-black tree that keeps track of in-memory inodes */ struct rb_root inode_tree; + /* + * radix tree that keeps track of delayed nodes of every inode, + * protected by inode_lock + */ + struct radix_tree_root delayed_nodes_tree; /* * right now this just gets used so that a root has its own devid * for stat. It may be used for more later @@ -2099,6 +2111,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) } /* extent-tree.c */ +static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, + int num_items) +{ + return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * + 3 * num_items; +} + void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); @@ -2294,6 +2313,8 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); void btrfs_set_path_blocking(struct btrfs_path *p); +void btrfs_clear_path_blocking(struct btrfs_path *p, + struct extent_buffer *held); void btrfs_unlock_up_safe(struct btrfs_path *p, int level); int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -2305,6 +2326,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, return btrfs_del_items(trans, root, path, path->slots[0], 1); } +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr); int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); int btrfs_insert_some_items(struct btrfs_trans_handle *trans, @@ -2368,7 +2393,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item); /* dir-item.c */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, - int name_len, u64 dir, + int name_len, struct inode *dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c new file mode 100644 index 000000000000..95485318f001 --- /dev/null +++ b/fs/btrfs/delayed-inode.c @@ -0,0 +1,1694 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. 
+ * Written by Miao Xie + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/slab.h> +#include "delayed-inode.h" +#include "disk-io.h" +#include "transaction.h" + +#define BTRFS_DELAYED_WRITEBACK 400 +#define BTRFS_DELAYED_BACKGROUND 100 + +static struct kmem_cache *delayed_node_cache; + +int __init btrfs_delayed_inode_init(void) +{ + delayed_node_cache = kmem_cache_create("delayed_node", + sizeof(struct btrfs_delayed_node), + 0, + SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, + NULL); + if (!delayed_node_cache) + return -ENOMEM; + return 0; +} + +void btrfs_delayed_inode_exit(void) +{ + if (delayed_node_cache) + kmem_cache_destroy(delayed_node_cache); +} + +static inline void btrfs_init_delayed_node( + struct btrfs_delayed_node *delayed_node, + struct btrfs_root *root, u64 inode_id) +{ + delayed_node->root = root; + delayed_node->inode_id = inode_id; + atomic_set(&delayed_node->refs, 0); + delayed_node->count = 0; + delayed_node->in_list = 0; + delayed_node->inode_dirty = 0; + delayed_node->ins_root = RB_ROOT; + delayed_node->del_root = RB_ROOT; + mutex_init(&delayed_node->mutex); + delayed_node->index_cnt = 0; + INIT_LIST_HEAD(&delayed_node->n_list); + INIT_LIST_HEAD(&delayed_node->p_list); + delayed_node->bytes_reserved = 0; +} + +static inline int btrfs_is_continuous_delayed_item( + struct btrfs_delayed_item *item1, + struct btrfs_delayed_item *item2) +{ + if (item1->key.type == BTRFS_DIR_INDEX_KEY && + item1->key.objectid == item2->key.objectid && + item1->key.type == item2->key.type && + item1->key.offset + 1 == item2->key.offset) + return 1; + return 0; +} + +static inline struct btrfs_delayed_root *btrfs_get_delayed_root( + struct btrfs_root *root) +{ + return root->fs_info->delayed_root; +} + +static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( + struct inode *inode) +{ + struct btrfs_delayed_node *node; + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_root *root = btrfs_inode->root; + int ret; + +again: + node = ACCESS_ONCE(btrfs_inode->delayed_node); + if (node) { + atomic_inc(&node->refs); /* can be accessed */ + return node; + } + + spin_lock(&root->inode_lock); + node = radix_tree_lookup(&root->delayed_nodes_tree, inode->i_ino); + if (node) { + if (btrfs_inode->delayed_node) { + spin_unlock(&root->inode_lock); + goto again; + } + btrfs_inode->delayed_node = node; + atomic_inc(&node->refs); /* can be accessed */ + atomic_inc(&node->refs); /* cached in the inode */ + spin_unlock(&root->inode_lock); + return node; + } + spin_unlock(&root->inode_lock); + + node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); + if (!node) + return ERR_PTR(-ENOMEM); + btrfs_init_delayed_node(node, root, inode->i_ino); + + atomic_inc(&node->refs); /* cached in the btrfs inode */ + atomic_inc(&node->refs); /* can be accessed */ + + ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + if (ret) { + kmem_cache_free(delayed_node_cache, node); + return ERR_PTR(ret); +
} + + spin_lock(&root->inode_lock); + ret = radix_tree_insert(&root->delayed_nodes_tree, inode->i_ino, node); + if (ret == -EEXIST) { + kmem_cache_free(delayed_node_cache, node); + spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + goto again; + } + btrfs_inode->delayed_node = node; + spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + + return node; +} + +/* + * Call it when holding delayed_node->mutex + * + * If mod = 1, add this node into the prepared list. + */ +static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node, + int mod) +{ + spin_lock(&root->lock); + if (node->in_list) { + if (!list_empty(&node->p_list)) + list_move_tail(&node->p_list, &root->prepare_list); + else if (mod) + list_add_tail(&node->p_list, &root->prepare_list); + } else { + list_add_tail(&node->n_list, &root->node_list); + list_add_tail(&node->p_list, &root->prepare_list); + atomic_inc(&node->refs); /* inserted into list */ + root->nodes++; + node->in_list = 1; + } + spin_unlock(&root->lock); +} + +/* Call it when holding delayed_node->mutex */ +static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node) +{ + spin_lock(&root->lock); + if (node->in_list) { + root->nodes--; + atomic_dec(&node->refs); /* not in the list */ + list_del_init(&node->n_list); + if (!list_empty(&node->p_list)) + list_del_init(&node->p_list); + node->in_list = 0; + } + spin_unlock(&root->lock); +} + +struct btrfs_delayed_node *btrfs_first_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node *node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->node_list)) + goto out; + + p = delayed_root->node_list.next; + node = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return node; +} + +struct btrfs_delayed_node *btrfs_next_delayed_node( + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_root *delayed_root; + struct list_head *p; + struct btrfs_delayed_node *next = NULL; + + delayed_root = node->root->fs_info->delayed_root; + spin_lock(&delayed_root->lock); + if (!node->in_list) { /* not in the list */ + if (list_empty(&delayed_root->node_list)) + goto out; + p = delayed_root->node_list.next; + } else if (list_is_last(&node->n_list, &delayed_root->node_list)) + goto out; + else + p = node->n_list.next; + + next = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&next->refs); +out: + spin_unlock(&delayed_root->lock); + + return next; +} + +static void __btrfs_release_delayed_node( + struct btrfs_delayed_node *delayed_node, + int mod) +{ + struct btrfs_delayed_root *delayed_root; + + if (!delayed_node) + return; + + delayed_root = delayed_node->root->fs_info->delayed_root; + + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + btrfs_queue_delayed_node(delayed_root, delayed_node, mod); + else + btrfs_dequeue_delayed_node(delayed_root, delayed_node); + mutex_unlock(&delayed_node->mutex); + + if (atomic_dec_and_test(&delayed_node->refs)) { + struct btrfs_root *root = delayed_node->root; + spin_lock(&root->inode_lock); + if (atomic_read(&delayed_node->refs) == 0) { + radix_tree_delete(&root->delayed_nodes_tree, + delayed_node->inode_id); + kmem_cache_free(delayed_node_cache, delayed_node); + } + spin_unlock(&root->inode_lock); + } +} + +static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node) +{ + 
__btrfs_release_delayed_node(node, 0); +} + +struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node *node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->prepare_list)) + goto out; + + p = delayed_root->prepare_list.next; + list_del_init(p); + node = list_entry(p, struct btrfs_delayed_node, p_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return node; +} + +static inline void btrfs_release_prepared_delayed_node( + struct btrfs_delayed_node *node) +{ + __btrfs_release_delayed_node(node, 1); +} + +struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) +{ + struct btrfs_delayed_item *item; + item = kmalloc(sizeof(*item) + data_len, GFP_NOFS); + if (item) { + item->data_len = data_len; + item->ins_or_del = 0; + item->bytes_reserved = 0; + item->block_rsv = NULL; + item->delayed_node = NULL; + atomic_set(&item->refs, 1); + } + return item; +} + +/* + * __btrfs_lookup_delayed_item - look up the delayed item by key + * @delayed_node: pointer to the delayed node + * @key: the key to look up + * @prev: used to store the prev item if the right item isn't found + * @next: used to store the next item if the right item isn't found + * + * Note: if we don't find the right item, we will return the prev item and + * the next item. + */ +static struct btrfs_delayed_item *__btrfs_lookup_delayed_item( + struct rb_root *root, + struct btrfs_key *key, + struct btrfs_delayed_item **prev, + struct btrfs_delayed_item **next) +{ + struct rb_node *node, *prev_node = NULL; + struct btrfs_delayed_item *delayed_item = NULL; + int ret = 0; + + node = root->rb_node; + + while (node) { + delayed_item = rb_entry(node, struct btrfs_delayed_item, + rb_node); + prev_node = node; + ret = btrfs_comp_cpu_keys(&delayed_item->key, key); + if (ret < 0) + node = node->rb_right; + else if (ret > 0) + node = node->rb_left; + else + return delayed_item; + } + + if (prev) { + if (!prev_node) + *prev = NULL; + else if (ret < 0) + *prev = delayed_item; + else if ((node = rb_prev(prev_node)) != NULL) { + *prev = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *prev = NULL; + } + + if (next) { + if (!prev_node) + *next = NULL; + else if (ret > 0) + *next = delayed_item; + else if ((node = rb_next(prev_node)) != NULL) { + *next = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *next = NULL; + } + return NULL; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, *next; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, 
*next; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node, + struct btrfs_delayed_item *ins, + int action) +{ + struct rb_node **p, *node; + struct rb_node *parent_node = NULL; + struct rb_root *root; + struct btrfs_delayed_item *item; + int cmp; + + if (action == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_node->ins_root; + else if (action == BTRFS_DELAYED_DELETION_ITEM) + root = &delayed_node->del_root; + else + BUG(); + p = &root->rb_node; + node = &ins->rb_node; + + while (*p) { + parent_node = *p; + item = rb_entry(parent_node, struct btrfs_delayed_item, + rb_node); + + cmp = btrfs_comp_cpu_keys(&item->key, &ins->key); + if (cmp < 0) + p = &(*p)->rb_right; + else if (cmp > 0) + p = &(*p)->rb_left; + else + return -EEXIST; + } + + rb_link_node(node, parent_node, p); + rb_insert_color(node, root); + ins->delayed_node = delayed_node; + ins->ins_or_del = action; + + if (ins->key.type == BTRFS_DIR_INDEX_KEY && + action == BTRFS_DELAYED_INSERTION_ITEM && + ins->key.offset >= delayed_node->index_cnt) + delayed_node->index_cnt = ins->key.offset + 1; + + delayed_node->count++; + atomic_inc(&delayed_node->root->fs_info->delayed_root->items); + return 0; +} + +static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_INSERTION_ITEM); +} + +static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_DELETION_ITEM); +} + +static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) +{ + struct rb_root *root; + struct btrfs_delayed_root *delayed_root; + + delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; + + BUG_ON(!delayed_root); + BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM && + delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM); + + if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_item->delayed_node->ins_root; + else + root = &delayed_item->delayed_node->del_root; + + rb_erase(&delayed_item->rb_node, root); + delayed_item->delayed_node->count--; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); +} + +static void btrfs_release_delayed_item(struct btrfs_delayed_item *item) +{ + if (item) { + __btrfs_remove_delayed_item(item); + if (atomic_dec_and_test(&item->refs)) + kfree(item); + } +} + +struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->ins_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->del_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_next_delayed_item( + struct btrfs_delayed_item *item) +{ + struct rb_node *p; + struct btrfs_delayed_item *next = NULL; + + p = rb_next(&item->rb_node); + 
if (p) + next = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return next; +} + +static inline struct btrfs_delayed_node *btrfs_get_delayed_node( + struct inode *inode) +{ + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_inode->delayed_node; + if (delayed_node) + atomic_inc(&delayed_node->refs); + + return delayed_node; +} + +static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, + u64 root_id) +{ + struct btrfs_key root_key; + + if (root->objectid == root_id) + return root; + + root_key.objectid = root_id; + root_key.type = BTRFS_ROOT_ITEM_KEY; + root_key.offset = (u64)-1; + return btrfs_read_fs_root_no_name(root->fs_info, &root_key); +} + +static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) { + item->bytes_reserved = num_bytes; + item->block_rsv = dst_rsv; + } + + return ret; +} + +static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + if (!item->bytes_reserved) + return; + + btrfs_block_rsv_release(root, item->block_rsv, + item->bytes_reserved); +} + +static int btrfs_delayed_inode_reserve_metadata( + struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) + node->bytes_reserved = num_bytes; + + return ret; +} + +static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *rsv; + + if (!node->bytes_reserved) + return; + + rsv = &root->fs_info->global_block_rsv; + btrfs_block_rsv_release(root, rsv, + node->bytes_reserved); + node->bytes_reserved = 0; +} + +/* + * This helper will insert some continuous items into the same leaf according + * to the free space of the leaf. 
+ */ +static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + int free_space; + int total_data_size = 0, total_size = 0; + struct extent_buffer *leaf; + char *data_ptr; + struct btrfs_key *keys; + u32 *data_size; + struct list_head head; + int slot; + int nitems; + int i; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + free_space = btrfs_leaf_free_space(root, leaf); + INIT_LIST_HEAD(&head); + + next = item; + + /* + * count the number of the continuous items that we can insert in batch + */ + while (total_size + next->data_len + sizeof(struct btrfs_item) <= + free_space) { + total_data_size += next->data_len; + total_size += next->data_len + sizeof(struct btrfs_item); + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + } + + if (!nitems) { + ret = 0; + goto out; + } + + /* + * we need allocate some memory space, but it might cause the task + * to sleep, so we set all locked nodes in the path to blocking locks + * first. + */ + btrfs_set_path_blocking(path); + + keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS); + if (!keys) { + ret = -ENOMEM; + goto out; + } + + data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS); + if (!data_size) { + ret = -ENOMEM; + goto error; + } + + /* get keys of all the delayed items */ + i = 0; + list_for_each_entry(next, &head, tree_list) { + keys[i] = next->key; + data_size[i] = next->data_len; + i++; + } + + /* reset all the locked nodes in the patch to spinning locks. */ + btrfs_clear_path_blocking(path, NULL); + + /* insert the keys of the items */ + ret = setup_items_for_insert(trans, root, path, keys, data_size, + total_data_size, total_size, nitems); + if (ret) + goto error; + + /* insert the dir index items */ + slot = path->slots[0]; + list_for_each_entry_safe(curr, next, &head, tree_list) { + data_ptr = btrfs_item_ptr(leaf, slot, char); + write_extent_buffer(leaf, &curr->data, + (unsigned long)data_ptr, + curr->data_len); + slot++; + + btrfs_delayed_item_release_metadata(root, curr); + + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +error: + kfree(data_size); + kfree(keys); +out: + return ret; +} + +/* + * This helper can just do simple insertion that needn't extend item for new + * data, such as directory name index insertion, inode insertion. + */ +static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *delayed_item) +{ + struct extent_buffer *leaf; + struct btrfs_item *item; + char *ptr; + int ret; + + ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, + delayed_item->data_len); + if (ret < 0 && ret != -EEXIST) + return ret; + + leaf = path->nodes[0]; + + item = btrfs_item_nr(leaf, path->slots[0]); + ptr = btrfs_item_ptr(leaf, path->slots[0], char); + + write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr, + delayed_item->data_len); + btrfs_mark_buffer_dirty(leaf); + + btrfs_delayed_item_release_metadata(root, delayed_item); + return 0; +} + +/* + * we insert an item first, then if there are some continuous items, we try + * to insert those items into the same leaf. 
+ */ +static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_insertion_item(node); + if (!curr) + goto insert_end; + + ret = btrfs_insert_delayed_item(trans, root, path, curr); + if (ret < 0) { + btrfs_release_path(root, path); + goto insert_end; + } + + prev = curr; + curr = __btrfs_next_delayed_item(prev); + if (curr && btrfs_is_continuous_delayed_item(prev, curr)) { + /* insert the continuous items into the same leaf */ + path->slots[0]++; + btrfs_batch_insert_items(trans, root, path, curr); + } + btrfs_release_delayed_item(prev); + btrfs_mark_buffer_dirty(path->nodes[0]); + + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + goto do_again; + +insert_end: + mutex_unlock(&node->mutex); + return ret; +} + +static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + struct extent_buffer *leaf; + struct btrfs_key key; + struct list_head head; + int nitems, i, last_item; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + + i = path->slots[0]; + last_item = btrfs_header_nritems(leaf) - 1; + if (i > last_item) + return -ENOENT; /* FIXME: Is errno suitable? */ + + next = item; + INIT_LIST_HEAD(&head); + btrfs_item_key_to_cpu(leaf, &key, i); + nitems = 0; + /* + * count the number of the dir index items that we can delete in batch + */ + while (btrfs_comp_cpu_keys(&next->key, &key) == 0) { + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + + i++; + if (i > last_item) + break; + btrfs_item_key_to_cpu(leaf, &key, i); + } + + if (!nitems) + return 0; + + ret = btrfs_del_items(trans, root, path, path->slots[0], nitems); + if (ret) + goto out; + + list_for_each_entry_safe(curr, next, &head, tree_list) { + btrfs_delayed_item_release_metadata(root, curr); + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +out: + return ret; +} + +static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_deletion_item(node); + if (!curr) + goto delete_fail; + + ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); + if (ret < 0) + goto delete_fail; + else if (ret > 0) { + /* + * can't find the item which the node points to, so this node + * is invalid, just drop it. 
+ */ + prev = curr; + curr = __btrfs_next_delayed_item(prev); + btrfs_release_delayed_item(prev); + ret = 0; + btrfs_release_path(root, path); + if (curr) + goto do_again; + else + goto delete_fail; + } + + btrfs_batch_delete_items(trans, root, path, curr); + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + goto do_again; + +delete_fail: + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + return ret; +} + +static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_delayed_root *delayed_root; + + if (delayed_node && delayed_node->inode_dirty) { + BUG_ON(!delayed_node->root); + delayed_node->inode_dirty = 0; + delayed_node->count--; + + delayed_root = delayed_node->root->fs_info->delayed_root; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); + } +} + +static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_node *node) +{ + struct btrfs_key key; + struct btrfs_inode_item *inode_item; + struct extent_buffer *leaf; + int ret; + + mutex_lock(&node->mutex); + if (!node->inode_dirty) { + mutex_unlock(&node->mutex); + return 0; + } + + key.objectid = node->inode_id; + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + ret = btrfs_lookup_inode(trans, root, path, &key, 1); + if (ret > 0) { + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + return -ENOENT; + } else if (ret < 0) { + mutex_unlock(&node->mutex); + return ret; + } + + btrfs_unlock_up_safe(path, 1); + leaf = path->nodes[0]; + inode_item = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_inode_item); + write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item, + sizeof(struct btrfs_inode_item)); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + btrfs_delayed_inode_release_metadata(root, node); + btrfs_release_delayed_inode(node); + mutex_unlock(&node->mutex); + + return 0; +} + +/* Called when committing the transaction. 
*/ +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + struct btrfs_delayed_node *curr_node, *prev_node; + struct btrfs_path *path; + int ret = 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + delayed_root = btrfs_get_delayed_root(root); + + curr_node = btrfs_first_delayed_node(delayed_root); + while (curr_node) { + root = curr_node->root; + ret = btrfs_insert_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, root, path, + curr_node); + if (ret) { + btrfs_release_delayed_node(curr_node); + break; + } + + prev_node = curr_node; + curr_node = btrfs_next_delayed_node(curr_node); + btrfs_release_delayed_node(prev_node); + } + + btrfs_free_path(path); + return ret; +} + +static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *node) +{ + struct btrfs_path *path; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + ret = btrfs_insert_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, node->root, path, node); + btrfs_free_path(path); + + return ret; +} + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); + int ret; + + if (!delayed_node) + return 0; + + mutex_lock(&delayed_node->mutex); + if (!delayed_node->count) { + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return 0; + } + mutex_unlock(&delayed_node->mutex); + + ret = __btrfs_commit_inode_delayed_items(trans, delayed_node); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +void btrfs_remove_delayed_node(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node); + if (!delayed_node) + return; + + BTRFS_I(inode)->delayed_node = NULL; + btrfs_release_delayed_node(delayed_node); +} + +struct btrfs_async_delayed_node { + struct btrfs_root *root; + struct btrfs_delayed_node *delayed_node; + struct btrfs_work work; +}; + +static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_trans_handle *trans; + struct btrfs_path *path; + struct btrfs_delayed_node *delayed_node = NULL; + struct btrfs_root *root; + unsigned long nr = 0; + int need_requeue = 0; + int ret; + + async_node = container_of(work, struct btrfs_async_delayed_node, work); + + path = btrfs_alloc_path(); + if (!path) + goto out; + path->leave_spinning = 1; + + delayed_node = async_node->delayed_node; + root = delayed_node->root; + + trans = btrfs_join_transaction(root, 0); + if (IS_ERR(trans)) + goto free_path; + + ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + delayed_node); + + if (!ret) + btrfs_update_delayed_inode(trans, root, path, delayed_node); + + /* + * Maybe new delayed items have been inserted, so we need requeue + * the work. Besides that, we must dequeue the empty delayed nodes + * to avoid the race between delayed items balance and the worker. 
+ * The race goes like this: + * Task1 Worker thread + * count == 0, needn't requeue + * also needn't insert the + * delayed node into prepare + * list again. + * add lots of delayed items + * queue the delayed node + * already in the list, + * and not in the prepare + * list, it means the delayed + * node is being dealt with + * by the worker. + * do delayed items balance + * the delayed node is being + * dealt with by the worker + * now, just wait. + * the worker goto idle. + * Task1 will sleep until the transaction is committed. + */ + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + need_requeue = 1; + else + btrfs_dequeue_delayed_node(root->fs_info->delayed_root, + delayed_node); + mutex_unlock(&delayed_node->mutex); + + nr = trans->blocks_used; + + btrfs_end_transaction_dmeta(trans, root); + __btrfs_btree_balance_dirty(root, nr); +free_path: + btrfs_free_path(path); +out: + if (need_requeue) + btrfs_requeue_work(&async_node->work); + else { + btrfs_release_prepared_delayed_node(delayed_node); + kfree(async_node); + } +} + +static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, + struct btrfs_root *root, int all) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_delayed_node *curr; + int count = 0; + +again: + curr = btrfs_first_prepared_delayed_node(delayed_root); + if (!curr) + return 0; + + async_node = kmalloc(sizeof(*async_node), GFP_NOFS); + if (!async_node) { + btrfs_release_prepared_delayed_node(curr); + return -ENOMEM; + } + + async_node->root = root; + async_node->delayed_node = curr; + + async_node->work.func = btrfs_async_run_delayed_node_done; + async_node->work.flags = 0; + + btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work); + count++; + + if (all || count < 4) + goto again; + + return 0; +} + +void btrfs_balance_delayed_items(struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + + delayed_root = btrfs_get_delayed_root(root); + + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) + return; + + if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) { + int ret; + ret = btrfs_wq_run_delayed_node(delayed_root, root, 1); + if (ret) + return; + + wait_event_interruptible_timeout( + delayed_root->wait, + (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND), + HZ); + return; + } + + btrfs_wq_run_delayed_node(delayed_root, root, 0); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *delayed_item; + struct btrfs_dir_item *dir_item; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(delayed_node)) + return PTR_ERR(delayed_node); + + delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len); + if (!delayed_item) { + ret = -ENOMEM; + goto release_node; + } + + ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + delayed_item->key.objectid = dir->i_ino; + btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY); + delayed_item->key.offset = index; + + dir_item = (struct btrfs_dir_item *)delayed_item->data; + dir_item->location = *disk_key; + dir_item->transid = cpu_to_le64(trans->transid); + dir_item->data_len = 0; +
dir_item->name_len = cpu_to_le16(name_len); + dir_item->type = type; + memcpy((char *)(dir_item + 1), name, name_len); + + mutex_lock(&delayed_node->mutex); + ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(name: %s) into " + "the insertion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + name, + (unsigned long long)delayed_node->root->objectid, + (unsigned long long)delayed_node->inode_id, + ret); + BUG(); + } + mutex_unlock(&delayed_node->mutex); + +release_node: + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root, + struct btrfs_delayed_node *node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + mutex_lock(&node->mutex); + item = __btrfs_lookup_delayed_insertion_item(node, key); + if (!item) { + mutex_unlock(&node->mutex); + return 1; + } + + btrfs_delayed_item_release_metadata(root, item); + btrfs_release_delayed_item(item); + mutex_unlock(&node->mutex); + return 0; +} + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index) +{ + struct btrfs_delayed_node *node; + struct btrfs_delayed_item *item; + struct btrfs_key item_key; + int ret; + + node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(node)) + return PTR_ERR(node); + + item_key.objectid = dir->i_ino; + btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); + item_key.offset = index; + + ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); + if (!ret) + goto end; + + item = btrfs_alloc_delayed_item(0); + if (!item) { + ret = -ENOMEM; + goto end; + } + + item->key = item_key; + + ret = btrfs_delayed_item_reserve_metadata(trans, root, item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible. + */ + BUG_ON(ret); + + mutex_lock(&node->mutex); + ret = __btrfs_add_delayed_deletion_item(node, item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(index: %llu) " + "into the deletion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + (unsigned long long)index, + (unsigned long long)node->root->objectid, + (unsigned long long)node->inode_id, + ret); + BUG(); + } + mutex_unlock(&node->mutex); +end: + btrfs_release_delayed_node(node); + return ret; +} + +int btrfs_inode_delayed_dir_index_count(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; + int ret = 0; + + if (!delayed_node) + return -ENOENT; + + /* + * Since we have held i_mutex of this directory, it is impossible that + * a new directory index is added into the delayed node and index_cnt + * is updated now. So we needn't lock the delayed node. 
+ */ + if (!delayed_node->index_cnt) + return -EINVAL; + + BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; + return ret; +} + +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *item; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + mutex_lock(&delayed_node->mutex); + item = __btrfs_first_delayed_insertion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, ins_list); + item = __btrfs_next_delayed_item(item); + } + + item = __btrfs_first_delayed_deletion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, del_list); + item = __btrfs_next_delayed_item(item); + } + mutex_unlock(&delayed_node->mutex); + /* + * This delayed node is still cached in the btrfs inode, so refs + * must be > 1 now, and we needn't check it is going to be freed + * or not. + * + * Besides that, this function is used to read dir, we do not + * insert/delete delayed items in this period. So we also needn't + * requeue or dequeue this delayed node. + */ + atomic_dec(&delayed_node->refs); +} + +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_item *curr, *next; + + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } +} + +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index) +{ + struct btrfs_delayed_item *curr, *next; + int ret; + + if (list_empty(del_list)) + return 0; + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + if (curr->key.offset > index) + break; + + list_del(&curr->readdir_list); + ret = (curr->key.offset == index); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (ret) + return 1; + else + continue; + } + return 0; +} + +/* + * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree + * + */ +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list) +{ + struct btrfs_dir_item *di; + struct btrfs_delayed_item *curr, *next; + struct btrfs_key location; + char *name; + int name_len; + int over = 0; + unsigned char d_type; + + if (list_empty(ins_list)) + return 0; + + /* + * Changing the data of the delayed item is impossible. So + * we needn't lock them. And we have held i_mutex of the + * directory, nobody can delete any directory indexes now. 
+ */ + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + + if (curr->key.offset < filp->f_pos) { + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + continue; + } + + filp->f_pos = curr->key.offset; + + di = (struct btrfs_dir_item *)curr->data; + name = (char *)(di + 1); + name_len = le16_to_cpu(di->name_len); + + d_type = btrfs_filetype_table[di->type]; + btrfs_disk_key_to_cpu(&location, &di->location); + + over = filldir(dirent, name, name_len, curr->key.offset, + location.objectid, d_type); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (over) + return 1; + } + return 0; +} + +BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, + generation, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, + sequence, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, + transid, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, + nbytes, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, + block_group, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); + +BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); +BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); + +static void fill_stack_inode_item(struct btrfs_trans_handle *trans, + struct btrfs_inode_item *inode_item, + struct inode *inode) +{ + btrfs_set_stack_inode_uid(inode_item, inode->i_uid); + btrfs_set_stack_inode_gid(inode_item, inode->i_gid); + btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); + btrfs_set_stack_inode_mode(inode_item, inode->i_mode); + btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); + btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); + btrfs_set_stack_inode_generation(inode_item, + BTRFS_I(inode)->generation); + btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence); + btrfs_set_stack_inode_transid(inode_item, trans->transid); + btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); + btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); + btrfs_set_stack_inode_block_group(inode_item, + BTRFS_I(inode)->block_group); + + btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_nsec); +} + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(inode); + if (IS_ERR(delayed_node)) + 
return PTR_ERR(delayed_node); + + mutex_lock(&delayed_node->mutex); + if (delayed_node->inode_dirty) { + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + goto release_node; + } + + ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); + /* + * we must reserve enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + delayed_node->inode_dirty = 1; + delayed_node->count++; + atomic_inc(&root->fs_info->delayed_root->items); +release_node: + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_root *root = delayed_node->root; + struct btrfs_delayed_item *curr_item, *prev_item; + + mutex_lock(&delayed_node->mutex); + curr_item = __btrfs_first_delayed_insertion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + curr_item = __btrfs_first_delayed_deletion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + if (delayed_node->inode_dirty) { + btrfs_delayed_inode_release_metadata(root, delayed_node); + btrfs_release_delayed_inode(delayed_node); + } + mutex_unlock(&delayed_node->mutex); +} + +void btrfs_kill_delayed_inode_items(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + __btrfs_kill_delayed_node(delayed_node); + btrfs_release_delayed_node(delayed_node); +} + +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) +{ + u64 inode_id = 0; + struct btrfs_delayed_node *delayed_nodes[8]; + int i, n; + + while (1) { + spin_lock(&root->inode_lock); + n = radix_tree_gang_lookup(&root->delayed_nodes_tree, + (void **)delayed_nodes, inode_id, + ARRAY_SIZE(delayed_nodes)); + if (!n) { + spin_unlock(&root->inode_lock); + break; + } + + inode_id = delayed_nodes[n - 1]->inode_id + 1; + + for (i = 0; i < n; i++) + atomic_inc(&delayed_nodes[i]->refs); + spin_unlock(&root->inode_lock); + + for (i = 0; i < n; i++) { + __btrfs_kill_delayed_node(delayed_nodes[i]); + btrfs_release_delayed_node(delayed_nodes[i]); + } + } +} diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h new file mode 100644 index 000000000000..eb7d240aa648 --- /dev/null +++ b/fs/btrfs/delayed-inode.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. + * Written by Miao Xie + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef __DELAYED_TREE_OPERATION_H +#define __DELAYED_TREE_OPERATION_H + +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <asm/atomic.h> + +#include "ctree.h" + +/* types of the delayed item */ +#define BTRFS_DELAYED_INSERTION_ITEM 1 +#define BTRFS_DELAYED_DELETION_ITEM 2 + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + /* + * Used for delayed nodes which are waiting to be dealt with by the + * worker. If the delayed node is inserted into the work queue, we + * drop it from this list. + */ + struct list_head prepare_list; + atomic_t items; /* for delayed items */ + int nodes; /* for delayed nodes */ + wait_queue_head_t wait; +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 bytes_reserved; + struct btrfs_root *root; + /* Used to add the node into the delayed root's node list. */ + struct list_head n_list; + /* + * Used to add the node into the prepare list, the nodes in this list + * are waiting to be dealt with by the async worker. + */ + struct list_head p_list; + struct rb_root ins_root; + struct rb_root del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + atomic_t refs; + u64 index_cnt; + bool in_list; + bool inode_dirty; + int count; +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + struct btrfs_key key; + struct list_head tree_list; /* used for batch insert/delete items */ + struct list_head readdir_list; /* used for readdir items */ + u64 bytes_reserved; + struct btrfs_block_rsv *block_rsv; + struct btrfs_delayed_node *delayed_node; + atomic_t refs; + int ins_or_del; + u32 data_len; + char data[0]; +}; + +static inline void btrfs_init_delayed_root( + struct btrfs_delayed_root *delayed_root) +{ + atomic_set(&delayed_root->items, 0); + delayed_root->nodes = 0; + spin_lock_init(&delayed_root->lock); + init_waitqueue_head(&delayed_root->wait); + INIT_LIST_HEAD(&delayed_root->node_list); + INIT_LIST_HEAD(&delayed_root->prepare_list); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index); + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index); + +int btrfs_inode_delayed_dir_index_count(struct inode *inode); + +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root); + +void btrfs_balance_delayed_items(struct btrfs_root *root); + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode); +/* Used for evicting the inode.
*/ +void btrfs_remove_delayed_node(struct inode *inode); +void btrfs_kill_delayed_inode_items(struct inode *inode); + + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode); + +/* Used for drop dead root */ +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); + +/* Used for readdir() */ +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list); +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list); +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index); +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list); + +/* for init */ +int __init btrfs_delayed_inode_init(void); +void btrfs_delayed_inode_exit(void); +#endif diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae69..f53fb3847c96 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -124,8 +124,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, * to use for the second index (if one is created). */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, const char *name, int name_len, u64 dir, - struct btrfs_key *location, u8 type, u64 index) + *root, const char *name, int name_len, + struct inode *dir, struct btrfs_key *location, + u8 type, u64 index) { int ret = 0; int ret2 = 0; @@ -137,13 +138,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = dir; + key.objectid = dir->i_ino; btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->leave_spinning = 1; + btrfs_cpu_key_to_disk(&disk_key, location); + data_size = sizeof(*dir_item) + name_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); @@ -155,7 +160,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root } leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, type); btrfs_set_dir_data_len(leaf, dir_item, 0); @@ -174,27 +178,9 @@ second_insert: } btrfs_release_path(root, path); - btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); - key.offset = index; - dir_item = insert_with_overflow(trans, root, path, &key, data_size, - name, name_len); - if (IS_ERR(dir_item)) { - ret2 = PTR_ERR(dir_item); - goto out_free; - } - leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); - btrfs_set_dir_item_key(leaf, dir_item, &disk_key); - btrfs_set_dir_type(leaf, dir_item, type); - btrfs_set_dir_data_len(leaf, dir_item, 0); - btrfs_set_dir_name_len(leaf, dir_item, name_len); - btrfs_set_dir_transid(leaf, dir_item, trans->transid); - name_ptr = (unsigned long)(dir_item + 1); - write_extent_buffer(leaf, name, name_ptr, name_len); - btrfs_mark_buffer_dirty(leaf); - + ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir, + &disk_key, type, index); out_free: - btrfs_free_path(path); if (ret) return ret; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..22c3c9586049 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1058,6 +1058,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->name = NULL; root->in_sysfs = 0; root->inode_tree = RB_ROOT; + 
INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; root->orphan_block_rsv = NULL; @@ -1693,6 +1694,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); + fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), + GFP_NOFS); + if (!fs_info->delayed_root) { + err = -ENOMEM; + goto fail_iput; + } + btrfs_init_delayed_root(fs_info->delayed_root); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); @@ -1760,7 +1768,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) { err = -EINVAL; - goto fail_iput; + goto fail_alloc; } memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); @@ -1772,7 +1780,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, disk_super = &fs_info->super_copy; if (!btrfs_super_root(disk_super)) - goto fail_iput; + goto fail_alloc; /* check FS state, whether FS is broken. */ fs_info->fs_state |= btrfs_super_flags(disk_super); @@ -1788,7 +1796,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super) & @@ -1798,7 +1806,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported optional features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super); @@ -1814,7 +1822,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported option features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } btrfs_init_workers(&fs_info->generic_worker, @@ -1861,6 +1869,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", 1, &fs_info->generic_worker); + btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", + fs_info->thread_pool_size, + &fs_info->generic_worker); /* * endios are largely parallel and should have a very @@ -1882,6 +1893,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); btrfs_start_workers(&fs_info->endio_write_workers, 1); btrfs_start_workers(&fs_info->endio_freespace_worker, 1); + btrfs_start_workers(&fs_info->delayed_workers, 1); fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, @@ -2138,6 +2150,9 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + btrfs_stop_workers(&fs_info->delayed_workers); +fail_alloc: + kfree(fs_info->delayed_root); fail_iput: invalidate_inode_pages2(fs_info->btree_inode->i_mapping); iput(fs_info->btree_inode); @@ -2578,6 +2593,7 @@ int close_ctree(struct btrfs_root *root) del_fs_roots(fs_info); iput(fs_info->btree_inode); + kfree(fs_info->delayed_root); btrfs_stop_workers(&fs_info->generic_worker); btrfs_stop_workers(&fs_info->fixup_workers); @@ -2589,6 +2605,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + btrfs_stop_workers(&fs_info->delayed_workers); btrfs_close_devices(fs_info->fs_devices); 
btrfs_mapping_tree_free(&fs_info->mapping_tree); @@ -2662,6 +2679,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) u64 num_dirty; unsigned long thresh = 32 * 1024 * 1024; + if (current->flags & PF_MEMALLOC) + return; + + btrfs_balance_delayed_items(root); + + num_dirty = root->fs_info->dirty_metadata_bytes; + + if (num_dirty > thresh) { + balance_dirty_pages_ratelimited_nr( + root->fs_info->btree_inode->i_mapping, 1); + } + return; +} + +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) +{ + /* + * looks as though older kernels can get into trouble with + * this code, they end up stuck in balance_dirty_pages forever + */ + u64 num_dirty; + unsigned long thresh = 32 * 1024 * 1024; + if (current->flags & PF_MEMALLOC) return; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 07b20dc2fd95..aca35af37dbc 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -71,6 +71,7 @@ int btrfs_insert_dev_radix(struct btrfs_root *root, u64 block_start, u64 num_blocks); void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16c..7b0433866f36 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3973,12 +3973,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info) WARN_ON(fs_info->chunk_block_rsv.reserved > 0); } -static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) -{ - return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * - 3 * num_items; -} - int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, int num_items) @@ -3989,7 +3983,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, if (num_items == 0 || root->fs_info->chunk_root == root) return 0; - num_bytes = calc_trans_metadata_size(root, num_items); + num_bytes = btrfs_calc_trans_metadata_size(root, num_items); ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, num_bytes); if (!ret) { @@ -4028,14 +4022,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, * If all of the metadata space is used, we can commit * transaction and use space it freed. */ - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } void btrfs_orphan_release_metadata(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); } @@ -4049,7 +4043,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, * two for root back/forward refs, two for directory entries * and one for root of the snapshot. 
*/ - u64 num_bytes = calc_trans_metadata_size(root, 5); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); dst_rsv->space_info = src_rsv->space_info; return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } @@ -4078,7 +4072,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) if (nr_extents > reserved_extents) { nr_extents -= reserved_extents; - to_reserve = calc_trans_metadata_size(root, nr_extents); + to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); } else { nr_extents = 0; to_reserve = 0; @@ -4132,7 +4126,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes); if (nr_extents > 0) - to_free += calc_trans_metadata_size(root, nr_extents); + to_free += btrfs_calc_trans_metadata_size(root, nr_extents); btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, to_free); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..3470f67c6258 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2647,11 +2647,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; int ret; + /* + * If root is the tree root, it means this inode is used to + * store free space information. And these inodes are updated + * when committing the transaction, so their updates must not + * be delayed, or a deadlock will occur. + */ + if (likely(root != root->fs_info->tree_root)) { + ret = btrfs_delayed_update_inode(trans, root, inode); + if (!ret) + btrfs_set_inode_last_trans(trans, inode); + return ret; + } + path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; - ret = btrfs_lookup_inode(trans, root, path, - &BTRFS_I(inode)->location, 1); + ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, + 1); if (ret) { if (ret > 0) ret = -ENOENT; @@ -2661,7 +2676,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, btrfs_unlock_up_safe(path, 1); leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_inode_item); + struct btrfs_inode_item); fill_inode_item(trans, leaf, inode_item, inode); btrfs_mark_buffer_dirty(leaf); @@ -2672,7 +2687,6 @@ failed: return ret; } - /* * unlink helper that gets used here in inode.c and in the tree logging * recovery code. It removes a link in a directory with a given name, and * @@ -2724,18 +2738,9 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - if (IS_ERR(di)) { - ret = PTR_ERR(di); - goto err; - } - if (!di) { - ret = -ENOENT; + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); + if (ret) goto err; - } - ret = btrfs_delete_one_dir_name(trans, root, path, di); - btrfs_release_path(root, path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir->i_ino); @@ -2924,6 +2929,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, index = btrfs_inode_ref_index(path->nodes[0], ref); btrfs_release_path(root, path); + /* + * This is a commit root search, so if we can look up the inode item and + * other related items in the commit root, it means the transaction of + * dir/file creation has been committed, and the dir index item whose + * insertion we delayed has also been inserted into the commit root. So + * we needn't worry about the delayed insertion of the dir index item + * here.
+ */ di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { @@ -3029,24 +3042,16 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); index = key.offset; } + btrfs_release_path(root, path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); - - leaf = path->nodes[0]; - btrfs_dir_item_key_to_cpu(leaf, di, &key); - WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); - ret = btrfs_delete_one_dir_name(trans, root, path, di); + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); BUG_ON(ret); - btrfs_release_path(root, path); btrfs_i_size_write(dir, dir->i_size - name_len * 2); dir->i_mtime = dir->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); - btrfs_free_path(path); return 0; } @@ -3306,6 +3311,15 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, if (root->ref_cows || root == root->fs_info->tree_root) btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); + /* + * This function is also used to drop the items in the log tree before + * we relog the inode, so if root != BTRFS_I(inode)->root, it means + * it is used to drop the logged items. So we shouldn't kill the delayed + * items. + */ + if (min_type == 0 && root == BTRFS_I(inode)->root) + btrfs_kill_delayed_inode_items(inode); + path = btrfs_alloc_path(); BUG_ON(!path); path->reada = -1; @@ -4208,7 +4222,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, return d_splice_alias(inode, dentry); } -static unsigned char btrfs_filetype_table[] = { +unsigned char btrfs_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; @@ -4222,6 +4236,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; + struct list_head ins_list; + struct list_head del_list; int ret; struct extent_buffer *leaf; int slot; @@ -4234,6 +4250,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, char tmp_name[32]; char *name_ptr; int name_len; + int is_curr = 0; /* filp->f_pos points to the current index? */ /* FIXME, use a real flag for deciding about the key type */ if (root->fs_info->tree_root == root) @@ -4258,8 +4275,16 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, filp->f_pos = 2; } path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->reada = 2; + if (key_type == BTRFS_DIR_INDEX_KEY) { + INIT_LIST_HEAD(&ins_list); + INIT_LIST_HEAD(&del_list); + btrfs_get_delayed_items(inode, &ins_list, &del_list); + } + btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; key.objectid = inode->i_ino; @@ -4289,8 +4314,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, break; if (found_key.offset < filp->f_pos) goto next; + if (key_type == BTRFS_DIR_INDEX_KEY && + btrfs_should_delete_dir_index(&del_list, + found_key.offset)) + goto next; filp->f_pos = found_key.offset; + is_curr = 1; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); di_cur = 0; @@ -4345,6 +4375,15 @@ next: path->slots[0]++; } + if (key_type == BTRFS_DIR_INDEX_KEY) { + if (is_curr) + filp->f_pos++; + ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, + &ins_list); + if (ret) + goto nopos; + } + /* Reached end of directory/root. Bump pos past the last item.
*/ if (key_type == BTRFS_DIR_INDEX_KEY) /* @@ -4357,6 +4396,8 @@ next: nopos: ret = 0; err: + if (key_type == BTRFS_DIR_INDEX_KEY) + btrfs_put_delayed_items(&ins_list, &del_list); btrfs_free_path(path); return ret; } @@ -4434,6 +4475,8 @@ void btrfs_dirty_inode(struct inode *inode) } } btrfs_end_transaction(trans, root); + if (BTRFS_I(inode)->delayed_node) + btrfs_balance_delayed_items(root); } /* @@ -4502,9 +4545,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) int ret = 0; if (BTRFS_I(dir)->index_cnt == (u64)-1) { - ret = btrfs_set_inode_index_count(dir); - if (ret) - return ret; + ret = btrfs_inode_delayed_dir_index_count(dir); + if (ret) { + ret = btrfs_set_inode_index_count(dir); + if (ret) + return ret; + } } *index = BTRFS_I(dir)->index_cnt; @@ -4671,7 +4717,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, if (ret == 0) { ret = btrfs_insert_dir_item(trans, root, name, name_len, - parent_inode->i_ino, &key, + parent_inode, &key, btrfs_inode_type(inode), index); BUG_ON(ret); @@ -6784,6 +6830,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->dummy_inode = 0; ei->force_compress = BTRFS_COMPRESS_NONE; + ei->delayed_node = NULL; + inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); @@ -6874,6 +6922,7 @@ void btrfs_destroy_inode(struct inode *inode) inode_tree_del(inode); btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); free: + btrfs_remove_delayed_node(inode); call_rcu(&inode->i_rcu, btrfs_i_callback); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed4799..df59401af742 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -422,7 +422,7 @@ static noinline int create_subvol(struct btrfs_root *root, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir->i_ino, &key, + name, namelen, dir, &key, BTRFS_FT_DIR, index); if (ret) goto fail; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..cc5a2a8a5acb 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -40,6 +40,7 @@ #include #include #include "compat.h" +#include "delayed-inode.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -1206,10 +1207,14 @@ static int __init init_btrfs_fs(void) if (err) goto free_extent_io; - err = btrfs_interface_init(); + err = btrfs_delayed_inode_init(); if (err) goto free_extent_map; + err = btrfs_interface_init(); + if (err) + goto free_delayed_inode; + err = register_filesystem(&btrfs_fs_type); if (err) goto unregister_ioctl; @@ -1219,6 +1224,8 @@ static int __init init_btrfs_fs(void) unregister_ioctl: btrfs_interface_exit(); +free_delayed_inode: + btrfs_delayed_inode_exit(); free_extent_map: extent_map_exit(); free_extent_io: @@ -1235,6 +1242,7 @@ free_sysfs: static void __exit exit_btrfs_fs(void) { btrfs_destroy_cachep(); + btrfs_delayed_inode_exit(); extent_map_exit(); extent_io_exit(); btrfs_interface_exit(); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..b83ed5e64a32 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -487,19 +487,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 1); + if (ret) + return ret; + return 0; } int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return 
__btrfs_end_transaction(trans, root, 1, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 1, 1); + if (ret) + return ret; + return 0; } int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 0); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 0); + if (ret) + return ret; + return 0; +} + +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + return __btrfs_end_transaction(trans, root, 1, 1); } /* @@ -967,7 +988,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode->i_ino, &key, + parent_inode, &key, BTRFS_FT_DIR, index); BUG_ON(ret); @@ -1037,6 +1058,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { + /* + * We must deal with the delayed items before creating + * snapshots, or we will create a snapshot with inconsistent + * information. + */ + ret = btrfs_run_delayed_items(trans, fs_info->fs_root); + BUG_ON(ret); + ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1290,6 +1319,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, BUG_ON(ret); } + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + /* * rename don't use btrfs_join_transaction, so, once we * set the transaction to blocked above, we aren't going @@ -1316,6 +1348,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); @@ -1432,6 +1467,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) root = list_entry(list.next, struct btrfs_root, root_list); list_del(&root->root_list); + btrfs_kill_all_delayed_nodes(root); + if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV) btrfs_drop_snapshot(root, NULL, 0); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c584..cb928c6c42e6 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -115,6 +115,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, int wait_for_unblock); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); void btrfs_throttle(struct btrfs_root *root); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..ae0b72856bfb 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2773,6 +2773,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.type = (u8)-1; max_key.offset = (u64)-1; + ret = btrfs_commit_inode_delayed_items(trans, inode); + if (ret) { + btrfs_free_path(path); + btrfs_free_path(dst_path); + return ret; + } + mutex_lock(&BTRFS_I(inode)->log_mutex); /* -- cgit v1.2.2 From 0d0ca30f180906224be6279788f2b202cfd959d8 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 22 May 2011 07:11:22 -0400 Subject: Btrfs: update the delayed inode code to use the btrfs_ino helper.
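The helper being adopted here resolves the objectid that btrfs actually keys an inode by, instead of reading inode->i_ino directly. A minimal sketch of its assumed shape (illustrative only; the exact fallback condition is an assumption, and the real definition lives in fs/btrfs/btrfs_inode.h):

	static inline u64 btrfs_ino(struct inode *inode)
	{
		/* most btrfs inodes are keyed by their location objectid */
		u64 ino = BTRFS_I(inode)->location.objectid;

		/* fall back to the VFS inode number for the special
		 * objectids below the first free one */
		if (ino <= BTRFS_FIRST_FREE_OBJECTID)
			ino = inode->i_ino;
		return ino;
	}

With that in place, the conversion below is mechanical: each dir->i_ino or inode->i_ino used as a tree key becomes btrfs_ino(dir) / btrfs_ino(inode).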
Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 11 ++++++----- fs/btrfs/dir-item.c | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 95485318f001..c25405f69360 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -88,6 +88,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( struct btrfs_delayed_node *node; struct btrfs_inode *btrfs_inode = BTRFS_I(inode); struct btrfs_root *root = btrfs_inode->root; + u64 ino = btrfs_ino(inode); int ret; again: @@ -98,7 +99,7 @@ again: } spin_lock(&root->inode_lock); - node = radix_tree_lookup(&root->delayed_nodes_tree, inode->i_ino); + node = radix_tree_lookup(&root->delayed_nodes_tree, ino); if (node) { if (btrfs_inode->delayed_node) { spin_unlock(&root->inode_lock); @@ -115,7 +116,7 @@ again: node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); if (!node) return ERR_PTR(-ENOMEM); - btrfs_init_delayed_node(node, root, inode->i_ino); + btrfs_init_delayed_node(node, root, ino); atomic_inc(&node->refs); /* cached in the btrfs inode */ atomic_inc(&node->refs); /* can be accessed */ @@ -127,7 +128,7 @@ again: } spin_lock(&root->inode_lock); - ret = radix_tree_insert(&root->delayed_nodes_tree, inode->i_ino, node); + ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node); if (ret == -EEXIST) { kmem_cache_free(delayed_node_cache, node); spin_unlock(&root->inode_lock); @@ -1274,7 +1275,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, */ BUG_ON(ret); - delayed_item->key.objectid = dir->i_ino; + delayed_item->key.objectid = btrfs_ino(dir); btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY); delayed_item->key.offset = index; @@ -1337,7 +1338,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, if (IS_ERR(node)) return PTR_ERR(node); - item_key.objectid = dir->i_ino; + item_key.objectid = btrfs_ino(dir); btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); item_key.offset = index; diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index f53fb3847c96..e757202a014e 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -138,7 +138,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = dir->i_ino; + key.objectid = btrfs_ino(dir); btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); -- cgit v1.2.2 From 8e531cdfeb75269c6c5aae33651cca39707848da Mon Sep 17 00:00:00 2001 From: liubo Date: Fri, 6 May 2011 10:36:09 +0800 Subject: Btrfs: do not flush csum items of unchanged file data during treelog The current code relogs the entire inode every time during fsync log, and it is much better suited to small files than to large ones. During my performance test, the fsync performance of large files sucks, and we can ascribe this to the tremendous amount of csum info for the large ones, because we have to flush all of this csum info into the log trees even when there is only _one_ change in the whole file data. Apparently, to optimize fsync, we need to create a filter to skip the unnecessary csum items, that is, those whose corresponding file data remains unchanged before this fsync. Here I have some test results to show; I use sysbench to do "random write + fsync".
=== sysbench --test=fileio --num-threads=1 --file-num=2 --file-block-size=4K --file-total-size=8G --file-test-mode=rndwr --file-io-mode=sync --file-extra-flags= [prepare, run] === Sysbench args: - Number of threads: 1 - Extra file open flags: 0 - 2 files, 4Gb each - Block size 4Kb - Number of random requests for random IO: 10000 - Read/Write ratio for combined random IO test: 1.50 - Periodic FSYNC enabled, calling fsync() each 100 requests. - Calling fsync() at the end of test, Enabled. - Using synchronous I/O mode - Doing random write test Sysbench results: === Operations performed: 0 Read, 10000 Write, 200 Other = 10200 Total Read 0b Written 39.062Mb Total transferred 39.062Mb === a) without patch: (*SPEED* : 451.01Kb/sec) 112.75 Requests/sec executed b) with patch: (*SPEED* : 4.7533Mb/sec) 1216.84 Requests/sec executed PS: I've made a _sub transid_ patch, but it does not perform as effectively as this patch, and I'm wondering where the problem is and trying to improve it further. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index b4c191d6c774..0f5537e60bb4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2667,6 +2667,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, extent = btrfs_item_ptr(src, start_slot + i, struct btrfs_file_extent_item); + if (btrfs_file_extent_generation(src, extent) < trans->transid) + continue; + found_type = btrfs_file_extent_type(src, extent); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { -- cgit v1.2.2 From 74b2107543da4ed9607ec484f63c42362dc9fca6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 13 Apr 2011 12:02:53 -0400 Subject: Btrfs: make sure to use the delalloc reserve when filling delalloc In the prealloc filling code and compressed code we don't set trans->block_rsv to the delalloc block reserve properly, which is going to make us use metadata from the wrong pool; this patch fixes that. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..3b9f1643aa57 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -619,6 +619,7 @@ retry: trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -1060,6 +1061,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, trans = btrfs_join_transaction(root, 1); } BUG_ON(IS_ERR(trans)); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; cow_start = (u64)-1; cur_offset = start; -- cgit v1.2.2 From 7a7eaa40a39bde4eefc91aadeb1ce3dc4e6a1252 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 13 Apr 2011 12:54:33 -0400 Subject: Btrfs: take away the num_items argument from btrfs_join_transaction I keep forgetting that btrfs_join_transaction() just ignores the num_items argument, which leads me to sending pointless patches and looking stupid :). So just kill the num_items argument from btrfs_join_transaction and btrfs_start_ioctl_transaction, since neither of them uses it.
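To make the interface change concrete, a hypothetical call site (illustrative only, not taken from this series) changes like this:

	/* before: num_items was accepted but silently ignored */
	trans = btrfs_join_transaction(root, 1);

	/* after: joining takes only the root; callers that actually need a
	 * metadata reservation keep using btrfs_start_transaction() */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);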
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/extent-tree.c | 12 ++++++------ fs/btrfs/inode.c | 34 +++++++++++++++++----------------- fs/btrfs/ioctl.c | 4 ++-- fs/btrfs/relocation.c | 12 ++++++------ fs/btrfs/transaction.c | 13 +++++-------- fs/btrfs/transaction.h | 9 +++------ 7 files changed, 42 insertions(+), 48 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..9d6c9e332ca3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1568,7 +1568,7 @@ static int transaction_kthread(void *arg) transid = cur->transid; spin_unlock(&root->fs_info->new_trans_lock); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); @@ -2495,13 +2495,13 @@ int btrfs_commit_super(struct btrfs_root *root) down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); btrfs_commit_transaction(trans, root); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16c..941b28e78931 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3174,7 +3174,7 @@ again: spin_unlock(&data_sinfo->lock); alloc: alloc_target = btrfs_get_alloc_profile(root, 1); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); @@ -3202,7 +3202,7 @@ alloc: commit_trans: if (!committed && !root->fs_info->open_ioctl_trans) { committed = 1; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); @@ -3589,7 +3589,7 @@ again: goto out; ret = -ENOSPC; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) goto out; ret = btrfs_commit_transaction(trans, root); @@ -3816,7 +3816,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, if (trans) return -EAGAIN; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); ret = btrfs_commit_transaction(trans, root); return 0; @@ -7649,7 +7649,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) BUG_ON(reloc_root->commit_root != NULL); while (1) { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); mutex_lock(&root->fs_info->drop_mutex); @@ -8176,7 +8176,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, BUG_ON(cache->ro); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); alloc_flags = update_block_group_flags(root, cache->flags); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3b9f1643aa57..e47bdf0fb75a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -420,7 +420,7 @@ again: } } if (start == 0) { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -617,7 +617,7 @@ retry: async_extent->start + 
async_extent->ram_size - 1, GFP_NOFS); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_reserve_extent(trans, root, @@ -779,7 +779,7 @@ static noinline int cow_file_range(struct inode *inode, int ret = 0; BUG_ON(root == root->fs_info->tree_root); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1056,9 +1056,9 @@ static noinline int run_delalloc_nocow(struct inode *inode, BUG_ON(!path); if (root == root->fs_info->tree_root) { nolock = true; - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); } else { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); } BUG_ON(IS_ERR(trans)); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1718,9 +1718,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); if (!ret) { if (nolock) - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); else - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1735,9 +1735,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) 0, &cached_state, GFP_NOFS); if (nolock) - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); else - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -2415,7 +2415,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) (u64)-1); if (root->orphan_block_rsv || root->orphan_item_inserted) { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (!IS_ERR(trans)) btrfs_end_transaction(trans, root); } @@ -4378,9 +4378,9 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (wbc->sync_mode == WB_SYNC_ALL) { if (nolock) - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); else - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); @@ -4407,7 +4407,7 @@ void btrfs_dirty_inode(struct inode *inode) if (BTRFS_I(inode)->dummy_inode) return; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); @@ -5226,7 +5226,7 @@ again: free_extent_map(em); em = NULL; btrfs_release_path(root, path); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return ERR_CAST(trans); goto again; @@ -5470,7 +5470,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); } - trans = btrfs_join_transaction(root, 0); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return ERR_CAST(trans); @@ -5703,7 +5703,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * to make sure the current transaction stays open * while we 
look for nocow cross refs */ - trans = btrfs_join_transaction(root, 0); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) goto must_cow; @@ -5841,7 +5841,7 @@ again: BUG_ON(!ordered); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { err = -ENOMEM; goto out; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed4799..908c3d4b48c6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -242,7 +242,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); ret = btrfs_update_inode(trans, root, inode); @@ -2182,7 +2182,7 @@ static long btrfs_ioctl_trans_start(struct file *file) mutex_unlock(&root->fs_info->trans_mutex); ret = -ENOMEM; - trans = btrfs_start_ioctl_transaction(root, 0); + trans = btrfs_start_ioctl_transaction(root); if (IS_ERR(trans)) goto out_drop; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 199a80134312..8bb256667f2d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2149,7 +2149,7 @@ again: err = ret; } - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) { if (!err) btrfs_block_rsv_release(rc->extent_root, @@ -3233,7 +3233,7 @@ truncate: goto out; } - trans = btrfs_join_transaction(root, 0); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { btrfs_free_path(path); ret = PTR_ERR(trans); @@ -3642,7 +3642,7 @@ int prepare_to_relocate(struct reloc_control *rc) rc->create_reloc_tree = 1; set_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); BUG_ON(IS_ERR(trans)); btrfs_commit_transaction(trans, rc->extent_root); return 0; @@ -3831,7 +3831,7 @@ restart: btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); /* get rid of pinned extents */ - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) err = PTR_ERR(trans); else @@ -4156,7 +4156,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) set_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) { unset_reloc_control(rc); err = PTR_ERR(trans); @@ -4190,7 +4190,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) unset_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) err = PTR_ERR(trans); else diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..70bfb26df967 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -257,22 +257,19 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, { return start_transaction(root, num_items, TRANS_START); } -struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, - int num_blocks) +struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN); } -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, - int num_blocks) +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN_NOLOCK); } -struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct 
btrfs_root *r, - int num_blocks) +struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root) { - return start_transaction(r, 0, TRANS_USERSPACE); + return start_transaction(root, 0, TRANS_USERSPACE); } /* wait for a transaction commit to be fully complete */ @@ -1171,7 +1168,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; - ac->newtrans = btrfs_join_transaction(root, 0); + ac->newtrans = btrfs_join_transaction(root); if (IS_ERR(ac->newtrans)) { int err = PTR_ERR(ac->newtrans); kfree(ac); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c584..1f573f09dba2 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -92,12 +92,9 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, struct btrfs_root *root); struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, int num_items); -struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, - int num_blocks); -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, - int num_blocks); -struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, - int num_blocks); +struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root); int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); -- cgit v1.2.2 From 2a1eb4614d984d5cd4c928784e9afcf5c07f93be Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 13 Apr 2011 15:15:59 -0400 Subject: Btrfs: if we've already started a trans handle, use that one We currently track trans handles in current->journal_info, but we don't actually use it. This patch fixes it. This will cover the case where we have multiple people starting transactions down the call chain. This keeps us from having to allocate a new handle and all of that, we just increase the use count of the current handle, save the old block_rsv, and return. I tested this with xfstests and it worked out fine. 
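Roughly, the nesting behavior this buys can be sketched as follows. outer_caller() and inner_helper() are hypothetical names used only to show the pattern; the join/end calls are the real API, and the handle is stashed in current->journal_info between them:

/* Sketch, assuming kernel context: nested joins reuse the handle found
 * in current->journal_info, so the inner end only drops use_count and
 * the transaction stays open until the outermost caller ends it. */
static int inner_helper(struct btrfs_root *root)
{
	/* Finds the outer handle via journal_info: use_count 1 -> 2. */
	struct btrfs_trans_handle *trans = btrfs_join_transaction(root);

	if (IS_ERR(trans))
		return PTR_ERR(trans);
	/* ... do work against the same open transaction ... */
	return btrfs_end_transaction(trans, root);	/* 2 -> 1, still open */
}

static int outer_caller(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans = btrfs_join_transaction(root);
	int ret;

	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = inner_helper(root);		/* nests on this handle */
	btrfs_end_transaction(trans, root);	/* 1 -> 0, really ends */
	return ret;
}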
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 17 +++++++++++++++++ fs/btrfs/transaction.h | 2 ++ 2 files changed, 19 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 70bfb26df967..46f40564c168 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -184,6 +184,15 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) return ERR_PTR(-EROFS); + + if (current->journal_info) { + WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK); + h = current->journal_info; + h->use_count++; + h->orig_rsv = h->block_rsv; + h->block_rsv = NULL; + goto got_it; + } again: h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); if (!h) @@ -213,7 +222,9 @@ again: h->block_group = 0; h->bytes_reserved = 0; h->delayed_ref_updates = 0; + h->use_count = 1; h->block_rsv = NULL; + h->orig_rsv = NULL; smp_mb(); if (cur_trans->blocked && may_wait_transaction(root, type)) { @@ -241,6 +252,7 @@ again: } } +got_it: if (type != TRANS_JOIN_NOLOCK) mutex_lock(&root->fs_info->trans_mutex); record_root_in_trans(h, root); @@ -428,6 +440,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_fs_info *info = root->fs_info; int count = 0; + if (--trans->use_count) { + trans->block_rsv = trans->orig_rsv; + return 0; + } + while (count < 4) { unsigned long cur = trans->delayed_ref_updates; trans->delayed_ref_updates = 0; diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 1f573f09dba2..154314f80f8d 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -47,11 +47,13 @@ struct btrfs_trans_handle { u64 transid; u64 block_group; u64 bytes_reserved; + unsigned long use_count; unsigned long blocks_reserved; unsigned long blocks_used; unsigned long delayed_ref_updates; struct btrfs_transaction *transaction; struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *orig_rsv; }; struct btrfs_pending_snapshot { -- cgit v1.2.2 From a4abeea41adfa3c143c289045f4625dfaeba2212 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 11 Apr 2011 17:25:13 -0400 Subject: Btrfs: kill trans_mutex We use trans_mutex for lots of things; here's a basic list: 1) To serialize trans_handles joining the currently running transaction 2) To make sure that no new trans handles are started while we are committing 3) To protect the dead_roots list and the transaction lists Serializing trans_handles joining is really not too hard; it mostly boils down to acquiring a reference to the transaction. So replace the trans_mutex with a trans_lock spinlock and use it to do the following: 1) Protect fs_info->running_transaction. All a trans handle has to do is check this, take a reference to the transaction, and keep going. 2) Protect the fs_info->trans_list. This doesn't get used much; it basically just holds the current transactions, which will usually be at most the currently committing transaction and the currently running one. 3) Protect the dead roots list. This is only ever processed by splicing the list, so this is relatively simple. 4) Protect the fs_info->reloc_ctl stuff. This is very lightweight and was using the trans_mutex before, so this is a pretty straightforward change. 5) Protect fs_info->no_trans_join. Because we don't hold the trans_lock over the entirety of the commit, we need a way to block new people from creating a new transaction while we're doing our work.
So we set no_trans_join and in join_transaction we test to see if that is set, and if it is we do a wait_on_commit. 6) Make the transaction use count atomic so we don't need to take locks to modify it when we're dropping references. 7) Add a commit_lock to the transaction to make sure multiple people trying to commit the same transaction don't race and commit at the same time. 8) Make open_ioctl_trans an atomic so we don't have to take any locks for ioctl trans. I have tested this with xfstests, but obviously it is a pretty hairy change so lots of testing is greatly appreciated. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 6 +- fs/btrfs/disk-io.c | 30 +++--- fs/btrfs/extent-tree.c | 3 +- fs/btrfs/file.c | 4 +- fs/btrfs/ioctl.c | 12 +-- fs/btrfs/relocation.c | 16 +-- fs/btrfs/transaction.c | 271 ++++++++++++++++++++++++++----------------------- fs/btrfs/transaction.h | 4 +- 8 files changed, 177 insertions(+), 169 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..522a39b0033d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -919,7 +919,6 @@ struct btrfs_fs_info { * is required instead of the faster short fsync log commits */ u64 last_trans_log_full_commit; - u64 open_ioctl_trans; unsigned long mount_opt:20; unsigned long compress_type:4; u64 max_inline; @@ -936,7 +935,6 @@ struct btrfs_fs_info { struct super_block *sb; struct inode *btree_inode; struct backing_dev_info bdi; - struct mutex trans_mutex; struct mutex tree_log_mutex; struct mutex transaction_kthread_mutex; struct mutex cleaner_mutex; @@ -957,6 +955,7 @@ struct btrfs_fs_info { struct rw_semaphore subvol_sem; struct srcu_struct subvol_srcu; + spinlock_t trans_lock; struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; @@ -969,6 +968,7 @@ struct btrfs_fs_info { atomic_t async_submit_draining; atomic_t nr_async_bios; atomic_t async_delalloc_pages; + atomic_t open_ioctl_trans; /* * this is used by the balancing code to wait for all the pending @@ -1032,6 +1032,7 @@ struct btrfs_fs_info { int closing; int log_root_recovering; int enospc_unlink; + int trans_no_join; u64 total_pinned; @@ -1053,7 +1054,6 @@ struct btrfs_fs_info { struct reloc_control *reloc_ctl; spinlock_t delalloc_lock; - spinlock_t new_trans_lock; u64 delalloc_bytes; /* data_alloc_cluster is only used in ssd mode */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9d6c9e332ca3..93ef254ec432 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1551,22 +1551,22 @@ static int transaction_kthread(void *arg) vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); mutex_lock(&root->fs_info->transaction_kthread_mutex); - spin_lock(&root->fs_info->new_trans_lock); + spin_lock(&root->fs_info->trans_lock); cur = root->fs_info->running_transaction; if (!cur) { - spin_unlock(&root->fs_info->new_trans_lock); + spin_unlock(&root->fs_info->trans_lock); goto sleep; } now = get_seconds(); if (!cur->blocked && (now < cur->start_time || now - cur->start_time < 30)) { - spin_unlock(&root->fs_info->new_trans_lock); + spin_unlock(&root->fs_info->trans_lock); delay = HZ * 5; goto sleep; } transid = cur->transid; - spin_unlock(&root->fs_info->new_trans_lock); + spin_unlock(&root->fs_info->trans_lock); trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); @@ -1658,7 +1658,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_operations); INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_lock); - 
spin_lock_init(&fs_info->new_trans_lock); + spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->ref_cache_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); @@ -1687,6 +1687,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->sb = sb; fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; + fs_info->trans_no_join = 0; fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); @@ -1735,7 +1736,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->do_barriers = 1; - mutex_init(&fs_info->trans_mutex); mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->chunk_mutex); @@ -3006,10 +3006,13 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) WARN_ON(1); - mutex_lock(&root->fs_info->trans_mutex); mutex_lock(&root->fs_info->transaction_kthread_mutex); + spin_lock(&root->fs_info->trans_lock); list_splice_init(&root->fs_info->trans_list, &list); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); + while (!list_empty(&list)) { t = list_entry(list.next, struct btrfs_transaction, list); if (!t) @@ -3034,23 +3037,18 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) t->blocked = 0; if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); - mutex_unlock(&root->fs_info->trans_mutex); - mutex_lock(&root->fs_info->trans_mutex); t->commit_done = 1; if (waitqueue_active(&t->commit_wait)) wake_up(&t->commit_wait); - mutex_unlock(&root->fs_info->trans_mutex); - - mutex_lock(&root->fs_info->trans_mutex); btrfs_destroy_pending_snapshots(t); btrfs_destroy_delalloc_inodes(root); - spin_lock(&root->fs_info->new_trans_lock); + spin_lock(&root->fs_info->trans_lock); root->fs_info->running_transaction = NULL; - spin_unlock(&root->fs_info->new_trans_lock); + spin_unlock(&root->fs_info->trans_lock); btrfs_destroy_marked_extents(root, &t->dirty_pages, EXTENT_DIRTY); @@ -3064,8 +3062,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) kmem_cache_free(btrfs_transaction_cachep, t); } + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 0; + spin_unlock(&root->fs_info->trans_lock); mutex_unlock(&root->fs_info->transaction_kthread_mutex); - mutex_unlock(&root->fs_info->trans_mutex); return 0; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 941b28e78931..ca599654ce19 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3200,7 +3200,8 @@ alloc: /* commit the current transaction and try again */ commit_trans: - if (!committed && !root->fs_info->open_ioctl_trans) { + if (!committed && + !atomic_read(&root->fs_info->open_ioctl_trans)) { committed = 1; trans = btrfs_join_transaction(root); if (IS_ERR(trans)) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 75899a01dded..cd5e82e500cf 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1222,14 +1222,12 @@ int btrfs_sync_file(struct file *file, int datasync) * the current transaction, we can bail out now without any * syncing */ - mutex_lock(&root->fs_info->trans_mutex); + smp_mb(); if (BTRFS_I(inode)->last_trans <= root->fs_info->last_trans_committed) { BTRFS_I(inode)->last_trans = 0; - mutex_unlock(&root->fs_info->trans_mutex); goto out; } - mutex_unlock(&root->fs_info->trans_mutex); /* * ok we haven't committed the transaction yet, lets do a commit diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 908c3d4b48c6..a578620e06a8 100644 --- 
a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2177,9 +2177,7 @@ static long btrfs_ioctl_trans_start(struct file *file) if (ret) goto out; - mutex_lock(&root->fs_info->trans_mutex); - root->fs_info->open_ioctl_trans++; - mutex_unlock(&root->fs_info->trans_mutex); + atomic_inc(&root->fs_info->open_ioctl_trans); ret = -ENOMEM; trans = btrfs_start_ioctl_transaction(root); @@ -2190,9 +2188,7 @@ static long btrfs_ioctl_trans_start(struct file *file) return 0; out_drop: - mutex_lock(&root->fs_info->trans_mutex); - root->fs_info->open_ioctl_trans--; - mutex_unlock(&root->fs_info->trans_mutex); + atomic_dec(&root->fs_info->open_ioctl_trans); mnt_drop_write(file->f_path.mnt); out: return ret; @@ -2426,9 +2422,7 @@ long btrfs_ioctl_trans_end(struct file *file) btrfs_end_transaction(trans, root); - mutex_lock(&root->fs_info->trans_mutex); - root->fs_info->open_ioctl_trans--; - mutex_unlock(&root->fs_info->trans_mutex); + atomic_dec(&root->fs_info->open_ioctl_trans); mnt_drop_write(file->f_path.mnt); return 0; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 8bb256667f2d..09c30d37d43e 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2136,10 +2136,10 @@ int prepare_to_merge(struct reloc_control *rc, int err) u64 num_bytes = 0; int ret; - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&root->fs_info->trans_lock); rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; rc->merging_rsv_size += rc->nodes_relocated * 2; - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); again: if (!err) { num_bytes = rc->merging_rsv_size; @@ -2208,9 +2208,9 @@ int merge_reloc_roots(struct reloc_control *rc) int ret; again: root = rc->extent_root; - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&root->fs_info->trans_lock); list_splice_init(&rc->reloc_roots, &reloc_roots); - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); while (!list_empty(&reloc_roots)) { found = 1; @@ -3583,17 +3583,17 @@ next: static void set_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - mutex_lock(&fs_info->trans_mutex); + spin_lock(&fs_info->trans_lock); fs_info->reloc_ctl = rc; - mutex_unlock(&fs_info->trans_mutex); + spin_unlock(&fs_info->trans_lock); } static void unset_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - mutex_lock(&fs_info->trans_mutex); + spin_lock(&fs_info->trans_lock); fs_info->reloc_ctl = NULL; - mutex_unlock(&fs_info->trans_mutex); + spin_unlock(&fs_info->trans_lock); } static int check_extent_flags(u64 flags) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 46f40564c168..43816f8b23e7 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -34,6 +34,7 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) { WARN_ON(atomic_read(&transaction->use_count) == 0); if (atomic_dec_and_test(&transaction->use_count)) { + BUG_ON(!list_empty(&transaction->list)); memset(transaction, 0, sizeof(*transaction)); kmem_cache_free(btrfs_transaction_cachep, transaction); } @@ -48,47 +49,73 @@ static noinline void switch_commit_root(struct btrfs_root *root) /* * either allocate a new transaction or hop into the existing one */ -static noinline int join_transaction(struct btrfs_root *root) +static noinline int join_transaction(struct btrfs_root *root, int nofail) { struct btrfs_transaction *cur_trans; + + spin_lock(&root->fs_info->trans_lock); + if 
(root->fs_info->trans_no_join) { + if (!nofail) { + spin_unlock(&root->fs_info->trans_lock); + return -EBUSY; + } + } + cur_trans = root->fs_info->running_transaction; - if (!cur_trans) { - cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, - GFP_NOFS); - if (!cur_trans) - return -ENOMEM; - root->fs_info->generation++; - atomic_set(&cur_trans->num_writers, 1); - cur_trans->num_joined = 0; - cur_trans->transid = root->fs_info->generation; - init_waitqueue_head(&cur_trans->writer_wait); - init_waitqueue_head(&cur_trans->commit_wait); - cur_trans->in_commit = 0; - cur_trans->blocked = 0; - atomic_set(&cur_trans->use_count, 1); - cur_trans->commit_done = 0; - cur_trans->start_time = get_seconds(); - - cur_trans->delayed_refs.root = RB_ROOT; - cur_trans->delayed_refs.num_entries = 0; - cur_trans->delayed_refs.num_heads_ready = 0; - cur_trans->delayed_refs.num_heads = 0; - cur_trans->delayed_refs.flushing = 0; - cur_trans->delayed_refs.run_delayed_start = 0; - spin_lock_init(&cur_trans->delayed_refs.lock); - - INIT_LIST_HEAD(&cur_trans->pending_snapshots); - list_add_tail(&cur_trans->list, &root->fs_info->trans_list); - extent_io_tree_init(&cur_trans->dirty_pages, - root->fs_info->btree_inode->i_mapping, - GFP_NOFS); - spin_lock(&root->fs_info->new_trans_lock); - root->fs_info->running_transaction = cur_trans; - spin_unlock(&root->fs_info->new_trans_lock); - } else { + if (cur_trans) { + atomic_inc(&cur_trans->use_count); + atomic_inc(&cur_trans->num_writers); + cur_trans->num_joined++; + spin_unlock(&root->fs_info->trans_lock); + return 0; + } + spin_unlock(&root->fs_info->trans_lock); + + cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS); + if (!cur_trans) + return -ENOMEM; + spin_lock(&root->fs_info->trans_lock); + if (root->fs_info->running_transaction) { + kmem_cache_free(btrfs_transaction_cachep, cur_trans); + cur_trans = root->fs_info->running_transaction; + atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->num_writers); cur_trans->num_joined++; + spin_unlock(&root->fs_info->trans_lock); + return 0; } + atomic_set(&cur_trans->num_writers, 1); + cur_trans->num_joined = 0; + init_waitqueue_head(&cur_trans->writer_wait); + init_waitqueue_head(&cur_trans->commit_wait); + cur_trans->in_commit = 0; + cur_trans->blocked = 0; + /* + * One for this trans handle, one so it will live on until we + * commit the transaction. 
+ */ + atomic_set(&cur_trans->use_count, 2); + cur_trans->commit_done = 0; + cur_trans->start_time = get_seconds(); + + cur_trans->delayed_refs.root = RB_ROOT; + cur_trans->delayed_refs.num_entries = 0; + cur_trans->delayed_refs.num_heads_ready = 0; + cur_trans->delayed_refs.num_heads = 0; + cur_trans->delayed_refs.flushing = 0; + cur_trans->delayed_refs.run_delayed_start = 0; + spin_lock_init(&cur_trans->commit_lock); + spin_lock_init(&cur_trans->delayed_refs.lock); + + INIT_LIST_HEAD(&cur_trans->pending_snapshots); + list_add_tail(&cur_trans->list, &root->fs_info->trans_list); + extent_io_tree_init(&cur_trans->dirty_pages, + root->fs_info->btree_inode->i_mapping, + GFP_NOFS); + root->fs_info->generation++; + cur_trans->transid = root->fs_info->generation; + root->fs_info->running_transaction = cur_trans; + spin_unlock(&root->fs_info->trans_lock); return 0; } @@ -99,39 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root) * to make sure the old root from before we joined the transaction is deleted * when the transaction commits */ -static noinline int record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) +int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, + struct btrfs_root *root) { if (root->ref_cows && root->last_trans < trans->transid) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); + spin_lock(&root->fs_info->fs_roots_radix_lock); + if (root->last_trans == trans->transid) { + spin_unlock(&root->fs_info->fs_roots_radix_lock); + return 0; + } + root->last_trans = trans->transid; radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); - root->last_trans = trans->transid; + spin_unlock(&root->fs_info->fs_roots_radix_lock); btrfs_init_reloc_root(trans, root); } return 0; } -int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - if (!root->ref_cows) - return 0; - - mutex_lock(&root->fs_info->trans_mutex); - if (root->last_trans == trans->transid) { - mutex_unlock(&root->fs_info->trans_mutex); - return 0; - } - - record_root_in_trans(trans, root); - mutex_unlock(&root->fs_info->trans_mutex); - return 0; -} - /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. 
@@ -140,21 +156,23 @@ static void wait_current_trans(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; + spin_lock(&root->fs_info->trans_lock); cur_trans = root->fs_info->running_transaction; if (cur_trans && cur_trans->blocked) { DEFINE_WAIT(wait); atomic_inc(&cur_trans->use_count); + spin_unlock(&root->fs_info->trans_lock); while (1) { prepare_to_wait(&root->fs_info->transaction_wait, &wait, TASK_UNINTERRUPTIBLE); if (!cur_trans->blocked) break; - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); } finish_wait(&root->fs_info->transaction_wait, &wait); put_transaction(cur_trans); + } else { + spin_unlock(&root->fs_info->trans_lock); } } @@ -167,10 +185,16 @@ enum btrfs_trans_type { static int may_wait_transaction(struct btrfs_root *root, int type) { - if (!root->fs_info->log_root_recovering && - ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || - type == TRANS_USERSPACE)) + if (root->fs_info->log_root_recovering) + return 0; + + if (type == TRANS_USERSPACE) + return 1; + + if (type == TRANS_START && + !atomic_read(&root->fs_info->open_ioctl_trans)) return 1; + return 0; } @@ -198,23 +222,21 @@ again: if (!h) return ERR_PTR(-ENOMEM); - if (type != TRANS_JOIN_NOLOCK) - mutex_lock(&root->fs_info->trans_mutex); if (may_wait_transaction(root, type)) wait_current_trans(root); - ret = join_transaction(root); + do { + ret = join_transaction(root, type == TRANS_JOIN_NOLOCK); + if (ret == -EBUSY) + wait_current_trans(root); + } while (ret == -EBUSY); + if (ret < 0) { kmem_cache_free(btrfs_trans_handle_cachep, h); - if (type != TRANS_JOIN_NOLOCK) - mutex_unlock(&root->fs_info->trans_mutex); return ERR_PTR(ret); } cur_trans = root->fs_info->running_transaction; - atomic_inc(&cur_trans->use_count); - if (type != TRANS_JOIN_NOLOCK) - mutex_unlock(&root->fs_info->trans_mutex); h->transid = cur_trans->transid; h->transaction = cur_trans; @@ -253,11 +275,7 @@ again: } got_it: - if (type != TRANS_JOIN_NOLOCK) - mutex_lock(&root->fs_info->trans_mutex); - record_root_in_trans(h, root); - if (type != TRANS_JOIN_NOLOCK) - mutex_unlock(&root->fs_info->trans_mutex); + btrfs_record_root_in_trans(h, root); if (!current->journal_info && type != TRANS_USERSPACE) current->journal_info = h; @@ -289,17 +307,13 @@ static noinline int wait_for_commit(struct btrfs_root *root, struct btrfs_transaction *commit) { DEFINE_WAIT(wait); - mutex_lock(&root->fs_info->trans_mutex); while (!commit->commit_done) { prepare_to_wait(&commit->commit_wait, &wait, TASK_UNINTERRUPTIBLE); if (commit->commit_done) break; - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); } - mutex_unlock(&root->fs_info->trans_mutex); finish_wait(&commit->commit_wait, &wait); return 0; } @@ -309,50 +323,49 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) struct btrfs_transaction *cur_trans = NULL, *t; int ret; - mutex_lock(&root->fs_info->trans_mutex); - ret = 0; if (transid) { if (transid <= root->fs_info->last_trans_committed) - goto out_unlock; + goto out; /* find specified transaction */ + spin_lock(&root->fs_info->trans_lock); list_for_each_entry(t, &root->fs_info->trans_list, list) { if (t->transid == transid) { cur_trans = t; + atomic_inc(&cur_trans->use_count); break; } if (t->transid > transid) break; } + spin_unlock(&root->fs_info->trans_lock); ret = -EINVAL; if (!cur_trans) - goto out_unlock; /* bad transid */ + goto out; /* bad transid */ } else { /* find newest transaction that is committing | committed */ + 
spin_lock(&root->fs_info->trans_lock); list_for_each_entry_reverse(t, &root->fs_info->trans_list, list) { if (t->in_commit) { if (t->commit_done) - goto out_unlock; + goto out; cur_trans = t; + atomic_inc(&cur_trans->use_count); break; } } + spin_unlock(&root->fs_info->trans_lock); if (!cur_trans) - goto out_unlock; /* nothing committing|committed */ + goto out; /* nothing committing|committed */ } - atomic_inc(&cur_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); - wait_for_commit(root, cur_trans); - mutex_lock(&root->fs_info->trans_mutex); put_transaction(cur_trans); ret = 0; -out_unlock: - mutex_unlock(&root->fs_info->trans_mutex); +out: return ret; } @@ -401,10 +414,8 @@ harder: void btrfs_throttle(struct btrfs_root *root) { - mutex_lock(&root->fs_info->trans_mutex); - if (!root->fs_info->open_ioctl_trans) + if (!atomic_read(&root->fs_info->open_ioctl_trans)) wait_current_trans(root); - mutex_unlock(&root->fs_info->trans_mutex); } static int should_end_transaction(struct btrfs_trans_handle *trans, @@ -422,6 +433,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_transaction *cur_trans = trans->transaction; int updates; + smp_mb(); if (cur_trans->blocked || cur_trans->delayed_refs.flushing) return 1; @@ -467,9 +479,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_metadata(trans, root); - if (lock && !root->fs_info->open_ioctl_trans && - should_end_transaction(trans, root)) + if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && + should_end_transaction(trans, root)) { trans->transaction->blocked = 1; + smp_wmb(); + } if (lock && cur_trans->blocked && !cur_trans->in_commit) { if (throttle) @@ -739,9 +753,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, */ int btrfs_add_dead_root(struct btrfs_root *root) { - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&root->fs_info->trans_lock); list_add(&root->root_list, &root->fs_info->dead_roots); - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); return 0; } @@ -757,6 +771,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, int ret; int err = 0; + spin_lock(&fs_info->fs_roots_radix_lock); while (1) { ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, (void **)gang, 0, @@ -769,6 +784,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); + spin_unlock(&fs_info->fs_roots_radix_lock); btrfs_free_log(trans, root); btrfs_update_reloc_root(trans, root); @@ -783,10 +799,12 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, err = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); + spin_lock(&fs_info->fs_roots_radix_lock); if (err) break; } } + spin_unlock(&fs_info->fs_roots_radix_lock); return err; } @@ -972,7 +990,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent = dget_parent(dentry); parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; - record_root_in_trans(trans, parent_root); + btrfs_record_root_in_trans(trans, parent_root); /* * insert the directory item @@ -990,7 +1008,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); - record_root_in_trans(trans, root); + btrfs_record_root_in_trans(trans, root); 
btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -1080,20 +1098,20 @@ static void update_super_roots(struct btrfs_root *root) int btrfs_transaction_in_commit(struct btrfs_fs_info *info) { int ret = 0; - spin_lock(&info->new_trans_lock); + spin_lock(&info->trans_lock); if (info->running_transaction) ret = info->running_transaction->in_commit; - spin_unlock(&info->new_trans_lock); + spin_unlock(&info->trans_lock); return ret; } int btrfs_transaction_blocked(struct btrfs_fs_info *info) { int ret = 0; - spin_lock(&info->new_trans_lock); + spin_lock(&info->trans_lock); if (info->running_transaction) ret = info->running_transaction->blocked; - spin_unlock(&info->new_trans_lock); + spin_unlock(&info->trans_lock); return ret; } @@ -1117,9 +1135,7 @@ static void wait_current_trans_commit_start(struct btrfs_root *root, &wait); break; } - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); finish_wait(&root->fs_info->transaction_blocked_wait, &wait); } } @@ -1145,9 +1161,7 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root, &wait); break; } - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); finish_wait(&root->fs_info->transaction_wait, &wait); } @@ -1193,22 +1207,18 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, } /* take transaction reference */ - mutex_lock(&root->fs_info->trans_mutex); cur_trans = trans->transaction; atomic_inc(&cur_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); schedule_delayed_work(&ac->work, 0); /* wait for transaction to start and unblock */ - mutex_lock(&root->fs_info->trans_mutex); if (wait_for_unblock) wait_current_trans_commit_start_and_unblock(root, cur_trans); else wait_current_trans_commit_start(root, cur_trans); put_transaction(cur_trans); - mutex_unlock(&root->fs_info->trans_mutex); return 0; } @@ -1252,38 +1262,41 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = btrfs_run_delayed_refs(trans, root, 0); BUG_ON(ret); - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&cur_trans->commit_lock); if (cur_trans->in_commit) { + spin_unlock(&cur_trans->commit_lock); atomic_inc(&cur_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); ret = wait_for_commit(root, cur_trans); BUG_ON(ret); - mutex_lock(&root->fs_info->trans_mutex); put_transaction(cur_trans); - mutex_unlock(&root->fs_info->trans_mutex); return 0; } trans->transaction->in_commit = 1; trans->transaction->blocked = 1; + spin_unlock(&cur_trans->commit_lock); wake_up(&root->fs_info->transaction_blocked_wait); + spin_lock(&root->fs_info->trans_lock); if (cur_trans->list.prev != &root->fs_info->trans_list) { prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); if (!prev_trans->commit_done) { atomic_inc(&prev_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); wait_for_commit(root, prev_trans); - mutex_lock(&root->fs_info->trans_mutex); put_transaction(prev_trans); + } else { + spin_unlock(&root->fs_info->trans_lock); } + } else { + spin_unlock(&root->fs_info->trans_lock); } if (now < cur_trans->start_time || now - cur_trans->start_time < 1) @@ -1291,12 +1304,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, do { int 
snap_pending = 0; + joined = cur_trans->num_joined; if (!list_empty(&trans->transaction->pending_snapshots)) snap_pending = 1; WARN_ON(cur_trans != trans->transaction); - mutex_unlock(&root->fs_info->trans_mutex); if (flush_on_commit || snap_pending) { btrfs_start_delalloc_inodes(root, 1); @@ -1316,14 +1329,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, prepare_to_wait(&cur_trans->writer_wait, &wait, TASK_UNINTERRUPTIBLE); - smp_mb(); if (atomic_read(&cur_trans->num_writers) > 1) schedule_timeout(MAX_SCHEDULE_TIMEOUT); else if (should_grow) schedule_timeout(1); - mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); @@ -1364,9 +1378,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_prepare_extent_commit(trans, root); cur_trans = root->fs_info->running_transaction; - spin_lock(&root->fs_info->new_trans_lock); - root->fs_info->running_transaction = NULL; - spin_unlock(&root->fs_info->new_trans_lock); btrfs_set_root_node(&root->fs_info->tree_root->root_item, root->fs_info->tree_root->node); @@ -1387,10 +1398,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, sizeof(root->fs_info->super_copy)); trans->transaction->blocked = 0; + spin_lock(&root->fs_info->trans_lock); + root->fs_info->running_transaction = NULL; + root->fs_info->trans_no_join = 0; + spin_unlock(&root->fs_info->trans_lock); wake_up(&root->fs_info->transaction_wait); - mutex_unlock(&root->fs_info->trans_mutex); ret = btrfs_write_and_wait_transaction(trans, root); BUG_ON(ret); write_ctree_super(trans, root, 0); @@ -1403,22 +1417,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_finish_extent_commit(trans, root); - mutex_lock(&root->fs_info->trans_mutex); - cur_trans->commit_done = 1; root->fs_info->last_trans_committed = cur_trans->transid; wake_up(&cur_trans->commit_wait); + spin_lock(&root->fs_info->trans_lock); list_del_init(&cur_trans->list); + spin_unlock(&root->fs_info->trans_lock); + put_transaction(cur_trans); put_transaction(cur_trans); trace_btrfs_transaction_commit(root); - mutex_unlock(&root->fs_info->trans_mutex); - if (current->journal_info == trans) current->journal_info = NULL; @@ -1438,9 +1451,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) LIST_HEAD(list); struct btrfs_fs_info *fs_info = root->fs_info; - mutex_lock(&fs_info->trans_mutex); + spin_lock(&fs_info->trans_lock); list_splice_init(&fs_info->dead_roots, &list); - mutex_unlock(&fs_info->trans_mutex); + spin_unlock(&fs_info->trans_lock); while (!list_empty(&list)) { root = list_entry(list.next, struct btrfs_root, root_list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 154314f80f8d..11c6efcd4ed2 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -28,10 +28,12 @@ struct btrfs_transaction { * transaction can end */ atomic_t num_writers; + atomic_t use_count; unsigned long num_joined; + + spinlock_t commit_lock; int in_commit; - atomic_t use_count; int commit_done; int blocked; struct list_head list; -- cgit v1.2.2 From fcb80c2affd63237cff5b34cba5756be7c976a5a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 3 May 2011 10:40:22 -0400 Subject: Btrfs: fix how we do space reservation for truncate The ceph guys keep running into problems where we have space reserved in 
our orphan block rsv when freeing it up. This is because they tend to take snapshots a lot, so their truncates tend to use a bunch of space, and when we then go to do things like update the inode we have to steal reservation space in order to make the reservation happen. This happens because truncate can use as much space as it freaking feels like, but we still have to hold space for removing the orphan item and updating the inode, which will definitely always happen. So in order to fix this we need to split all of the reservation stuff up. So with this patch we have: 1) The orphan block reserve, which only holds the space for deleting our orphan item when everything is over. 2) The truncate block reserve, which gets allocated and used specifically for the space that the truncate will use, on a per-truncate basis. 3) The transaction will always have 1 item's worth of data reserved so we can update the inode normally. Hopefully this will make the ceph problem go away. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 3 ++ fs/btrfs/extent-tree.c | 46 +++++++++++++++----- fs/btrfs/inode.c | 111 +++++++++++++++++++++++++++++++++++++------------ 3 files changed, 123 insertions(+), 37 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 522a39b0033d..f31aed7fedd9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2224,6 +2224,9 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, void btrfs_block_rsv_release(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 num_bytes); +int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *rsv); int btrfs_set_block_group_ro(struct btrfs_root *root, struct btrfs_block_group_cache *cache); int btrfs_set_block_group_rw(struct btrfs_root *root, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ca599654ce19..a2ca561c70f0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3980,6 +3980,37 @@ static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) 3 * num_items; } +int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *rsv) +{ + struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv; + u64 num_bytes; + int ret; + + /* + * Truncate should be freeing data, but give us 2 items just in case it + * needs to use some space. We may want to be smarter about this in the + * future. + */ + num_bytes = calc_trans_metadata_size(root, 2); + + /* We already have enough bytes, just return */ + if (rsv->reserved >= num_bytes) + return 0; + + num_bytes -= rsv->reserved; + + /* + * You should have reserved enough space before hand to do this, so this + * should not fail. + */ + ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes); + BUG_ON(ret); + + return 0; +} + int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, int num_items) @@ -4020,23 +4051,18 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; /* - * one for deleting orphan item, one for updating inode and - * two for calling btrfs_truncate_inode_items. - * - * btrfs_truncate_inode_items is a delete operation, it frees - * more space than it uses in most cases. So two units of - * metadata space should be enough for calling it many times. - * If all of the metadata space is used, we can commit - * transaction and use space it freed.
+ * We need to hold space in order to delete our orphan item once we've + * added it, so this takes the reservation so we can release it later + * when we are truly done with the orphan item. */ - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = calc_trans_metadata_size(root, 1); return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } void btrfs_orphan_release_metadata(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = calc_trans_metadata_size(root, 1); btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e47bdf0fb75a..bc12ba23db5f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6591,6 +6591,7 @@ out: static int btrfs_truncate(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_block_rsv *rsv; int ret; int err = 0; struct btrfs_trans_handle *trans; @@ -6604,28 +6605,83 @@ static int btrfs_truncate(struct inode *inode) btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_ordered_update_i_size(inode, inode->i_size, NULL); - trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) - return PTR_ERR(trans); + /* + * Yes ladies and gentelment, this is indeed ugly. The fact is we have + * 3 things going on here + * + * 1) We need to reserve space for our orphan item and the space to + * delete our orphan item. Lord knows we don't want to have a dangling + * orphan item because we didn't reserve space to remove it. + * + * 2) We need to reserve space to update our inode. + * + * 3) We need to have something to cache all the space that is going to + * be free'd up by the truncate operation, but also have some slack + * space reserved in case it uses space during the truncate (thank you + * very much snapshotting). + * + * And we need these to all be seperate. The fact is we can use alot of + * space doing the truncate, and we have no earthly idea how much space + * we will use, so we need the truncate reservation to be seperate so it + * doesn't end up using space reserved for updating the inode or + * removing the orphan item. We also need to be able to stop the + * transaction and start a new one, which means we need to be able to + * update the inode several times, and we have no idea of knowing how + * many times that will be, so we can't just reserve 1 item for the + * entirety of the opration, so that has to be done seperately as well. + * Then there is the orphan item, which does indeed need to be held on + * to for the whole operation, and we need nobody to touch this reserved + * space except the orphan code. + * + * So that leaves us with + * + * 1) root->orphan_block_rsv - for the orphan deletion. + * 2) rsv - for the truncate reservation, which we will steal from the + * transaction reservation. + * 3) fs_info->trans_block_rsv - this will have 1 items worth left for + * updating the inode. + */ + rsv = btrfs_alloc_block_rsv(root); + if (!rsv) + return -ENOMEM; + btrfs_add_durable_block_rsv(root->fs_info, rsv); + + trans = btrfs_start_transaction(root, 4); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto out; + } btrfs_set_trans_block_group(trans, inode); + /* + * Reserve space for the truncate process. Truncate should be adding + * space, but if there are snapshots it may end up using space. 
+ */ + ret = btrfs_truncate_reserve_metadata(trans, root, rsv); + BUG_ON(ret); + ret = btrfs_orphan_add(trans, inode); if (ret) { btrfs_end_transaction(trans, root); - return ret; + goto out; } nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); - /* Now start a transaction for the truncate */ - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return PTR_ERR(trans); + /* + * Ok so we've already migrated our bytes over for the truncate, so here + * just reserve the one slot we need for updating the inode. + */ + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto out; + } btrfs_set_trans_block_group(trans, inode); - trans->block_rsv = root->orphan_block_rsv; + trans->block_rsv = rsv; /* * setattr is responsible for setting the ordered_data_close flag, @@ -6649,24 +6705,18 @@ static int btrfs_truncate(struct inode *inode) while (1) { if (!trans) { - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, inode); - trans->block_rsv = root->orphan_block_rsv; - } + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto out; + } - ret = btrfs_block_rsv_check(trans, root, - root->orphan_block_rsv, 0, 5); - if (ret == -EAGAIN) { - ret = btrfs_commit_transaction(trans, root); - if (ret) - return ret; - trans = NULL; - continue; - } else if (ret) { - err = ret; - break; + ret = btrfs_truncate_reserve_metadata(trans, root, + rsv); + BUG_ON(ret); + + btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = rsv; } ret = btrfs_truncate_inode_items(trans, root, inode, @@ -6677,6 +6727,7 @@ static int btrfs_truncate(struct inode *inode) break; } + trans->block_rsv = &root->fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); if (ret) { err = ret; @@ -6690,6 +6741,7 @@ static int btrfs_truncate(struct inode *inode) } if (ret == 0 && inode->i_nlink > 0) { + trans->block_rsv = root->orphan_block_rsv; ret = btrfs_orphan_del(trans, inode); if (ret) err = ret; @@ -6701,15 +6753,20 @@ static int btrfs_truncate(struct inode *inode) ret = btrfs_orphan_del(NULL, inode); } + trans->block_rsv = &root->fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); if (ret && !err) err = ret; nr = trans->blocks_used; ret = btrfs_end_transaction_throttle(trans, root); + btrfs_btree_balance_dirty(root, nr); + +out: + btrfs_free_block_rsv(root, rsv); + if (ret && !err) err = ret; - btrfs_btree_balance_dirty(root, nr); return err; } -- cgit v1.2.2 From af60bed24eb0e3b6d93eaa6bb395a5721e6c09a8 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 4 May 2011 11:11:17 -0400 Subject: Btrfs: set range_start to the right start in count_range_bits In count_range_bits we are adjusting total_bytes based on the range we are searching for, but we don't adjust the range start according to the range we are searching for, which makes for weird results. For example, if the range [0-8192] is set DELALLOC, but I search for 4096-8192, I will get back 4096 for the number of bytes found, but the range_start will be 0, which makes it look like the range is [0-4096]. So instead set range_start = max(cur_start, state->start). This makes everything come out right. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..b5f6f227a97c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1480,7 +1480,7 @@ u64 count_range_bits(struct extent_io_tree *tree, if (total_bytes >= max_bytes) break; if (!found) { - *start = state->start; + *start = max(cur_start, state->start); found = 1; } last = state->end; -- cgit v1.2.2 From cb25c2ea6a79702ab7895b873c6c43e0d3bc3c72 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 12:17:34 -0400 Subject: Btrfs: map the node block when looking for readahead targets If we have particularly full nodes, we could call btrfs_node_blockptr up to 32 times, which is 32 pairs of kmap/kunmap, which _sucks_. So go ahead and map the extent buffer while we look for readahead targets. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..009bcf7f1e4b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1229,6 +1229,7 @@ static void reada_for_search(struct btrfs_root *root, u64 search; u64 target; u64 nread = 0; + u64 gen; int direction = path->reada; struct extent_buffer *eb; u32 nr; @@ -1256,6 +1257,15 @@ static void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; while (1) { + if (!node->map_token) { + unsigned long offset = btrfs_node_key_ptr_offset(nr); + map_private_extent_buffer(node, offset, + sizeof(struct btrfs_key_ptr), + &node->map_token, + &node->kaddr, + &node->map_start, + &node->map_len, KM_USER1); + } if (direction < 0) { if (nr == 0) break; @@ -1273,14 +1283,23 @@ static void reada_for_search(struct btrfs_root *root, search = btrfs_node_blockptr(node, nr); if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { - readahead_tree_block(root, search, blocksize, - btrfs_node_ptr_generation(node, nr)); + gen = btrfs_node_ptr_generation(node, nr); + if (node->map_token) { + unmap_extent_buffer(node, node->map_token, + KM_USER1); + node->map_token = NULL; + } + readahead_tree_block(root, search, blocksize, gen); nread += blocksize; } nscan++; if ((nread > 65536 || nscan > 32)) break; } + if (node->map_token) { + unmap_extent_buffer(node, node->map_token, KM_USER1); + node->map_token = NULL; + } } /* -- cgit v1.2.2 From 7e2355ba1a11649f0b212a29fdb9f47476f1248e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 12:25:37 -0400 Subject: Btrfs: don't look at the extent buffer level 3 times in a row We have a bit of debugging in btrfs_search_slot to make sure the level of the cow block is the same as the original block we were cow'ing. I don't think I've ever seen this tripped, so kill it. This saves us 2 kmap's per level in our search. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 009bcf7f1e4b..f7a0a64b868f 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1672,9 +1672,6 @@ again: } cow_done: BUG_ON(!cow && ins_len); - if (level != btrfs_header_level(b)) - WARN_ON(1); - level = btrfs_header_level(b); p->nodes[level] = b; if (!p->skip_locking) -- cgit v1.2.2 From d82a6f1d7e8b61ed5996334d0db66651bb43641d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 15:26:06 -0400 Subject: Btrfs: kill BTRFS_I(inode)->block_group Originally this was going to be used as a way to give hints to the allocator, but frankly we can get much better hints elsewhere and it's not even used at all for anything useful. In addition to being completely useless, when we initialize an inode we try and find a freeish block group to set as the inode's block group, and with a completely full 40gb fs this takes _forever_, so I imagine with, say, a 1TB fs this is just unbearable. So just axe the thing altogether; we don't need it, and it saves us 8 bytes in the inode and saves us 500 microseconds per inode lookup in my testcase. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/btrfs_inode.h | 3 -- fs/btrfs/ctree.h | 3 +- fs/btrfs/extent-tree.c | 10 ++---- fs/btrfs/inode.c | 87 ++++++-------------------------------------------- fs/btrfs/ioctl.c | 3 +- fs/btrfs/transaction.c | 1 - fs/btrfs/transaction.h | 14 -------- fs/btrfs/xattr.c | 2 -- 8 files changed, 13 insertions(+), 110 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..4bc852d3b83d 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -120,9 +120,6 @@ struct btrfs_inode { */ u64 index_cnt; - /* the start of block group preferred for allocations. */ - u64 block_group; - /* the fsync log has some corner cases that mean we have to check * directories to see if any unlinks have been done before * the directory was logged.
See tree-log.c for all the diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f31aed7fedd9..0f8c489bcc02 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2512,8 +2512,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, - u64 new_dirid, u64 alloc_hint); + struct btrfs_root *new_root, u64 new_dirid); int btrfs_merge_bio_hook(struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a2ca561c70f0..9f0a4e3bd8a9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5319,6 +5319,7 @@ checks: btrfs_add_free_space(block_group, offset, search_start - offset); BUG_ON(offset > search_start); + btrfs_put_block_group(block_group); break; loop: failed_cluster_refill = false; @@ -5411,14 +5412,7 @@ loop: ret = -ENOSPC; } else if (!ins->objectid) { ret = -ENOSPC; - } - - /* we found what we needed */ - if (ins->objectid) { - if (!(data & BTRFS_BLOCK_GROUP_DATA)) - trans->block_group = block_group->key.objectid; - - btrfs_put_block_group(block_group); + } else if (ins->objectid) { ret = 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bc12ba23db5f..dd5938a7de21 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -136,7 +136,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; - btrfs_set_trans_block_group(trans, inode); key.objectid = inode->i_ino; key.offset = start; @@ -422,7 +421,6 @@ again: if (start == 0) { trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; /* lets try to make an inline extent */ @@ -781,7 +779,6 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; num_bytes = (end - start + blocksize) & ~(blocksize - 1); @@ -1502,8 +1499,6 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, { struct btrfs_ordered_sum *sum; - btrfs_set_trans_block_group(trans, inode); - list_for_each_entry(sum, list, list) { btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root->fs_info->csum_root, sum); @@ -1722,7 +1717,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) else trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); @@ -1739,7 +1733,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) else trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) @@ -2495,7 +2488,6 @@ static void btrfs_read_locked_inode(struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key location; int maybe_acls; - u64 alloc_group_block; u32 rdev; int ret; @@ -2539,8 +2531,6 @@ static void btrfs_read_locked_inode(struct inode *inode) BTRFS_I(inode)->index_cnt = (u64)-1; 
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); - alloc_group_block = btrfs_inode_block_group(leaf, inode_item); - /* * try to precache a NULL acl entry for files that don't have * any xattrs or acls @@ -2549,8 +2539,6 @@ static void btrfs_read_locked_inode(struct inode *inode) if (!maybe_acls) cache_no_acl(inode); - BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, - alloc_group_block, 0); btrfs_free_path(path); inode_item = NULL; @@ -2630,7 +2618,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_inode_transid(leaf, item, trans->transid); btrfs_set_inode_rdev(leaf, item, inode->i_rdev); btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); - btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); + btrfs_set_inode_block_group(leaf, item, 0); if (leaf->map_token) { unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); @@ -2971,8 +2959,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, @@ -3068,8 +3054,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, @@ -3649,7 +3633,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) err = PTR_ERR(trans); break; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_drop_extents(trans, inode, cur_offset, cur_offset + hole_size, @@ -3785,7 +3768,6 @@ void btrfs_evict_inode(struct inode *inode) while (1) { trans = btrfs_start_transaction(root, 0); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = root->orphan_block_rsv; ret = btrfs_block_rsv_check(trans, root, @@ -4383,7 +4365,6 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); else @@ -4409,7 +4390,6 @@ void btrfs_dirty_inode(struct inode *inode) trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret && ret == -ENOSPC) { @@ -4424,7 +4404,6 @@ void btrfs_dirty_inode(struct inode *inode) } return; } - btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret) { @@ -4519,8 +4498,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *dir, const char *name, int name_len, - u64 ref_objectid, u64 objectid, - u64 alloc_hint, int mode, u64 *index) + u64 ref_objectid, u64 objectid, int mode, + u64 *index) { struct inode *inode; struct btrfs_inode_item *inode_item; @@ -4567,8 +4546,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, owner = 0; else owner = 1; - BTRFS_I(inode)->block_group = - btrfs_find_block_group(root, 0, alloc_hint, owner); key[0].objectid = objectid; btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); @@ -4729,11 +4706,9 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, 
dir); - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, mode, &index); + mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -4745,7 +4720,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, goto out_unlock; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -4754,8 +4728,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, init_special_inode(inode, inode->i_mode, rdev); btrfs_update_inode(trans, root, inode); } - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); @@ -4791,11 +4763,9 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, mode, &index); + mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -4807,7 +4777,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -4818,8 +4787,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_file_inode_operations; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); @@ -4866,8 +4833,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, btrfs_inc_nlink(inode); inode->i_ctime = CURRENT_TIME; - - btrfs_set_trans_block_group(trans, dir); ihold(inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); @@ -4876,7 +4841,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, drop_inode = 1; } else { struct dentry *parent = dget_parent(dentry); - btrfs_update_inode_block_group(trans, dir); err = btrfs_update_inode(trans, root, inode); BUG_ON(err); btrfs_log_new_name(trans, inode, NULL, parent); @@ -4917,12 +4881,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) trans = btrfs_start_transaction(root, 5); if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, S_IFDIR | mode, - &index); + S_IFDIR | mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_fail; @@ -4936,7 +4898,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) inode->i_op = &btrfs_dir_inode_operations; inode->i_fop = &btrfs_dir_file_operations; - btrfs_set_trans_block_group(trans, inode); btrfs_i_size_write(inode, 0); err = btrfs_update_inode(trans, root, inode); @@ -4950,8 +4911,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) d_instantiate(dentry, inode); drop_on_err = 0; - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); out_fail: nr = trans->blocks_used; @@ -6652,8 +6611,6 @@ static int btrfs_truncate(struct inode *inode) goto out; } - 
btrfs_set_trans_block_group(trans, inode); - /* * Reserve space for the truncate process. Truncate should be adding * space, but if there are snapshots it may end up using space. @@ -6680,7 +6637,6 @@ static int btrfs_truncate(struct inode *inode) err = PTR_ERR(trans); goto out; } - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = rsv; /* @@ -6715,7 +6671,6 @@ static int btrfs_truncate(struct inode *inode) rsv); BUG_ON(ret); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = rsv; } @@ -6775,15 +6730,14 @@ out: * create a new subvolume directory/inode (helper for the ioctl). */ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, - u64 new_dirid, u64 alloc_hint) + struct btrfs_root *new_root, u64 new_dirid) { struct inode *inode; int err; u64 index = 0; inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, - new_dirid, alloc_hint, S_IFDIR | 0700, &index); + new_dirid, S_IFDIR | 0700, &index); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &btrfs_dir_inode_operations; @@ -6893,21 +6847,6 @@ void btrfs_destroy_inode(struct inode *inode) spin_unlock(&root->fs_info->ordered_extent_lock); } - if (root == root->fs_info->tree_root) { - struct btrfs_block_group_cache *block_group; - - block_group = btrfs_lookup_block_group(root->fs_info, - BTRFS_I(inode)->block_group); - if (block_group && block_group->inode == inode) { - spin_lock(&block_group->lock); - block_group->inode = NULL; - spin_unlock(&block_group->lock); - btrfs_put_block_group(block_group); - } else if (block_group) { - btrfs_put_block_group(block_group); - } - } - spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", @@ -7091,8 +7030,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto out_notrans; } - btrfs_set_trans_block_group(trans, new_dir); - if (dest != root) btrfs_record_root_in_trans(trans, dest); @@ -7331,12 +7268,9 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, - &index); + S_IFLNK|S_IRWXUGO, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -7348,7 +7282,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, goto out_unlock; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -7359,8 +7292,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_file_inode_operations; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); if (drop_inode) goto out_unlock; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a578620e06a8..8e90ccf4b76a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -413,8 +413,7 @@ static noinline int create_subvol(struct btrfs_root *root, btrfs_record_root_in_trans(trans, new_root); - ret = btrfs_create_subvol_root(trans, new_root, new_dirid, - BTRFS_I(dir)->block_group); + ret = btrfs_create_subvol_root(trans, new_root, new_dirid); /* * insert the directory item */ diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 43816f8b23e7..f4ea695325b2 100644 --- 
a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -241,7 +241,6 @@ again: h->transid = cur_trans->transid; h->transaction = cur_trans; h->blocks_used = 0; - h->block_group = 0; h->bytes_reserved = 0; h->delayed_ref_updates = 0; h->use_count = 1; diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 11c6efcd4ed2..da7289e06a82 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -47,7 +47,6 @@ struct btrfs_transaction { struct btrfs_trans_handle { u64 transid; - u64 block_group; u64 bytes_reserved; unsigned long use_count; unsigned long blocks_reserved; @@ -70,19 +69,6 @@ struct btrfs_pending_snapshot { struct list_head list; }; -static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans, - struct inode *inode) -{ - trans->block_group = BTRFS_I(inode)->block_group; -} - -static inline void btrfs_update_inode_block_group( - struct btrfs_trans_handle *trans, - struct inode *inode) -{ - BTRFS_I(inode)->block_group = trans->block_group; -} - static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, struct inode *inode) { diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index cfd660550ded..72ab0295ca74 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -158,8 +158,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, inode); - ret = do_setxattr(trans, inode, name, value, size, flags); if (ret) goto out; -- cgit v1.2.2 From 589d8ade83f07c0f11c8191c0ca309f34d7a2c14 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 17:30:53 -0400 Subject: Btrfs: try not to sleep as much when doing slow caching When the fs is super full and we unmount the fs, we could get stuck in this thing where unmount is waiting for the caching kthread to make progress and the caching kthread keeps scheduling because we're in the middle of a commit. So instead just let the caching kthread keep going and only yield if need_resched(). This makes my horrible umount case go from taking up to 10 minutes to taking less than 20 seconds. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9f0a4e3bd8a9..96be62450318 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -378,15 +378,18 @@ again: if (ret) break; - caching_ctl->progress = last; - btrfs_release_path(extent_root, path); - up_read(&fs_info->extent_commit_sem); - mutex_unlock(&caching_ctl->mutex); - if (btrfs_transaction_in_commit(fs_info)) - schedule_timeout(1); - else + if (need_resched() || + btrfs_next_leaf(extent_root, path)) { + caching_ctl->progress = last; + btrfs_release_path(extent_root, path); + up_read(&fs_info->extent_commit_sem); + mutex_unlock(&caching_ctl->mutex); + cond_resched(); - goto again; + goto again; + } + leaf = path->nodes[0]; + nritems = btrfs_header_nritems(leaf); + continue; } if (key.objectid < block_group->key.objectid) { -- cgit v1.2.2 From 026fd317828500524cdc7e5ff9e8e7923abb2868 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 13 May 2011 10:32:11 -0400 Subject: Btrfs: don't always do readahead Our readahead is sort of sloppy, and really isn't always needed.
For example if ls is doing a stating ls (which is the default) it's going to stat in non-disk order, so if say you have a directory with a stupid amount of files, readahead is going to do nothing but waste time in the case of doing the stat. Taking the unconditional readahead out made my test go from 57 minutes to 36 minutes. This means that everywhere we do loop through the tree we want to make sure we do set path->reada properly, so I went through and found all of the places where we loop through the path and set reada to 1. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 2 -- fs/btrfs/extent-tree.c | 3 ++- fs/btrfs/inode.c | 14 ++++++++++++-- fs/btrfs/relocation.c | 6 ++++++ 4 files changed, 20 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f7a0a64b868f..f61c16c1481a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -48,8 +48,6 @@ struct btrfs_path *btrfs_alloc_path(void) { struct btrfs_path *path; path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); - if (path) - path->reada = 1; return path; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 96be62450318..1ba2cc58eab5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -347,7 +347,7 @@ static int caching_kthread(void *data) */ path->skip_locking = 1; path->search_commit_root = 1; - path->reada = 2; + path->reada = 1; key.objectid = last; key.offset = 0; @@ -8556,6 +8556,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); if (cache_gen != 0 && diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index dd5938a7de21..6228a304b547 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4242,7 +4242,9 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, filp->f_pos = 2; } path = btrfs_alloc_path(); - path->reada = 2; + if (!path) + return -ENOMEM; + path->reada = 1; btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; @@ -5043,7 +5045,15 @@ again: if (!path) { path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) { + err = -ENOMEM; + goto out; + } + /* + * Chances are we'll be called again, so go ahead and do + * readahead + */ + path->reada = 1; } ret = btrfs_lookup_file_extent(trans, root, path, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 09c30d37d43e..5872b41581f4 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -676,6 +676,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc, err = -ENOMEM; goto out; } + path1->reada = 1; + path2->reada = 2; node = alloc_backref_node(cache); if (!node) { @@ -1996,6 +1998,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; reloc_root = root->reloc_root; root_item = &reloc_root->root_item; @@ -3297,6 +3300,7 @@ static int find_data_references(struct reloc_control *rc, path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; root = read_fs_root(rc->extent_root->fs_info, ref_root); if (IS_ERR(root)) { @@ -3665,6 +3669,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; ret = prepare_to_relocate(rc); if (ret) { @@ -4090,6 +4095,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = -1; 
key.objectid = BTRFS_TREE_RELOC_OBJECTID; key.type = BTRFS_ROOT_ITEM_KEY; -- cgit v1.2.2 From cca1c81f43e26ab60c0d1090fb90992358d69bdf Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 13 May 2011 11:07:12 -0400 Subject: Btrfs: don't try to allocate from a block group that doesn't have enough space If we have a very large filesystem, we can spend a lot of time in find_free_extent just trying to allocate from empty block groups. So instead check to see if the block group even has enough space for the allocation, and if not go on to the next block group. Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1ba2cc58eab5..c8c318494dee 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5159,6 +5159,14 @@ have_block_group: if (unlikely(block_group->ro)) goto loop; + spin_lock(&block_group->tree_lock); + if (cached && + block_group->free_space < num_bytes + empty_size) { + spin_unlock(&block_group->tree_lock); + goto loop; + } + spin_unlock(&block_group->tree_lock); + /* * Ok we want to try and use the cluster allocator, so lets look * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will -- cgit v1.2.2 From 207dde8289d9b005b665cb9d8d2bb9464256101d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 13 May 2011 14:49:23 -0400 Subject: Btrfs: check for duplicate entries in the free space cache If there are duplicate entries in the free space cache, discard the entire cache and load it the old fashioned way. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 63731a1fb0a1..d634a7e42207 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -420,7 +420,14 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, spin_lock(&block_group->tree_lock); ret = link_free_space(block_group, e); spin_unlock(&block_group->tree_lock); - BUG_ON(ret); + if (ret) { + printk(KERN_ERR "Duplicate entries in " + "free space cache, dumping\n"); + kunmap(page); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } } else { e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); if (!e->bitmap) { @@ -437,6 +444,14 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, recalculate_thresholds(block_group); spin_unlock(&block_group->tree_lock); list_add_tail(&e->list, &bitmaps); + if (ret) { + printk(KERN_ERR "Duplicate entries in " + "free space cache, dumping\n"); + kunmap(page); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } } num_entries--; @@ -909,10 +924,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, * logically. */ if (bitmap) { - WARN_ON(info->bitmap); + if (info->bitmap) { + WARN_ON_ONCE(1); + return -EEXIST; + } p = &(*p)->rb_right; } else { - WARN_ON(!info->bitmap); + if (!info->bitmap) { + WARN_ON_ONCE(1); + return -EEXIST; + } p = &(*p)->rb_left; } } -- cgit v1.2.2 From d90c732122a1f6d0efe388a8a204f67f144b2eb3 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 May 2011 09:50:54 -0400 Subject: Btrfs: leave spinning on lookup and map the leaf On lookup we only want to read the inode item, so leave the path spinning. Also we're just wholesale reading the leaf off, so map the leaf so we don't do a bunch of kmap/kunmaps. 
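
The "enough space" check added a few patches above is a plain early-out: compare the group's cached free-space counter against the request before doing any real search. Below is a simplified user-space sketch; the struct and helper are stand-ins, and in the kernel the comparison runs under block_group->tree_lock and is only trusted once the group's free-space cache is loaded.

#include <stdio.h>

/* Simplified stand-in for the kernel's block group cache. */
struct block_group {
        unsigned long long free_space;
        int cached;     /* free-space cache loaded? */
};

static int worth_searching(const struct block_group *bg,
                           unsigned long long num_bytes,
                           unsigned long long empty_size)
{
        if (bg->cached && bg->free_space < num_bytes + empty_size)
                return 0;       /* skip: cannot possibly satisfy this request */
        return 1;
}

int main(void)
{
        struct block_group nearly_empty = { .free_space = 4096,    .cached = 1 };
        struct block_group roomy        = { .free_space = 1 << 20, .cached = 1 };

        printf("nearly empty group searched? %d\n",
               worth_searching(&nearly_empty, 65536, 0));       /* 0 */
        printf("roomy group searched?        %d\n",
               worth_searching(&roomy, 65536, 0));              /* 1 */
        return 0;
}
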
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6228a304b547..dc8fb2b3a145 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2493,6 +2493,7 @@ static void btrfs_read_locked_inode(struct inode *inode) path = btrfs_alloc_path(); BUG_ON(!path); + path->leave_spinning = 1; memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); ret = btrfs_lookup_inode(NULL, root, path, &location, 0); @@ -2502,6 +2503,12 @@ static void btrfs_read_locked_inode(struct inode *inode) leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); + if (!leaf->map_token) + map_private_extent_buffer(leaf, (unsigned long)inode_item, + sizeof(struct btrfs_inode_item), + &leaf->map_token, &leaf->kaddr, + &leaf->map_start, &leaf->map_len, + KM_USER1); inode->i_mode = btrfs_inode_mode(leaf, inode_item); inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); @@ -2539,6 +2546,11 @@ static void btrfs_read_locked_inode(struct inode *inode) if (!maybe_acls) cache_no_acl(inode); + if (leaf->map_token) { + unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); + leaf->map_token = NULL; + } + btrfs_free_path(path); inode_item = NULL; -- cgit v1.2.2 From 0956c798ef8dbe0fc215870eb68bd2d8e789f86a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 18 May 2011 00:11:22 +0000 Subject: BTRFS: Remove unused node_lock 240f62c8756 replaced the node_lock with rcu_read_lock, but forgot to remove the actual lock in the data structure. Remove it here. Signed-off-by: Andi Kleen Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 3 --- fs/btrfs/disk-io.c | 1 - 2 files changed, 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..f290b98e2fe6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1088,9 +1088,6 @@ struct btrfs_fs_info { struct btrfs_root { struct extent_buffer *node; - /* the node lock is held while changing the node pointer */ - spinlock_t node_lock; - struct extent_buffer *commit_root; struct btrfs_root *log_root; struct btrfs_root *reloc_root; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..64b289690f9d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1064,7 +1064,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, INIT_LIST_HEAD(&root->dirty_list); INIT_LIST_HEAD(&root->orphan_list); INIT_LIST_HEAD(&root->root_list); - spin_lock_init(&root->node_lock); spin_lock_init(&root->orphan_lock); spin_lock_init(&root->inode_lock); spin_lock_init(&root->accounting_lock); -- cgit v1.2.2 From e2156867159ae7b3bc38ef1c26ea0ee30a895ef8 Mon Sep 17 00:00:00 2001 From: Hugo Mills Date: Sat, 14 May 2011 17:43:41 +0000 Subject: btrfs: Ensure the tree search ioctl returns the right number of records Btrfs's tree search ioctl has a field to indicate that no more than a given number of records should be returned. The ioctl doesn't honour this, as the tested value is not incremented until the end of the copy_to_sk function. This patch removes an unnecessary local variable, and updates the num_found counter as each key is found in the tree. 
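
A user-space sketch of the counting bug and the fix; keys_in_leaf, nr_items, and num_found are simplified stand-ins for the real leaf contents and search-key structures.

#include <stdio.h>

static int copy_leaf_buggy(int keys_in_leaf, int *num_found, int nr_items)
{
        int found = 0;
        int i;

        for (i = 0; i < keys_in_leaf; i++) {
                found++;                        /* local counter only */
                if (*num_found >= nr_items)     /* never trips mid-leaf */
                        break;
        }
        *num_found += found;                    /* updated too late */
        return i;
}

static int copy_leaf_fixed(int keys_in_leaf, int *num_found, int nr_items)
{
        int i;

        for (i = 0; i < keys_in_leaf; i++) {
                (*num_found)++;                 /* count each key as it is copied */
                if (*num_found >= nr_items)
                        break;
        }
        return i;
}

int main(void)
{
        int n = 0;

        copy_leaf_buggy(10, &n, 3);
        printf("buggy: %d records copied for nr_items=3\n", n); /* 10 */

        n = 0;
        copy_leaf_fixed(10, &n, 3);
        printf("fixed: %d records copied for nr_items=3\n", n); /*  3 */
        return 0;
}
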
Signed-off-by: Hugo Mills Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed4799..ce773fb736a1 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1279,7 +1279,6 @@ static noinline int copy_to_sk(struct btrfs_root *root, int nritems; int i; int slot; - int found = 0; int ret = 0; leaf = path->nodes[0]; @@ -1326,7 +1325,7 @@ static noinline int copy_to_sk(struct btrfs_root *root, item_off, item_len); *sk_offset += item_len; } - found++; + (*num_found)++; if (*num_found >= sk->nr_items) break; @@ -1345,7 +1344,6 @@ advance_key: } else ret = 1; overflow: - *num_found += found; return ret; } -- cgit v1.2.2 From 0f3b708c11914b684d17fed975eed19db902a8de Mon Sep 17 00:00:00 2001 From: Jamey Sharp Date: Thu, 5 May 2011 19:03:46 +0000 Subject: btrfs: Delete unused version.sh script. In 2008, commit b4f6c45dfbf84f47c21f73f6370ad1292b0627fd dropped the use of fs/btrfs/version.sh, but left the script behind. Kill it. Commit by Jamey Sharp and Josh Triplett. Signed-off-by: Jamey Sharp Signed-off-by: Josh Triplett Cc: Chris Mason Signed-off-by: Chris Mason --- fs/btrfs/version.sh | 43 ------------------------------------------- 1 file changed, 43 deletions(-) delete mode 100644 fs/btrfs/version.sh (limited to 'fs/btrfs') diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh deleted file mode 100644 index 1ca1952fd917..000000000000 --- a/fs/btrfs/version.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# -# determine-version -- report a useful version for releases -# -# Copyright 2008, Aron Griffis -# Copyright 2008, Oracle -# Released under the GNU GPLv2 - -v="v0.16" - -which git &> /dev/null -if [ $? == 0 ]; then - git branch >& /dev/null - if [ $? == 0 ]; then - if head=`git rev-parse --verify HEAD 2>/dev/null`; then - if tag=`git describe --tags 2>/dev/null`; then - v="$tag" - fi - - # Are there uncommitted changes? - git update-index --refresh --unmerged > /dev/null - if git diff-index --name-only HEAD | \ - grep -v "^scripts/package" \ - | read dummy; then - v="$v"-dirty - fi - fi - fi -fi - -echo "#ifndef __BUILD_VERSION" > .build-version.h -echo "#define __BUILD_VERSION" >> .build-version.h -echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h -echo "#endif" >> .build-version.h - -diff -q version.h .build-version.h >& /dev/null - -if [ $? == 0 ]; then - rm .build-version.h - exit 0 -fi - -mv .build-version.h version.h -- cgit v1.2.2 From c4f675cd40d955d539180506c09515c90169b15b Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Fri, 20 May 2011 20:20:30 +0000 Subject: btrfs: don't spin in shrink_delalloc if there is nothing to free Observed as a large delay when a --mixed filesystem is filled up. Test example: 1. create tiny --mixed FS: $ dd if=/dev/zero of=2G.img seek=$((2048 * 1024 * 1024 - 1)) count=1 bs=1 $ mkfs.btrfs --mixed 2G.img $ mount -oloop 2G.img /mnt/ut/ 2. Try to fill it up: $ dd if=/dev/urandom of=10M.file bs=10240 count=1024 $ seq 1 256 | while read file_no; do echo $file_no; time cp 10M.file ${file_no}.copy; done Up to '200.copy' it goes fast, but once the disk fills up, every -ENOSPC takes 3 seconds to pop up (and in usermode linux it's even more: 30-60 seconds!). (The time may depend on the kernel's timer resolution.) No IO, no CPU load, just rescheduling. Some debugging revealed busy spinning in shrink_delalloc.
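
The patch below adds a single guard. As a hedged sketch of its effect (user-space, with plain integers standing in for the trans handle and root arguments of the real function):

#include <stdio.h>

/* Simplified stand-in for shrink_delalloc(): the new guard keeps us
 * from looping ~1024 times flushing when there is nothing to flush. */
static int shrink_delalloc(unsigned long long reserved,
                           unsigned long long delalloc_bytes)
{
        int loops = 0;

        if (reserved == 0)
                return 0;
        /* nothing to shrink - nothing to reclaim */
        if (delalloc_bytes == 0)
                return 0;
        while (loops < 1024) {
                /* ... writeback and wait would happen here ... */
                loops++;
        }
        return loops;
}

int main(void)
{
        printf("loops with no delalloc:   %d\n",
               shrink_delalloc(1 << 20, 0));            /* 0 after the fix */
        printf("loops with real delalloc: %d\n",
               shrink_delalloc(1 << 20, 1 << 20));      /* bounded real work */
        return 0;
}
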
Signed-off-by: Sergei Trofimovich Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16c..9f5fdd37451d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3425,6 +3425,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, if (reserved == 0) return 0; + /* nothing to shrink - nothing to reclaim */ + if (root->fs_info->delalloc_bytes == 0) + return 0; + max_reclaim = min(reserved, to_reclaim); while (loops < 1024) { -- cgit v1.2.2 From 9694b3fcbb0f5dd498fdf53c82f22fcc37989152 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Fri, 20 May 2011 20:20:31 +0000 Subject: btrfs: typo: 'btrfS' -> 'btrfs' Signed-off-by: Sergei Trofimovich Signed-off-by: Chris Mason --- fs/btrfs/dir-item.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae69..dec93485d539 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -452,7 +452,7 @@ int verify_dir_item(struct btrfs_root *root, namelen = XATTR_NAME_MAX; if (btrfs_dir_name_len(leaf, dir_item) > namelen) { - printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n", + printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n", (unsigned)btrfs_dir_data_len(leaf, dir_item)); return 1; } -- cgit v1.2.2 From 27160b6b5a1744b6eaa8416e2b901ec937b1eee0 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Fri, 20 May 2011 20:20:32 +0000 Subject: btrfs: fix typo 'testeing' -> 'testing' Signed-off-by: Sergei Trofimovich Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..72650ceb9829 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1310,7 +1310,7 @@ static int btrfs_set_bit_hook(struct inode *inode, /* * set_bit and clear bit hooks normally require _irqsave/restore - * but in this case, we are only testeing for the DELALLOC + * but in this case, we are only testing for the DELALLOC * bit, which is only set or cleared with irqs on */ if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { @@ -1344,7 +1344,7 @@ static int btrfs_clear_bit_hook(struct inode *inode, { /* * set_bit and clear bit hooks normally require _irqsave/restore - * but in this case, we are only testeing for the DELALLOC + * but in this case, we are only testing for the DELALLOC * bit, which is only set or cleared with irqs on */ if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { -- cgit v1.2.2 From b0b802d7e34b0b4a78f911c3a8aad88aa91fd7ab Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 19 May 2011 07:03:42 +0000 Subject: Btrfs: return error code to caller when btrfs_previous_item fails The error code is returned instead of calling BUG_ON when btrfs_previous_item returns the error. 
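
This and the next several patches apply the same BUG_ON-to-error-return conversion. A hedged sketch of the shape of that change follows; the helpers are illustrative stand-ins, not the kernel functions.

#include <errno.h>
#include <stdio.h>

/* Stand-in for btrfs_previous_item(): fails with a negative errno. */
static int previous_item(int simulate_error)
{
        return simulate_error ? -ENOENT : 0;
}

static int free_dev_extent(int simulate_error)
{
        int ret;

        ret = previous_item(simulate_error);
        if (ret)
                goto out;       /* was: BUG_ON(ret) -- crashed the whole box */

        /* ... look up and delete the extent item ... */
        ret = 0;
out:
        /* shared cleanup (btrfs_free_path in the real function) runs here */
        return ret;
}

int main(void)
{
        printf("ok path:    %d\n", free_dev_extent(0)); /*  0 */
        printf("error path: %d\n", free_dev_extent(1)); /* -2 (-ENOENT) */
        return 0;
}

The same shape recurs in the btrfs_del_csums, fixup_inode_link_counts, and drop_objectid_items changes in the patches that follow: jump to a common cleanup label and propagate ret instead of crashing.
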
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c7367ae5a3e6..e40cdd5b4669 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -949,14 +949,14 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, BTRFS_DEV_EXTENT_KEY); - BUG_ON(ret); + if (ret) + goto out; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); BUG_ON(found_key.offset > start || found_key.offset + btrfs_dev_extent_length(leaf, extent) < start); - ret = 0; } else if (ret == 0) { leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], @@ -969,6 +969,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); BUG_ON(ret); +out: btrfs_free_path(path); return ret; } -- cgit v1.2.2 From 65a246c5ffe3b487a001de025816326939e63362 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 19 May 2011 04:37:44 +0000 Subject: Btrfs: return error code to caller when btrfs_del_item fails The error code is returned instead of calling BUG_ON when btrfs_del_item returns the error. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/file-item.c | 10 ++++++---- fs/btrfs/root-tree.c | 6 +++++- fs/btrfs/tree-log.c | 10 +++++++--- fs/btrfs/volumes.c | 4 +--- 4 files changed, 19 insertions(+), 11 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..6e7556aa02e8 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -551,10 +551,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { if (path->slots[0] == 0) - goto out; + break; path->slots[0]--; } else if (ret < 0) { - goto out; + break; } leaf = path->nodes[0]; @@ -579,7 +579,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, /* delete the entire item, it is inside our range */ if (key.offset >= bytenr && csum_end <= end_byte) { ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); + if (ret) + goto out; if (key.offset == bytenr) break; } else if (key.offset < bytenr && csum_end > end_byte) { @@ -633,9 +634,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, } btrfs_release_path(root, path); } + ret = 0; out: btrfs_free_path(path); - return 0; + return ret; } int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6928bff62daa..2cf5f5142159 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -385,7 +385,10 @@ again: *sequence = btrfs_root_ref_sequence(leaf, ref); ret = btrfs_del_item(trans, tree_root, path); - BUG_ON(ret); + if (ret) { + err = ret; + goto out; + } } else err = -ENOENT; @@ -397,6 +400,7 @@ again: goto again; } +out: btrfs_free_path(path); return err; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..cf2baeb70462 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1050,7 +1050,8 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, break; ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); + if (ret) + goto out; btrfs_release_path(root, path); inode = read_one_inode(root, key.offset); @@ -1068,8 +1069,10 @@ static noinline int 
fixup_inode_link_counts(struct btrfs_trans_handle *trans, */ key.offset = (u64)-1; } + ret = 0; +out: btrfs_release_path(root, path); - return 0; + return ret; } @@ -2587,7 +2590,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, break; ret = btrfs_del_item(trans, log, path); - BUG_ON(ret); + if (ret) + break; btrfs_release_path(log, path); } btrfs_release_path(log, path); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e40cdd5b4669..deca1a0326ad 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -967,7 +967,6 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, if (device->bytes_used > 0) device->bytes_used -= btrfs_dev_extent_length(leaf, extent); ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); out: btrfs_free_path(path); @@ -1770,10 +1769,9 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); btrfs_free_path(path); - return 0; + return ret; } static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 -- cgit v1.2.2 From 1cd307990d6e2b4965620e339a92e0d7ae853e13 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 19 May 2011 05:19:08 +0000 Subject: Btrfs: BUG_ON is deleted from the caller of btrfs_truncate_item & btrfs_extend_item Currently, btrfs_truncate_item and btrfs_extend_item returns only 0. So, the check by BUG_ON in the caller is unnecessary. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 8 ++------ fs/btrfs/dir-item.c | 1 - fs/btrfs/extent-tree.c | 3 --- fs/btrfs/file-item.c | 3 --- fs/btrfs/inode-item.c | 2 -- fs/btrfs/inode.c | 1 - fs/btrfs/tree-log.c | 1 - 7 files changed, 2 insertions(+), 17 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..6f1a59cc41ff 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3216,7 +3216,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, struct btrfs_path *path, u32 new_size, int from_end) { - int ret = 0; int slot; struct extent_buffer *leaf; struct btrfs_item *item; @@ -3314,12 +3313,11 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, btrfs_set_item_size(leaf, item, new_size); btrfs_mark_buffer_dirty(leaf); - ret = 0; if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } - return ret; + return 0; } /* @@ -3329,7 +3327,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u32 data_size) { - int ret = 0; int slot; struct extent_buffer *leaf; struct btrfs_item *item; @@ -3394,12 +3391,11 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, btrfs_set_item_size(leaf, item, old_size + data_size); btrfs_mark_buffer_dirty(leaf); - ret = 0; if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } - return ret; + return 0; } /* diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index dec93485d539..dd421c48c353 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -50,7 +50,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle if (di) return ERR_PTR(-EEXIST); ret = btrfs_extend_item(trans, root, path, data_size); - WARN_ON(ret > 0); } if (ret < 0) return ERR_PTR(ret); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9f5fdd37451d..103e141afeb3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -947,7 +947,6 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans, 
BUG_ON(ret); ret = btrfs_extend_item(trans, root, path, new_size); - BUG_ON(ret); leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); @@ -1555,7 +1554,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans, size = btrfs_extent_inline_ref_size(type); ret = btrfs_extend_item(trans, root, path, size); - BUG_ON(ret); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(leaf, ei); @@ -1684,7 +1682,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans, end - ptr - size); item_size -= size; ret = btrfs_truncate_item(trans, root, path, item_size, 1); - BUG_ON(ret); } btrfs_mark_buffer_dirty(leaf); return 0; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 6e7556aa02e8..fb9b02667e75 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -495,7 +495,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, u32 new_size = (bytenr - key->offset) >> blocksize_bits; new_size *= csum_size; ret = btrfs_truncate_item(trans, root, path, new_size, 1); - BUG_ON(ret); } else if (key->offset >= bytenr && csum_end > end_byte && end_byte > key->offset) { /* @@ -508,7 +507,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, new_size *= csum_size; ret = btrfs_truncate_item(trans, root, path, new_size, 0); - BUG_ON(ret); key->offset = end_byte; ret = btrfs_set_item_key_safe(trans, root, path, key); @@ -763,7 +761,6 @@ again: goto insert; ret = btrfs_extend_item(trans, root, path, diff); - BUG_ON(ret); goto csum; } diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 64f1150bb48d..baa74f3db691 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -130,7 +130,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, item_size - (ptr + sub_item_len - item_start)); ret = btrfs_truncate_item(trans, root, path, item_size - sub_item_len, 1); - BUG_ON(ret); out: btrfs_free_path(path); return ret; @@ -167,7 +166,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); ret = btrfs_extend_item(trans, root, path, ins_len); - BUG_ON(ret); ref = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_ref); ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 72650ceb9829..e9e2b4778279 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3428,7 +3428,6 @@ search_again: btrfs_file_extent_calc_inline_size(size); ret = btrfs_truncate_item(trans, root, path, size, 1); - BUG_ON(ret); } else if (root->ref_cows) { inode_sub_bytes(inode, item_end + 1 - found_key.offset); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cf2baeb70462..0350147106d5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -382,7 +382,6 @@ insert: } else if (found_size < item_size) { ret = btrfs_extend_item(trans, root, path, item_size - found_size); - BUG_ON(ret); } } else if (ret) { return ret; -- cgit v1.2.2 From c00e9493f1412621c8665a707d63e32b0768f572 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 28 Apr 2011 09:10:23 +0000 Subject: Btrfs: return error to caller if read_one_inode() fails When read_one_inode() fails, error code is returned to caller instead of BUG_ON(). 
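
The point of this conversion is that every resource taken before the failed read_one_inode() call must be released on the error path. A user-space sketch under that assumption; read_one_inode here is a hypothetical stand-in that returns NULL on failure.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EIO 5   /* user-space stand-in for the kernel's errno value */

static void *read_one_inode(int fail)
{
        return fail ? NULL : malloc(1);
}

static int drop_one_dir_item(int fail)
{
        char *name = strdup("victim");  /* resource taken before the call */
        void *inode;

        inode = read_one_inode(fail);
        if (!inode) {
                free(name);     /* kfree(name) in the kernel version */
                return -EIO;    /* was: BUG_ON(!inode) */
        }

        /* ... unlink the inode, fix up the directory ... */
        free(inode);
        free(name);
        return 0;
}

int main(void)
{
        printf("ok path:   %d\n", drop_one_dir_item(0));        /*  0 */
        printf("fail path: %d\n", drop_one_dir_item(1));        /* -5 (-EIO) */
        return 0;
}
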
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 0350147106d5..46b7b57650ab 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -677,7 +677,10 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); inode = read_one_inode(root, location.objectid); - BUG_ON(!inode); + if (!inode) { + kfree(name); + return -EIO; + } ret = link_to_fixup_dir(trans, root, path, location.objectid); BUG_ON(ret); @@ -816,7 +819,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, return -ENOENT; inode = read_one_inode(root, key->objectid); - BUG_ON(!inode); + if (!inode) { + iput(dir); + return -EIO; + } ref_ptr = btrfs_item_ptr_offset(eb, slot); ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); @@ -1054,7 +1060,8 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); inode = read_one_inode(root, key.offset); - BUG_ON(!inode); + if (!inode) + return -EIO; ret = fixup_inode_link_count(trans, root, inode); BUG_ON(ret); @@ -1090,7 +1097,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, struct inode *inode; inode = read_one_inode(root, objectid); - BUG_ON(!inode); + if (!inode) + return -EIO; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); @@ -1177,7 +1185,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, int ret; dir = read_one_inode(root, key->objectid); - BUG_ON(!dir); + if (!dir) + return -EIO; name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); @@ -1433,7 +1442,10 @@ again: btrfs_release_path(root, path); btrfs_release_path(log, log_path); inode = read_one_inode(root, location.objectid); - BUG_ON(!inode); + if (!inode) { + kfree(name); + return -EIO; + } ret = link_to_fixup_dir(trans, root, path, location.objectid); -- cgit v1.2.2 From 37daa4f968e9470ae9f30e246a5781717c598271 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 28 Apr 2011 09:18:21 +0000 Subject: Btrfs: check return value of btrfs_inc_extent_ref() If return value of btrfs_inc_extent_ref() is not 0, BUG() is called. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 46b7b57650ab..c2d887566400 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -589,6 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ins.objectid, ins.offset, 0, root->root_key.objectid, key->objectid, offset); + BUG_ON(ret); } else { /* * insert the extent pointer in the extent -- cgit v1.2.2 From b083916638eee513be501f53b42a4be0b9851db0 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 14 May 2011 07:10:51 +0000 Subject: fs/btrfs: Add missing btrfs_free_path Btrfs_alloc_path should be matched with btrfs_free_path in error-handling code. A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @r exists@ local idexpression struct btrfs_path * x; expression ra,rb; position p1,p2; @@ x = btrfs_alloc_path@p1(...) ... when != btrfs_free_path(x,...) when != if (...) { ... btrfs_free_path(x,...) ...} when != x = ra if(...) { ... when != x = rb when forall when != btrfs_free_path(x,...) 
\(return <+...x...+>; \| return@p2...; \) } @script:python@ p1 << r.p1; p2 << r.p2; @@ cocci.print_main("alloc",p1) cocci.print_secs("return",p2) // Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + fs/btrfs/super.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e9e2b4778279..80fcd5177731 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7314,6 +7314,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, datasize); if (err) { drop_inode = 1; + btrfs_free_path(path); goto out_unlock; } leaf = path->nodes[0]; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..46d7eed7e965 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -506,8 +506,10 @@ static struct dentry *get_default_root(struct super_block *sb, */ dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); - if (IS_ERR(di)) + if (IS_ERR(di)) { + btrfs_free_path(path); return ERR_CAST(di); + } if (!di) { /* * Ok the default dir item isn't there. This is weird since -- cgit v1.2.2 From 8233767a227ac5843f1023b88c7272a7b5058f5f Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 06:44:57 +0000 Subject: Btrfs: allocate extent state and check the result properly It doesn't allocate extent_state and check the result properly: - in set_extent_bit, it doesn't allocate extent_state if the path is not allowed to wait - in clear_extent_bit, it doesn't check the result after the atomic allocation, so we trigger BUG_ON() if it fails - if the allocation fails, we trigger BUG_ON instead of returning -ENOMEM, since the return value of clear_extent_bit() is ignored by many callers Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..9ccea86dd015 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -439,6 +439,15 @@ static int clear_state_bit(struct extent_io_tree *tree, return ret; } +static struct extent_state * +alloc_extent_state_atomic(struct extent_state *prealloc) +{ + if (!prealloc) + prealloc = alloc_extent_state(GFP_ATOMIC); + + return prealloc; +} + /* * clear some bits on a range in the tree.
This may require splitting * or inserting elements in the tree, so the gfp mask is used to @@ -476,8 +485,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - if (!prealloc) - return -ENOMEM; + BUG_ON(!prealloc); } spin_lock(&tree->lock); @@ -529,8 +537,8 @@ hit_next: */ if (state->start < start) { - if (!prealloc) - prealloc = alloc_extent_state(GFP_ATOMIC); + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; @@ -551,8 +559,8 @@ hit_next: * on the first half */ if (state->start <= end && state->end > end) { - if (!prealloc) - prealloc = alloc_extent_state(GFP_ATOMIC); + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); if (wake) @@ -725,8 +733,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - if (!prealloc) - return -ENOMEM; + BUG_ON(!prealloc); } spin_lock(&tree->lock); @@ -743,6 +750,8 @@ again: */ node = tree_search(tree, start); if (!node) { + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = insert_state(tree, prealloc, start, end, &bits); prealloc = NULL; BUG_ON(err == -EEXIST); @@ -811,6 +820,9 @@ hit_next: err = -EEXIST; goto out; } + + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; @@ -841,6 +853,9 @@ hit_next: this_end = end; else this_end = last_start - 1; + + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = insert_state(tree, prealloc, start, this_end, &bits); BUG_ON(err == -EEXIST); @@ -865,6 +880,9 @@ hit_next: err = -EEXIST; goto out; } + + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); -- cgit v1.2.2 From c7f895a2b2d1a002810d52e7b6653c9dc2fd0b0b Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 06:45:49 +0000 Subject: Btrfs: fix unsafe usage of merge_state merge_state can free the current state if it can be merged with the next node, but in set_extent_bit(), after merge_state, we still use the current extent to get the next node and cache it into cached_state Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9ccea86dd015..ebfff5b44752 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -780,20 +780,18 @@ hit_next: if (err) goto out; + next_node = rb_next(node); cache_state(state, cached_state); merge_state(tree, state); if (last_end == (u64)-1) goto out; start = last_end + 1; - if (start < end && prealloc && !need_resched()) { - next_node = rb_next(node); - if (next_node) { - state = rb_entry(next_node, struct extent_state, - rb_node); - if (state->start == start) - goto hit_next; - } + if (next_node && start < end && prealloc && !need_resched()) { + state = rb_entry(next_node, struct extent_state, + rb_node); + if (state->start == start) + goto hit_next; } goto search_again; } @@ -856,14 +854,22 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); + + /* + 
* Avoid to free 'prealloc' if it can be merged with + * the later extent. + */ + atomic_inc(&prealloc->refs); err = insert_state(tree, prealloc, start, this_end, &bits); BUG_ON(err == -EEXIST); if (err) { + free_extent_state(prealloc); prealloc = NULL; goto out; } cache_state(prealloc, cached_state); + free_extent_state(prealloc); prealloc = NULL; start = this_end + 1; goto search_again; -- cgit v1.2.2 From 4f6c9328c6c0d8558907f16579a9d47815abef80 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:06:40 +0000 Subject: Btrfs: fix bh leak on __btrfs_open_devices path 'bh' is never released if no error is detected Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index deca1a0326ad..290100fc47be 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -597,6 +597,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, list_add(&device->dev_alloc_list, &fs_devices->alloc_list); } + brelse(bh); continue; error_brelse: -- cgit v1.2.2 From c9513edb0079f97749c2ac00c887a22c4ba44792 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:07:30 +0000 Subject: Btrfs: fix the race between reading and updating devices On the btrfs_congested_fn and __unplug_io_fn paths, we should hold device_list_mutex to keep the remove/add device paths from updating fs_devices->devices underneath us. On the __btrfs_close_devices and btrfs_prepare_sprout paths, the devices in fs_devices->devices are updated, so we should hold the mutex to keep the reader side from reaching them mid-update. Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 ++ fs/btrfs/volumes.c | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 64b289690f9d..4e53a4fc467f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1410,6 +1410,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) struct btrfs_device *device; struct backing_dev_info *bdi; + mutex_lock(&info->fs_devices->device_list_mutex); list_for_each_entry(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; @@ -1419,6 +1420,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) break; } } + mutex_unlock(&info->fs_devices->device_list_mutex); return ret; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 290100fc47be..43c4f09e441c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -481,6 +481,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) if (--fs_devices->opened > 0) return 0; + mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { blkdev_put(device->bdev, device->mode); @@ -495,6 +496,8 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) device->writeable = 0; device->in_fs_metadata = 0; } + mutex_unlock(&fs_devices->device_list_mutex); + WARN_ON(fs_devices->open_devices); WARN_ON(fs_devices->rw_devices); fs_devices->opened = 0; @@ -1415,7 +1418,11 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, INIT_LIST_HEAD(&seed_devices->devices); INIT_LIST_HEAD(&seed_devices->alloc_list); mutex_init(&seed_devices->device_list_mutex); + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init(&fs_devices->devices, &seed_devices->devices); +
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); list_for_each_entry(device, &seed_devices->devices, dev_list) { device->fs_devices = seed_devices; -- cgit v1.2.2 From 0c1daee085cff1395d1eba4ad6faff7810a594d8 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:08:16 +0000 Subject: Btrfs: fix the race between remove dev and alloc chunk On the remove-device path, device->dev_alloc_list is updated without holding the chunk lock Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 43c4f09e441c..ee197ec28547 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1291,7 +1291,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) } if (device->writeable) { + lock_chunks(root); list_del_init(&device->dev_alloc_list); + unlock_chunks(root); root->fs_info->fs_devices->rw_devices--; } @@ -1345,7 +1347,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) } fs_devices->seed = device->fs_devices->seed; device->fs_devices->seed = NULL; + lock_chunks(root); __btrfs_close_devices(device->fs_devices); + unlock_chunks(root); free_fs_devices(device->fs_devices); } @@ -1377,8 +1381,10 @@ out: return ret; error_undo: if (device->writeable) { + lock_chunks(root); list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); + unlock_chunks(root); root->fs_info->fs_devices->rw_devices++; } goto error_brelse; -- cgit v1.2.2 From 46224705656633466ca7dc71d81b3c0abc76cae4 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:08:47 +0000 Subject: Btrfs: drop unnecessary device lock Drop device_list_mutex for the reader side on the clone_fs_devices and btrfs_rm_device paths, since fs_info->volume_mutex already ensures the device list is not updated. btrfs_close_extra_devices runs during initialization, when no device can be added or removed, so we can simply drop the mutex safely there as well, like the other initialization-time functions do (add_missing_dev, __find_device, __btrfs_open_devices ...). Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ee197ec28547..0b5ca2737268 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -406,7 +406,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) fs_devices->latest_trans = orig->latest_trans; memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid)); - mutex_lock(&orig->device_list_mutex); + /* We have held the volume lock, it is safe to get the devices. */ list_for_each_entry(orig_dev, &orig->devices, dev_list) { device = kzalloc(sizeof(*device), GFP_NOFS); if (!device) @@ -429,10 +429,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) device->fs_devices = fs_devices; fs_devices->num_devices++; } - mutex_unlock(&orig->device_list_mutex); return fs_devices; error: - mutex_unlock(&orig->device_list_mutex); free_fs_devices(fs_devices); return ERR_PTR(-ENOMEM); } @@ -443,7 +441,7 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&uuid_mutex); again: - mutex_lock(&fs_devices->device_list_mutex); + /* This is the initialized path, it is safe to release the devices.
*/ list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { if (device->in_fs_metadata) continue; @@ -463,7 +461,6 @@ again: kfree(device->name); kfree(device); } - mutex_unlock(&fs_devices->device_list_mutex); if (fs_devices->seed) { fs_devices = fs_devices->seed; @@ -1242,14 +1239,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) device = NULL; devices = &root->fs_info->fs_devices->devices; - mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + /* + * It is safe to read the devices since the volume_mutex + * is held. + */ list_for_each_entry(tmp, devices, dev_list) { if (tmp->in_fs_metadata && !tmp->bdev) { device = tmp; break; } } - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); bdev = NULL; bh = NULL; disk_super = NULL; -- cgit v1.2.2 From 1f78160ce1b1b8e657e2248118c4d91f881763f0 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:09:16 +0000 Subject: Btrfs: using rcu lock in the reader side of devices list fs_devices->devices is only updated on remove and add device paths, so we can use rcu to protect it in the reader side Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 14 ++++----- fs/btrfs/ioctl.c | 7 +++-- fs/btrfs/volumes.c | 85 +++++++++++++++++++++++++++++++++++++----------------- fs/btrfs/volumes.h | 2 ++ 4 files changed, 72 insertions(+), 36 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4e53a4fc467f..deba3d9c8853 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1410,8 +1410,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) struct btrfs_device *device; struct backing_dev_info *bdi; - mutex_lock(&info->fs_devices->device_list_mutex); - list_for_each_entry(device, &info->fs_devices->devices, dev_list) { + rcu_read_lock(); + list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; bdi = blk_get_backing_dev_info(device->bdev); @@ -1420,7 +1420,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) break; } } - mutex_unlock(&info->fs_devices->device_list_mutex); + rcu_read_unlock(); return ret; } @@ -2332,9 +2332,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; - mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + rcu_read_lock(); head = &root->fs_info->fs_devices->devices; - list_for_each_entry(dev, head, dev_list) { + list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) { total_errors++; continue; @@ -2367,7 +2367,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) } total_errors = 0; - list_for_each_entry(dev, head, dev_list) { + list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) continue; if (!dev->in_fs_metadata || !dev->writeable) @@ -2377,7 +2377,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) if (ret) total_errors++; } - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + rcu_read_unlock(); if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ce773fb736a1..0de71feb8e1c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -281,8 +281,9 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - mutex_lock(&fs_info->fs_devices->device_list_mutex); - list_for_each_entry(device, 
&fs_info->fs_devices->devices, dev_list) { + rcu_read_lock(); + list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, + dev_list) { if (!device->bdev) continue; q = bdev_get_queue(device->bdev); @@ -292,7 +293,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) minlen); } } - mutex_unlock(&fs_info->fs_devices->device_list_mutex); + rcu_read_unlock(); if (!num_devices) return -EOPNOTSUPP; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0b5ca2737268..e7844f8a347a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -363,7 +363,7 @@ static noinline int device_list_add(const char *path, INIT_LIST_HEAD(&device->dev_alloc_list); mutex_lock(&fs_devices->device_list_mutex); - list_add(&device->dev_list, &fs_devices->devices); + list_add_rcu(&device->dev_list, &fs_devices->devices); mutex_unlock(&fs_devices->device_list_mutex); device->fs_devices = fs_devices; @@ -471,6 +471,29 @@ again: return 0; } +static void __free_device(struct work_struct *work) +{ + struct btrfs_device *device; + + device = container_of(work, struct btrfs_device, rcu_work); + + if (device->bdev) + blkdev_put(device->bdev, device->mode); + + kfree(device->name); + kfree(device); +} + +static void free_device(struct rcu_head *head) +{ + struct btrfs_device *device; + + device = container_of(head, struct btrfs_device, rcu); + + INIT_WORK(&device->rcu_work, __free_device); + schedule_work(&device->rcu_work); +} + static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; @@ -480,18 +503,27 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { - if (device->bdev) { - blkdev_put(device->bdev, device->mode); + struct btrfs_device *new_device; + + if (device->bdev) fs_devices->open_devices--; - } + if (device->writeable) { list_del_init(&device->dev_alloc_list); fs_devices->rw_devices--; } - device->bdev = NULL; - device->writeable = 0; - device->in_fs_metadata = 0; + new_device = kmalloc(sizeof(*new_device), GFP_NOFS); + BUG_ON(!new_device); + memcpy(new_device, device, sizeof(*new_device)); + new_device->name = kstrdup(device->name, GFP_NOFS); + BUG_ON(!new_device->name); + new_device->bdev = NULL; + new_device->writeable = 0; + new_device->in_fs_metadata = 0; + list_replace_rcu(&device->dev_list, &new_device->dev_list); + + call_rcu(&device->rcu, free_device); } mutex_unlock(&fs_devices->device_list_mutex); @@ -1204,11 +1236,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) struct block_device *bdev; struct buffer_head *bh = NULL; struct btrfs_super_block *disk_super; + struct btrfs_fs_devices *cur_devices; u64 all_avail; u64 devid; u64 num_devices; u8 *dev_uuid; int ret = 0; + bool clear_super = false; mutex_lock(&uuid_mutex); mutex_lock(&root->fs_info->volume_mutex); @@ -1294,6 +1328,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) list_del_init(&device->dev_alloc_list); unlock_chunks(root); root->fs_info->fs_devices->rw_devices--; + clear_super = true; } ret = btrfs_shrink_device(device, 0); @@ -1304,16 +1339,15 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (ret) goto error_undo; - device->in_fs_metadata = 0; - /* * the device list mutex makes sure that we don't change * the device list while someone else is writing out all * the device supers. 
*/ + + cur_devices = device->fs_devices; mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_del_init(&device->dev_list); - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + list_del_rcu(&device->dev_list); device->fs_devices->num_devices--; @@ -1327,36 +1361,36 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (device->bdev == root->fs_info->fs_devices->latest_bdev) root->fs_info->fs_devices->latest_bdev = next_device->bdev; - if (device->bdev) { - blkdev_put(device->bdev, device->mode); - device->bdev = NULL; + if (device->bdev) device->fs_devices->open_devices--; - } + + call_rcu(&device->rcu, free_device); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); - if (device->fs_devices->open_devices == 0) { + if (cur_devices->open_devices == 0) { struct btrfs_fs_devices *fs_devices; fs_devices = root->fs_info->fs_devices; while (fs_devices) { - if (fs_devices->seed == device->fs_devices) + if (fs_devices->seed == cur_devices) break; fs_devices = fs_devices->seed; } - fs_devices->seed = device->fs_devices->seed; - device->fs_devices->seed = NULL; + fs_devices->seed = cur_devices->seed; + cur_devices->seed = NULL; lock_chunks(root); - __btrfs_close_devices(device->fs_devices); + __btrfs_close_devices(cur_devices); unlock_chunks(root); - free_fs_devices(device->fs_devices); + free_fs_devices(cur_devices); } /* * at this point, the device is zero sized. We want to * remove it from the devices list and zero out the old super */ - if (device->writeable) { + if (clear_super) { /* make sure this device isn't detected as part of * the FS anymore */ @@ -1365,8 +1399,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) sync_dirty_buffer(bh); } - kfree(device->name); - kfree(device); ret = 0; error_brelse: @@ -1425,7 +1457,8 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, mutex_init(&seed_devices->device_list_mutex); mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_splice_init(&fs_devices->devices, &seed_devices->devices); + list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, + synchronize_rcu); mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); @@ -1624,7 +1657,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) * half setup */ mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_add(&device->dev_list, &root->fs_info->fs_devices->devices); + list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); root->fs_info->fs_devices->num_devices++; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..f1b2e4f53fc2 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -86,6 +86,8 @@ struct btrfs_device { u8 uuid[BTRFS_UUID_SIZE]; struct btrfs_work work; + struct rcu_head rcu; + struct work_struct rcu_work; }; struct btrfs_fs_devices { -- cgit v1.2.2 From 90a887c9a2e25bcb1fc658fad59dfbc6fb792734 Mon Sep 17 00:00:00 2001 From: Dan Magenheimer Date: Thu, 26 May 2011 10:01:56 -0600 Subject: btrfs: add cleancache support This sixth patch of eight in this cleancache series "opts-in" cleancache for btrfs. Filesystems must explicitly enable cleancache by calling cleancache_init_fs anytime an instance of the filesystem is mounted. 
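As a hedged sketch (not part of this patch: the examplefs_* names and the block-read helper are made up; only the cleancache_* calls are the real hooks of this series, with cleancache_get_page() returning 0 on a hit), the opt-in looks like this in a hypothetical filesystem:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>

static int examplefs_block_read(struct page *page);	/* hypothetical helper */

/* opt this superblock instance in, once, at mount time */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* ... usual superblock setup ... */
	cleancache_init_fs(sb);
	return 0;
}

/* ask cleancache before paying for a real block read */
static int examplefs_readpage(struct file *file, struct page *page)
{
	if (!PageUptodate(page) && cleancache_get_page(page) == 0) {
		/* hit: cleancache already copied the data into the page */
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	/* miss: fall back to a normal read */
	return examplefs_block_read(page);
}
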
Btrfs uses its own readpage which must be hooked, but all other cleancache hooks are in the VFS layer including the matching cleancache_flush_fs hook which must be called on unmount. Details and a FAQ can be found in Documentation/vm/cleancache.txt [v6-v8: no changes] [v5: jeremy@goop.org: simplify init hook and any future fs init changes] Signed-off-by: Dan Magenheimer Signed-off-by: Chris Mason Reviewed-by: Jeremy Fitzhardinge Reviewed-by: Konrad Rzeszutek Wilk Cc: Andrew Morton Cc: Al Viro Cc: Matthew Wilcox Cc: Nick Piggin Cc: Mel Gorman Cc: Rik Van Riel Cc: Jan Beulich Cc: Andreas Dilger Cc: Ted Ts'o Cc: Mark Fasheh Cc: Joel Becker Cc: Nitin Gupta --- fs/btrfs/extent_io.c | 9 +++++++++ fs/btrfs/super.c | 2 ++ 2 files changed, 11 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..0cee46e01081 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -10,6 +10,7 @@ #include #include #include +#include <linux/cleancache.h> #include "extent_io.h" #include "extent_map.h" #include "compat.h" @@ -2015,6 +2016,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree, set_page_extent_mapped(page); + if (!PageUptodate(page)) { + if (cleancache_get_page(page) == 0) { + BUG_ON(blocksize != PAGE_SIZE); + goto out; + } + } + end = page_end; while (1) { lock_extent(tree, start, end, GFP_NOFS); @@ -2148,6 +2156,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, cur = cur + iosize; page_offset += iosize; } +out: if (!nr) { if (!PageError(page)) SetPageUptodate(page); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..be4ffa12f3ef 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -39,6 +39,7 @@ #include #include #include +#include <linux/cleancache.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -624,6 +625,7 @@ static int btrfs_fill_super(struct super_block *sb, sb->s_root = root_dentry; save_mount_options(sb, data); + cleancache_init_fs(sb); return 0; fail_close: -- cgit v1.2.2 From 4cb5300bc839b8a943eb19c9f27f25470e22d0ca Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 24 May 2011 15:35:30 -0400 Subject: Btrfs: add mount -o auto_defrag This will detect small random writes into files and queue them up for an auto defrag process. It isn't well suited to database workloads yet, but works for smaller files such as rpm, sqlite or bdb databases.
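One hedged note on what counts as a "small random write": as the inode.c hunks later in this patch show, a write only qualifies when it is small and lands inside the current on-disk size, so plain appends never queue defrag work. The buffered-write path uses a 16k threshold:

	/* from the compress_file_range() hunk in this patch: small write inside EOF */
	if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
		btrfs_add_inode_defrag(NULL, inode);

The cow_file_range() and direct-I/O hunks apply the same test with a 64k threshold.
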
Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 1 + fs/btrfs/ctree.h | 45 ++++- fs/btrfs/disk-io.c | 12 ++ fs/btrfs/file.c | 257 ++++++++++++++++++++++++++++ fs/btrfs/inode.c | 12 ++ fs/btrfs/ioctl.c | 448 ++++++++++++++++++++++++++++++++++++++----------- fs/btrfs/ioctl.h | 31 ---- fs/btrfs/super.c | 7 +- 8 files changed, 678 insertions(+), 135 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index d0b0e43a6a8b..93b1aa932014 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -153,6 +153,7 @@ struct btrfs_inode { unsigned ordered_data_close:1; unsigned orphan_meta_reserved:1; unsigned dummy_inode:1; + unsigned in_defrag:1; /* * always compress this one file diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 026fc47b42cf..332323e19dd1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1074,6 +1074,11 @@ struct btrfs_fs_info { /* all metadata allocations go through this cluster */ struct btrfs_free_cluster meta_alloc_cluster; + /* auto defrag inodes go here */ + spinlock_t defrag_inodes_lock; + struct rb_root defrag_inodes; + atomic_t defrag_running; + spinlock_t ref_cache_lock; u64 total_ref_cache_size; @@ -1205,6 +1210,38 @@ struct btrfs_root { struct super_block anon_super; }; +struct btrfs_ioctl_defrag_range_args { + /* start of the defrag operation */ + __u64 start; + + /* number of bytes to defrag, use (u64)-1 to say all */ + __u64 len; + + /* + * flags for the operation, which can include turning + * on compression for this one defrag + */ + __u64 flags; + + /* + * any extent bigger than this will be considered + * already defragged. Use 0 to take the kernel default + * Use 1 to say every single extent must be rewritten + */ + __u32 extent_thresh; + + /* + * which compression method to use if turning on compression + * for this defrag operation. If unspecified, zlib will + * be used + */ + __u32 compress_type; + + /* spare for later */ + __u32 unused[4]; +}; + + /* * inode items have the data typically returned from stat and store other * info about object characteristics. 
There is one for every file and dir in @@ -1302,6 +1339,7 @@ struct btrfs_root { #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) +#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2528,8 +2566,13 @@ extern const struct dentry_operations btrfs_dentry_operations; long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); void btrfs_update_iflags(struct inode *inode); void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); - +int btrfs_defrag_file(struct inode *inode, struct file *file, + struct btrfs_ioctl_defrag_range_args *range, + u64 newer_than, unsigned long max_pages); /* file.c */ +int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + struct inode *inode); +int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 16d335b342a2..b2588a552658 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1475,6 +1475,7 @@ static int cleaner_kthread(void *arg) btrfs_run_delayed_iputs(root); btrfs_clean_old_snapshots(root); mutex_unlock(&root->fs_info->cleaner_mutex); + btrfs_run_defrag_inodes(root->fs_info); } if (freezing(current)) { @@ -1616,6 +1617,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, spin_lock_init(&fs_info->ref_cache_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); + spin_lock_init(&fs_info->defrag_inodes_lock); init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; @@ -1638,9 +1640,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); + atomic_set(&fs_info->defrag_running, 0); fs_info->sb = sb; fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; + fs_info->defrag_inodes = RB_ROOT; fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); @@ -2501,6 +2505,14 @@ int close_ctree(struct btrfs_root *root) smp_mb(); btrfs_scrub_cancel(root); + + /* wait for any defraggers to finish */ + wait_event(fs_info->transaction_wait, + (atomic_read(&fs_info->defrag_running) == 0)); + + /* clear out the rbtree of defraggable inodes */ + btrfs_run_defrag_inodes(root->fs_info); + btrfs_put_block_group_cache(fs_info); /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 58ddc4442159..c6a22d783c35 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -40,6 +40,263 @@ #include "locking.h" #include "compat.h" +/* + * when auto defrag is enabled we + * queue up these defrag structs to remember which + * inodes need defragging passes + */ +struct inode_defrag { + struct rb_node rb_node; + /* objectid */ + u64 ino; + /* + * transid where the defrag was added, we search for + * extents newer than this + */ + u64 transid; + + /* root objectid */ + u64 root; + + /* last offset we were able to defrag */ + u64 last_offset; + + /* if we've wrapped around back to zero once already */ + int cycled; +}; + +/* pop a record for an inode into the defrag tree. 
The lock + * must be held already + * + * If you're inserting a record for an older transid than an + * existing record, the transid already in the tree is lowered + * + * If an existing record is found the defrag item you + * pass in is freed + */ +static int __btrfs_add_inode_defrag(struct inode *inode, + struct inode_defrag *defrag) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct inode_defrag *entry; + struct rb_node **p; + struct rb_node *parent = NULL; + + p = &root->fs_info->defrag_inodes.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct inode_defrag, rb_node); + + if (defrag->ino < entry->ino) + p = &parent->rb_left; + else if (defrag->ino > entry->ino) + p = &parent->rb_right; + else { + /* if we're reinserting an entry for + * an old defrag run, make sure to + * lower the transid of our existing record + */ + if (defrag->transid < entry->transid) + entry->transid = defrag->transid; + if (defrag->last_offset > entry->last_offset) + entry->last_offset = defrag->last_offset; + goto exists; + } + } + BTRFS_I(inode)->in_defrag = 1; + rb_link_node(&defrag->rb_node, parent, p); + rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); + return 0; + +exists: + kfree(defrag); + return 0; + +} + +/* + * insert a defrag record for this inode if auto defrag is + * enabled + */ +int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct inode_defrag *defrag; + int ret = 0; + u64 transid; + + if (!btrfs_test_opt(root, AUTO_DEFRAG)) + return 0; + + if (root->fs_info->closing) + return 0; + + if (BTRFS_I(inode)->in_defrag) + return 0; + + if (trans) + transid = trans->transid; + else + transid = BTRFS_I(inode)->root->last_trans; + + defrag = kzalloc(sizeof(*defrag), GFP_NOFS); + if (!defrag) + return -ENOMEM; + + defrag->ino = inode->i_ino; + defrag->transid = transid; + defrag->root = root->root_key.objectid; + + spin_lock(&root->fs_info->defrag_inodes_lock); + if (!BTRFS_I(inode)->in_defrag) + ret = __btrfs_add_inode_defrag(inode, defrag); + spin_unlock(&root->fs_info->defrag_inodes_lock); + return ret; +} + +/* + * must be called with the defrag_inodes lock held + */ +struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino, + struct rb_node **next) +{ + struct inode_defrag *entry = NULL; + struct rb_node *p; + struct rb_node *parent = NULL; + + p = info->defrag_inodes.rb_node; + while (p) { + parent = p; + entry = rb_entry(parent, struct inode_defrag, rb_node); + + if (ino < entry->ino) + p = parent->rb_left; + else if (ino > entry->ino) + p = parent->rb_right; + else + return entry; + } + + if (next) { + while (parent && ino > entry->ino) { + parent = rb_next(parent); + entry = rb_entry(parent, struct inode_defrag, rb_node); + } + *next = parent; + } + return NULL; +} + +/* + * run through the list of inodes in the FS that need + * defragging + */ +int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) +{ + struct inode_defrag *defrag; + struct btrfs_root *inode_root; + struct inode *inode; + struct rb_node *n; + struct btrfs_key key; + struct btrfs_ioctl_defrag_range_args range; + u64 first_ino = 0; + int num_defrag; + int defrag_batch = 1024; + + memset(&range, 0, sizeof(range)); + range.len = (u64)-1; + + atomic_inc(&fs_info->defrag_running); + spin_lock(&fs_info->defrag_inodes_lock); + while(1) { + n = NULL; + + /* find an inode to defrag */ + defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n); + if (!defrag) { + if (n) + 
defrag = rb_entry(n, struct inode_defrag, rb_node); + else if (first_ino) { + first_ino = 0; + continue; + } else { + break; + } + } + + /* remove it from the rbtree */ + first_ino = defrag->ino + 1; + rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); + + if (fs_info->closing) + goto next_free; + + spin_unlock(&fs_info->defrag_inodes_lock); + + /* get the inode */ + key.objectid = defrag->root; + btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); + key.offset = (u64)-1; + inode_root = btrfs_read_fs_root_no_name(fs_info, &key); + if (IS_ERR(inode_root)) + goto next; + + key.objectid = defrag->ino; + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + + inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); + if (IS_ERR(inode)) + goto next; + + /* do a chunk of defrag */ + BTRFS_I(inode)->in_defrag = 0; + range.start = defrag->last_offset; + num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, + defrag_batch); + /* + * if we filled the whole defrag batch, there + * must be more work to do. Queue this defrag + * again + */ + if (num_defrag == defrag_batch) { + defrag->last_offset = range.start; + __btrfs_add_inode_defrag(inode, defrag); + /* + * we don't want to kfree defrag, we added it back to + * the rbtree + */ + defrag = NULL; + } else if (defrag->last_offset && !defrag->cycled) { + /* + * we didn't fill our defrag batch, but + * we didn't start at zero. Make sure we loop + * around to the start of the file. + */ + defrag->last_offset = 0; + defrag->cycled = 1; + __btrfs_add_inode_defrag(inode, defrag); + defrag = NULL; + } + + iput(inode); +next: + spin_lock(&fs_info->defrag_inodes_lock); +next_free: + kfree(defrag); + } + spin_unlock(&fs_info->defrag_inodes_lock); + + atomic_dec(&fs_info->defrag_running); + + /* + * during unmount, we use the transaction_wait queue to + * wait for the defragger to stop + */ + wake_up(&fs_info->transaction_wait); + return 0; +} /* simple helper to fault in pages and copy. This should go away * and be replaced with calls into generic code. 
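Stepping back from the diff for a moment: the queueing rules above reduce to one record per inode (the in_defrag bit short-circuits the common case before any lock is taken), and re-queueing an inode only lowers the stored transid so older extents are still picked up. A small userspace model of just that merge rule, compilable as plain C, with tsearch() standing in for the kernel rbtree and all names illustrative:

#include <stdlib.h>
#include <search.h>

struct defrag_rec {
	unsigned long long ino;     /* tree key */
	unsigned long long transid; /* defrag extents newer than this */
};

static int cmp_ino(const void *a, const void *b)
{
	const struct defrag_rec *x = a, *y = b;
	return (x->ino > y->ino) - (x->ino < y->ino);
}

/* queue ino, or merge into an existing record, keeping the older transid */
static void queue_defrag(void **tree, unsigned long long ino,
			 unsigned long long transid)
{
	struct defrag_rec *rec = malloc(sizeof(*rec));
	struct defrag_rec **slot;

	if (!rec)
		return;
	rec->ino = ino;
	rec->transid = transid;
	slot = (struct defrag_rec **)tsearch(rec, tree, cmp_ino);
	if (!slot || *slot != rec) {	/* insertion failed or already queued */
		if (slot && transid < (*slot)->transid)
			(*slot)->transid = transid;
		free(rec);
	}
}
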
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d378f8b70ef7..bb51bb1fa44f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -342,6 +342,10 @@ static noinline int compress_file_range(struct inode *inode, int will_compress; int compress_type = root->fs_info->compress_type; + /* if this is a small write inside eof, kick off a defragbot */ + if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024) + btrfs_add_inode_defrag(NULL, inode); + actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; @@ -799,6 +803,10 @@ static noinline int cow_file_range(struct inode *inode, disk_num_bytes = num_bytes; ret = 0; + /* if this is a small write inside eof, kick off defrag */ + if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024) + btrfs_add_inode_defrag(trans, inode); + if (start == 0) { /* lets try to make an inline extent */ ret = cow_file_range_inline(trans, root, inode, @@ -5371,6 +5379,9 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, if (IS_ERR(trans)) return ERR_CAST(trans); + if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024) + btrfs_add_inode_defrag(trans, inode); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; alloc_hint = get_extent_allocation_hint(inode, start, len); @@ -6682,6 +6693,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->ordered_data_close = 0; ei->orphan_meta_reserved = 0; ei->dummy_inode = 0; + ei->in_defrag = 0; ei->force_compress = BTRFS_COMPRESS_NONE; ei->delayed_node = NULL; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index c4f17e4e2c9c..85e818ce00c5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -656,6 +656,106 @@ out_unlock: return error; } +/* + * When we're defragging a range, we don't want to kick it off again + * if it is really just waiting for delalloc to send it down. + * If we find a nice big extent or delalloc range for the bytes in the + * file you want to defrag, we return 0 to let you know to skip this + * part of the file + */ +static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh) +{ + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map *em = NULL; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + u64 end; + + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); + read_unlock(&em_tree->lock); + + if (em) { + end = extent_map_end(em); + free_extent_map(em); + if (end - offset > thresh) + return 0; + } + /* if we already have a nice delalloc here, just stop */ + thresh /= 2; + end = count_range_bits(io_tree, &offset, offset + thresh, + thresh, EXTENT_DELALLOC, 1); + if (end >= thresh) + return 0; + return 1; +} + +/* + * helper function to walk through a file and find extents + * newer than a specific transid, and smaller than thresh. 
+ * + * This is used by the defragging code to find new and small + * extents + */ +static int find_new_extents(struct btrfs_root *root, + struct inode *inode, u64 newer_than, + u64 *off, int thresh) +{ + struct btrfs_path *path; + struct btrfs_key min_key; + struct btrfs_key max_key; + struct extent_buffer *leaf; + struct btrfs_file_extent_item *extent; + int type; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + min_key.objectid = inode->i_ino; + min_key.type = BTRFS_EXTENT_DATA_KEY; + min_key.offset = *off; + + max_key.objectid = inode->i_ino; + max_key.type = (u8)-1; + max_key.offset = (u64)-1; + + path->keep_locks = 1; + + while(1) { + ret = btrfs_search_forward(root, &min_key, &max_key, + path, 0, newer_than); + if (ret != 0) + goto none; + if (min_key.objectid != inode->i_ino) + goto none; + if (min_key.type != BTRFS_EXTENT_DATA_KEY) + goto none; + + leaf = path->nodes[0]; + extent = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + + type = btrfs_file_extent_type(leaf, extent); + if (type == BTRFS_FILE_EXTENT_REG && + btrfs_file_extent_num_bytes(leaf, extent) < thresh && + check_defrag_in_cache(inode, min_key.offset, thresh)) { + *off = min_key.offset; + btrfs_free_path(path); + return 0; + } + + if (min_key.offset == (u64)-1) + goto none; + + min_key.offset++; + btrfs_release_path(path); + } +none: + btrfs_free_path(path); + return -ENOENT; +} + static int should_defrag_range(struct inode *inode, u64 start, u64 len, int thresh, u64 *last_len, u64 *skip, u64 *defrag_end) @@ -665,10 +765,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 1; - - if (thresh == 0) - thresh = 256 * 1024; - /* * make sure that once we start defragging and extent, we keep on * defragging it @@ -727,27 +823,176 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, return ret; } -static int btrfs_defrag_file(struct file *file, - struct btrfs_ioctl_defrag_range_args *range) +/* + * it doesn't do much good to defrag one or two pages + * at a time. This pulls in a nice chunk of pages + * to COW and defrag. + * + * It also makes sure the delalloc code has enough + * dirty data to avoid making new small extents as part + * of the defrag + * + * It's a good idea to start RA on this range + * before calling this. 
+ */ +static int cluster_pages_for_defrag(struct inode *inode, + struct page **pages, + unsigned long start_index, + int num_pages) { - struct inode *inode = fdentry(file)->d_inode; - struct btrfs_root *root = BTRFS_I(inode)->root; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + unsigned long file_end; + u64 isize = i_size_read(inode); + u64 page_start; + u64 page_end; + int ret; + int i; + int i_done; struct btrfs_ordered_extent *ordered; - struct page *page; + struct extent_state *cached_state = NULL; + + if (isize == 0) + return 0; + file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + + ret = btrfs_delalloc_reserve_space(inode, + num_pages << PAGE_CACHE_SHIFT); + if (ret) + return ret; +again: + ret = 0; + i_done = 0; + + /* step one, lock all the pages */ + for (i = 0; i < num_pages; i++) { + struct page *page; + page = grab_cache_page(inode->i_mapping, + start_index + i); + if (!page) + break; + + if (!PageUptodate(page)) { + btrfs_readpage(NULL, page); + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + page_cache_release(page); + ret = -EIO; + break; + } + } + isize = i_size_read(inode); + file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + if (!isize || page->index > file_end || + page->mapping != inode->i_mapping) { + /* whoops, we blew past eof, skip this page */ + unlock_page(page); + page_cache_release(page); + break; + } + pages[i] = page; + i_done++; + } + if (!i_done || ret) + goto out; + + if (!(inode->i_sb->s_flags & MS_ACTIVE)) + goto out; + + /* + * so now we have a nice long stream of locked + * and up to date pages, lets wait on them + */ + for (i = 0; i < i_done; i++) + wait_on_page_writeback(pages[i]); + + page_start = page_offset(pages[0]); + page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; + + lock_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, 0, &cached_state, + GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); + if (ordered && + ordered->file_offset + ordered->len > page_start && + ordered->file_offset < page_end) { + btrfs_put_ordered_extent(ordered); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, + &cached_state, GFP_NOFS); + for (i = 0; i < i_done; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_wait_ordered_range(inode, page_start, + page_end - page_start); + goto again; + } + if (ordered) + btrfs_put_ordered_extent(ordered); + + clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, + page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, + GFP_NOFS); + + if (i_done != num_pages) { + atomic_inc(&BTRFS_I(inode)->outstanding_extents); + btrfs_delalloc_release_space(inode, + (num_pages - i_done) << PAGE_CACHE_SHIFT); + } + + + btrfs_set_extent_delalloc(inode, page_start, page_end - 1, + &cached_state); + + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, &cached_state, + GFP_NOFS); + + for (i = 0; i < i_done; i++) { + clear_page_dirty_for_io(pages[i]); + ClearPageChecked(pages[i]); + set_page_extent_mapped(pages[i]); + set_page_dirty(pages[i]); + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + return i_done; +out: + for (i = 0; i < i_done; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); + return ret; + +} + +int btrfs_defrag_file(struct inode *inode, struct file *file, + struct btrfs_ioctl_defrag_range_args *range, + u64 newer_than, unsigned long 
max_to_defrag) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *disk_super; + struct file_ra_state *ra = NULL; unsigned long last_index; - unsigned long ra_pages = root->fs_info->bdi.ra_pages; - unsigned long total_read = 0; u64 features; - u64 page_start; - u64 page_end; u64 last_len = 0; u64 skip = 0; u64 defrag_end = 0; + u64 newer_off = range->start; + int newer_left = 0; unsigned long i; int ret; + int defrag_count = 0; int compress_type = BTRFS_COMPRESS_ZLIB; + int extent_thresh = range->extent_thresh; + int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT; + u64 new_align = ~((u64)128 * 1024 - 1); + struct page **pages = NULL; + + if (extent_thresh == 0) + extent_thresh = 256 * 1024; if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { if (range->compress_type > BTRFS_COMPRESS_TYPES) @@ -759,6 +1004,27 @@ static int btrfs_defrag_file(struct file *file, if (inode->i_size == 0) return 0; + /* + * if we were not given a file, allocate a readahead + * context + */ + if (!file) { + ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; + file_ra_state_init(ra, inode->i_mapping); + } else { + ra = &file->f_ra; + } + + pages = kmalloc(sizeof(struct page *) * newer_cluster, + GFP_NOFS); + if (!pages) { + ret = -ENOMEM; + goto out_ra; + } + + /* find the last page to defrag */ if (range->start + range->len > range->start) { last_index = min_t(u64, inode->i_size - 1, range->start + range->len - 1) >> PAGE_CACHE_SHIFT; @@ -766,11 +1032,37 @@ static int btrfs_defrag_file(struct file *file, last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; } - i = range->start >> PAGE_CACHE_SHIFT; - while (i <= last_index) { - if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, + if (newer_than) { + ret = find_new_extents(root, inode, newer_than, + &newer_off, 64 * 1024); + if (!ret) { + range->start = newer_off; + /* + * we always align our defrag to help keep + * the extents in the file evenly spaced + */ + i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + newer_left = newer_cluster; + } else + goto out_ra; + } else { + i = range->start >> PAGE_CACHE_SHIFT; + } + if (!max_to_defrag) + max_to_defrag = last_index - 1; + + while (i <= last_index && defrag_count < max_to_defrag) { + /* + * make sure we stop running if someone unmounts + * the FS + */ + if (!(inode->i_sb->s_flags & MS_ACTIVE)) + break; + + if (!newer_than && + !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, - range->extent_thresh, + extent_thresh, &last_len, &skip, &defrag_end)) { unsigned long next; @@ -782,92 +1074,39 @@ static int btrfs_defrag_file(struct file *file, i = max(i + 1, next); continue; } - - if (total_read % ra_pages == 0) { - btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, - min(last_index, i + ra_pages - 1)); - } - total_read++; - mutex_lock(&inode->i_mutex); if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) BTRFS_I(inode)->force_compress = compress_type; - ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); - if (ret) - goto err_unlock; -again: - if (inode->i_size == 0 || - i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { - ret = 0; - goto err_reservations; - } + btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster); - page = grab_cache_page(inode->i_mapping, i); - if (!page) { - ret = -ENOMEM; - goto err_reservations; - } - - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - page_cache_release(page); - ret = -EIO; - goto err_reservations; - } - } - - if 
(page->mapping != inode->i_mapping) { - unlock_page(page); - page_cache_release(page); - goto again; - } - - wait_on_page_writeback(page); + ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster); + if (ret < 0) + goto out_ra; - if (PageDirty(page)) { - btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); - goto loop_unlock; - } + defrag_count += ret; + balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret); + i += ret; - page_start = (u64)page->index << PAGE_CACHE_SHIFT; - page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); + if (newer_than) { + if (newer_off == (u64)-1) + break; - ordered = btrfs_lookup_ordered_extent(inode, page_start); - if (ordered) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - goto again; + newer_off = max(newer_off + 1, + (u64)i << PAGE_CACHE_SHIFT); + + ret = find_new_extents(root, inode, + newer_than, &newer_off, + 64 * 1024); + if (!ret) { + range->start = newer_off; + i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + newer_left = newer_cluster; + } else { + break; + } + } else { + i++; } - set_page_extent_mapped(page); - - /* - * this makes sure page_mkwrite is called on the - * page if it is dirtied again later - */ - clear_page_dirty_for_io(page); - clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, - page_end, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, GFP_NOFS); - - btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); - ClearPageChecked(page); - set_page_dirty(page); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - -loop_unlock: - unlock_page(page); - page_cache_release(page); - mutex_unlock(&inode->i_mutex); - - balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); - i++; } if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) @@ -899,12 +1138,14 @@ loop_unlock: btrfs_set_super_incompat_flags(disk_super, features); } - return 0; + if (!file) + kfree(ra); + return defrag_count; -err_reservations: - btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); -err_unlock: - mutex_unlock(&inode->i_mutex); +out_ra: + if (!file) + kfree(ra); + kfree(pages); return ret; } @@ -1756,7 +1997,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) /* the rest are all set to zero by kzalloc */ range->len = (u64)-1; } - ret = btrfs_defrag_file(file, range); + ret = btrfs_defrag_file(fdentry(file)->d_inode, file, + range, 0, 0); + if (ret > 0) + ret = 0; kfree(range); break; default: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index e5e0ee2cad4e..ad1ea789fcb4 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -181,37 +181,6 @@ struct btrfs_ioctl_clone_range_args { #define BTRFS_DEFRAG_RANGE_COMPRESS 1 #define BTRFS_DEFRAG_RANGE_START_IO 2 -struct btrfs_ioctl_defrag_range_args { - /* start of the defrag operation */ - __u64 start; - - /* number of bytes to defrag, use (u64)-1 to say all */ - __u64 len; - - /* - * flags for the operation, which can include turning - * on compression for this one defrag - */ - __u64 flags; - - /* - * any extent bigger than this will be considered - * already defragged. Use 0 to take the kernel default - * Use 1 to say every single extent must be rewritten - */ - __u32 extent_thresh; - - /* - * which compression method to use if turning on compression - * for this defrag operation. 
If unspecified, zlib will - * be used - - /* spare for later */ - __u32 unused[4]; -}; - struct btrfs_ioctl_space_info { __u64 flags; __u64 total_bytes; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index cd0c7cd2c8fb..28e3cb2607ff 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -160,7 +160,7 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_subvolrootid, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, }; static match_table_t tokens = { @@ -191,6 +191,7 @@ static match_table_t tokens = { {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, {Opt_enospc_debug, "enospc_debug"}, {Opt_subvolrootid, "subvolrootid=%d"}, + {Opt_defrag, "autodefrag"}, {Opt_err, NULL}, }; @@ -369,6 +370,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_enospc_debug: btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); break; + case Opt_defrag: + printk(KERN_INFO "btrfs: enabling auto defrag"); + btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); -- cgit v1.2.2 From c309df07868baa8b05d2a70637096465746fdbb5 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 26 May 2011 17:43:59 -0400 Subject: Btrfs: return -ENOMEM in clear_extent_bit The btrfs releasepage function depends on ENOMEM coming back when it is called in atomic context. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a90c4a12556b..0e0fe0f6ec75 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -485,7 +485,8 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - BUG_ON(!prealloc); + if (!prealloc) + return -ENOMEM; } spin_lock(&tree->lock); -- cgit v1.2.2 From 00d01bc17cc2807292303961519d9c005794eb1d Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Wed, 25 May 2011 12:22:50 +0000 Subject: btrfs scrub: don't coalesce pages that are logically discontiguous scrub_page collects several pages into one bio as long as they are physically contiguous. As we only save one logical address for the whole bio, don't collect pages that are physically contiguous but logically discontiguous. Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 87a2f1273136..6dfed0c27ac3 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -631,7 +631,8 @@ again: if (sbio->count == 0) { sbio->physical = physical; sbio->logical = logical; - } else if (sbio->physical + sbio->count * PAGE_SIZE != physical) { + } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || + sbio->logical + sbio->count * PAGE_SIZE != logical) { scrub_submit(sdev); goto again; } -- cgit v1.2.2 From a47d6b70e280401d553e7cac6f5750870de1ad21 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 May 2011 06:38:30 +0000 Subject: Btrfs: setup free ino caching in a more asynchronous way For a filesystem that has lots of files in it, the first time we mount it with free ino caching support, it can take quite a long time to set up the caching before we can create new files.
Here we fill the cache with [highest_ino, BTRFS_LAST_FREE_OBJECTID] before we start the caching thread to search through the extent tree. Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/inode-map.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 000970512624..3262cd17a12f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -60,12 +60,12 @@ again: while (1) { smp_mb(); - if (fs_info->closing > 1) + if (fs_info->closing) goto out; leaf = path->nodes[0]; slot = path->slots[0]; - if (path->slots[0] >= btrfs_header_nritems(leaf)) { + if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; @@ -100,7 +100,7 @@ again: if (key.type != BTRFS_INODE_ITEM_KEY) goto next; - if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) + if (key.objectid >= root->highest_objectid) break; if (last != (u64)-1 && last + 1 != key.objectid) { @@ -114,9 +114,9 @@ next: path->slots[0]++; } - if (last < BTRFS_LAST_FREE_OBJECTID - 1) { + if (last < root->highest_objectid - 1) { __btrfs_add_free_space(ctl, last + 1, - BTRFS_LAST_FREE_OBJECTID - last - 1); + root->highest_objectid - last - 1); } spin_lock(&root->cache_lock); @@ -136,8 +136,10 @@ out: static void start_caching(struct btrfs_root *root) { + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct task_struct *tsk; int ret; + u64 objectid; spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { @@ -156,6 +158,19 @@ static void start_caching(struct btrfs_root *root) return; } + /* + * It can be quite time-consuming to fill the cache by searching + * through the extent tree, and this can keep ino allocation path + * waiting. Therefore at start we quickly find out the highest + * inode number and we know we can use inode numbers which fall in + * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID]. + */ + ret = btrfs_find_free_objectid(root, &objectid); + if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) { + __btrfs_add_free_space(ctl, objectid, + BTRFS_LAST_FREE_OBJECTID - objectid + 1); + } + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", root->root_key.objectid); BUG_ON(IS_ERR(tsk)); @@ -209,7 +224,8 @@ again: start_caching(root); - if (objectid <= root->cache_progress) + if (objectid <= root->cache_progress || + objectid > root->highest_objectid) __btrfs_add_free_space(ctl, objectid, 1); else __btrfs_add_free_space(pinned, objectid, 1); -- cgit v1.2.2 From aa38572954ade525817fe88c54faebf85e5a61c0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 27 May 2011 06:53:02 -0400 Subject: fs: pass exact type of data dirties to ->dirty_inode Tell the filesystem if we just updated timestamp (I_DIRTY_SYNC) or anything else, so that the filesystem can track internally if it needs to push out a transaction for fdatasync or not. This is just the prototype change with no user for it yet. I plan to push large XFS changes for the next merge window, and getting this trivial infrastructure in this window would help a lot to avoid tree interdependencies. Also remove incorrect comments that ->dirty_inode can't block. That has been changed a long time ago, and many implementations rely on it. 
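A hedged sketch of the kind of consumer this enables (not from this patch; the examplefs name is made up): a filesystem's ->dirty_inode can now tell a timestamp-only update apart and skip transaction work for it.

#include <linux/fs.h>

static void examplefs_dirty_inode(struct inode *inode, int flags)
{
	if (flags == I_DIRTY_SYNC) {
		/* only timestamps changed: fdatasync will not need a
		 * transaction for this, so record it cheaply instead */
		return;
	}
	/* more than timestamps changed: join/log a transaction here */
}
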
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/btrfs/ctree.h | 2 +- fs/btrfs/inode.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..d2177e7ad647 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2522,7 +2522,7 @@ int btrfs_readpage(struct file *file, struct page *page); void btrfs_evict_inode(struct inode *inode); void btrfs_put_inode(struct inode *inode); int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); -void btrfs_dirty_inode(struct inode *inode); +void btrfs_dirty_inode(struct inode *inode, int flags); struct inode *btrfs_alloc_inode(struct super_block *sb); void btrfs_destroy_inode(struct inode *inode); int btrfs_drop_inode(struct inode *inode); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..ecff7d7a505f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4396,7 +4396,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) * FIXME, needs more benchmarking...there are no reasons other than performance * to keep or drop this code. */ -void btrfs_dirty_inode(struct inode *inode) +void btrfs_dirty_inode(struct inode *inode, int flags) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; -- cgit v1.2.2 From 174ba50915b08dcfd07c8b5fb795b46a165fa09a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 27 May 2011 10:03:58 -0400 Subject: Btrfs: use the device_list_mutex during write_dev_supers write_dev_supers was changed to use RCU to protect the list of devices, but it was then sleeping while it actually wrote the supers. This fixes it to just use the mutex, since we really don't any concurrency in write_dev_supers anyway. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b2588a552658..98b6a71decba 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2310,7 +2310,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; - rcu_read_lock(); + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); head = &root->fs_info->fs_devices->devices; list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) { @@ -2355,7 +2355,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) if (ret) total_errors++; } - rcu_read_unlock(); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); -- cgit v1.2.2 From 9e1f1de02c2275d7172e18dc4e7c2065777611bf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 3 Jun 2011 18:24:58 -0400 Subject: more conservative S_NOSEC handling Caching "we have already removed suid/caps" was overenthusiastic as merged. On network filesystems we might have had suid/caps set on another client, silently picked by this client on revalidate, all of that *without* clearing the S_NOSEC flag. AFAICS, the only reasonably sane way to deal with that is * new superblock flag; unless set, S_NOSEC is not going to be set. 
* local block filesystems set it in their ->mount() (more accurately, mount_bdev() does, so does btrfs ->mount(), users of mount_bdev() other than local block ones clear it) * if any network filesystem (or a cluster one) wants to use S_NOSEC, it'll need to set MS_NOSEC in sb->s_flags *AND* take care to clear S_NOSEC when inode attribute changes are picked from other clients. It's not an earth-shattering hole (anybody that can set suid on another client will almost certainly be able to write to the file before doing that anyway), but it's a bug that needs fixing. Signed-off-by: Al Viro --- fs/btrfs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 9b2e7e5bc3ef..d158b672a2d2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -819,7 +819,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } else { char b[BDEVNAME_SIZE]; - s->s_flags = flags; + s->s_flags = flags | MS_NOSEC; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); error = btrfs_fill_super(s, fs_devices, data, flags & MS_SILENT ? 1 : 0); -- cgit v1.2.2 From 1bc8779349d6278e2713a1ff94418c2a6746a791 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Sat, 28 May 2011 21:57:55 +0200 Subject: btrfs: scrub: don't reuse bios and pages The current scrub implementation reuses bios and pages as often as possible, allocating them only on start and releasing them when finished. This leads to more problems with the block layer than it's worth. The elevator gets confused when there are more pages added to the bio than bi_size suggests. This patch completely rips out the reuse of bios and pages and allocates them freshly for each submit. Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 114 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 49 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 6dfed0c27ac3..2d1f8909a8e1 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -117,33 +117,37 @@ static void scrub_free_csums(struct scrub_dev *sdev) } } +static void scrub_free_bio(struct bio *bio) +{ + int i; + struct page *last_page = NULL; + + if (!bio) + return; + + for (i = 0; i < bio->bi_vcnt; ++i) { + if (bio->bi_io_vec[i].bv_page == last_page) + continue; + last_page = bio->bi_io_vec[i].bv_page; + __free_page(last_page); + } + bio_put(bio); +} + static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) { int i; - int j; - struct page *last_page; if (!sdev) return; for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { struct scrub_bio *sbio = sdev->bios[i]; - struct bio *bio; if (!sbio) break; - bio = sbio->bio; - if (bio) { - last_page = NULL; - for (j = 0; j < bio->bi_vcnt; ++j) { - if (bio->bi_io_vec[j].bv_page == last_page) - continue; - last_page = bio->bi_io_vec[j].bv_page; - __free_page(last_page); - } - bio_put(bio); - } + scrub_free_bio(sbio->bio); kfree(sbio); } @@ -156,8 +160,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) { struct scrub_dev *sdev; int i; - int j; - int ret; struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; sdev = kzalloc(sizeof(*sdev), GFP_NOFS); @@ -165,7 +167,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) goto nomem; sdev->dev = dev; for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { - struct bio *bio; struct scrub_bio *sbio; sbio = kzalloc(sizeof(*sbio), GFP_NOFS); if (!sbio) goto
nomem; sdev->bios[i] = sbio; - bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); - if (!bio) - goto nomem; - sbio->index = i; sbio->sdev = sdev; - sbio->bio = bio; sbio->count = 0; sbio->work.func = scrub_checksum; - bio->bi_private = sdev->bios[i]; - bio->bi_end_io = scrub_bio_end_io; - bio->bi_sector = 0; - bio->bi_bdev = dev->bdev; - bio->bi_size = 0; - - for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) { - struct page *page; - page = alloc_page(GFP_NOFS); - if (!page) - goto nomem; - - ret = bio_add_page(bio, page, PAGE_SIZE, 0); - if (!ret) - goto nomem; - } - WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO); if (i != SCRUB_BIOS_PER_DEV-1) sdev->bios[i]->next_free = i + 1; @@ -394,6 +373,7 @@ static void scrub_bio_end_io(struct bio *bio, int err) struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; sbio->err = err; + sbio->bio = bio; btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); } @@ -453,6 +433,8 @@ static void scrub_checksum(struct btrfs_work *work) } out: + scrub_free_bio(sbio->bio); + sbio->bio = NULL; spin_lock(&sdev->list_lock); sbio->next_free = sdev->first_free; sdev->first_free = sbio->index; @@ -583,25 +565,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) static int scrub_submit(struct scrub_dev *sdev) { struct scrub_bio *sbio; + struct bio *bio; + int i; if (sdev->curr == -1) return 0; sbio = sdev->bios[sdev->curr]; - sbio->bio->bi_sector = sbio->physical >> 9; - sbio->bio->bi_size = sbio->count * PAGE_SIZE; - sbio->bio->bi_next = NULL; - sbio->bio->bi_flags |= 1 << BIO_UPTODATE; - sbio->bio->bi_comp_cpu = -1; - sbio->bio->bi_bdev = sdev->dev->bdev; + bio = bio_alloc(GFP_NOFS, sbio->count); + if (!bio) + goto nomem; + + bio->bi_private = sbio; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_bdev = sdev->dev->bdev; + bio->bi_sector = sbio->physical >> 9; + + for (i = 0; i < sbio->count; ++i) { + struct page *page; + int ret; + + page = alloc_page(GFP_NOFS); + if (!page) + goto nomem; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) { + __free_page(page); + goto nomem; + } + } + sbio->err = 0; sdev->curr = -1; atomic_inc(&sdev->in_flight); - submit_bio(0, sbio->bio); + submit_bio(READ, bio); return 0; + +nomem: + scrub_free_bio(bio); + + return -ENOMEM; } static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, @@ -633,7 +640,11 @@ again: sbio->logical = logical; } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || sbio->logical + sbio->count * PAGE_SIZE != logical) { - scrub_submit(sdev); + int ret; + + ret = scrub_submit(sdev); + if (ret) + return ret; goto again; } sbio->spag[sbio->count].flags = flags; @@ -645,8 +656,13 @@ again: memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); } ++sbio->count; - if (sbio->count == SCRUB_PAGES_PER_BIO || force) - scrub_submit(sdev); + if (sbio->count == SCRUB_PAGES_PER_BIO || force) { + int ret; + + ret = scrub_submit(sdev); + if (ret) + return ret; + } return 0; } -- cgit v1.2.2 From 17aca1c987cff89dc4279371857035da902c8854 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Jun 2011 01:13:45 -0400 Subject: Btrfs: fix uninit variable in the delayed inode code The nitems counter needs to start at zero Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index b46d94d1dea8..c61c32cf0f71 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -678,6 +678,7 @@ static int btrfs_batch_insert_items(struct 
btrfs_trans_handle *trans, INIT_LIST_HEAD(&head); next = item; + nitems = 0; /* * count the number of the continuous items that we can insert in batch -- cgit v1.2.2 From 211f96c24f117fcc6e9e2431e40d92f4de22625e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Jun 2011 01:26:53 -0400 Subject: Btrfs: make sure we don't overflow the free space cache crc page The free space cache uses only one page for crcs right now, which means we can't have a cache file bigger than the crcs we can fit in the first page. This adds a check to enforce that restriction. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index dd38d4c3a599..1cb72394498c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -590,10 +590,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + + /* Since the first page has all of our checksums and our generation we + * need to calculate the offset into the page that we can start writing + * our entries. + */ + first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); + filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & ~(root->sectorsize - 1), (u64)-1); + /* make sure we don't overflow that first page */ + if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) { + /* this is really the same as running out of space, where we also return 0 */ + printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n"); + ret = 0; + goto out_update; + } + /* We need a checksum per page. */ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); if (!crc) @@ -605,12 +620,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, return -1; } - /* Since the first page has all of our checksums and our generation we - * need to calculate the offset into the page that we can start writing - * our entries. - */ - first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); - /* Get the cluster for this block_group if it exists */ if (block_group && !list_empty(&block_group->cluster_list)) cluster = list_entry(block_group->cluster_list.next, @@ -872,12 +881,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, ret = 1; out_free: + kfree(checksums); + kfree(pages); + +out_update: if (ret != 1) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); BTRFS_I(inode)->generation = 0; } - kfree(checksums); - kfree(pages); btrfs_update_inode(trans, root, inode); return ret; } -- cgit v1.2.2 From ca456ae280c0646e1e571c3b9a3834c55e90adfe Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 1 Jun 2011 09:42:49 +0000 Subject: Btrfs: don't save the inode cache in non-FS roots This adds extra checks to make sure the inode map we are caching really belongs to a FS root instead of a special relocation tree. It prevents crashes during balancing operations. 
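The guard being added boils down to the following sketch (the helper name here is illustrative only; the actual patch open-codes the test at the top of btrfs_save_ino_cache(), as the diff below shows):

        /* only fs trees and subvolume/snapshot roots need the ino cache */
        static bool root_wants_ino_cache(struct btrfs_root *root)
        {
                u64 objectid = root->root_key.objectid;

                return objectid == BTRFS_FS_TREE_OBJECTID ||
                       (objectid >= BTRFS_FIRST_FREE_OBJECTID &&
                        objectid <= BTRFS_LAST_FREE_OBJECTID);
        }
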
Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/inode-map.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 3262cd17a12f..04f7199facb4 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -388,6 +388,12 @@ int btrfs_save_ino_cache(struct btrfs_root *root, int prealloc; bool retry = false; + /* only fs tree and subvol/snap needs ino cache */ + if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID && + (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID || + root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID)) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; -- cgit v1.2.2 From 5f3f302a6f4cb74906c05fad1d03fc5e95c7e5af Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Mon, 30 May 2011 08:36:16 +0000 Subject: btrfs: false BUG_ON when degraded In degraded mode the struct btrfs_device of missing devs doesn't have device->name set. A kstrdup of NULL correctly returns NULL. Don't BUG in this case. Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c48214ef5c09..da541dfca2e3 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) BUG_ON(!new_device); memcpy(new_device, device, sizeof(*new_device)); new_device->name = kstrdup(device->name, GFP_NOFS); - BUG_ON(!new_device->name); + BUG_ON(device->name && !new_device->name); new_device->bdev = NULL; new_device->writeable = 0; new_device->in_fs_metadata = 0; -- cgit v1.2.2 From d132a538d258f8f52fd0cd8b5017755f4e915386 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 31 May 2011 19:33:33 +0000 Subject: Btrfs: don't save the inode cache if we are deleting this root With xfstest 254 I can panic the box every time with the inode number caching stuff on. This is because we clean the inodes out when we delete the subvolume, but then we write out the inode cache which adds an inode to the subvolume inode tree, and then when it gets evicted again the root gets added back on the dead roots list and is deleted again, so we have a double free. To stop this from happening just return 0 if refs is 0 (and we're not the tree root since tree root always has refs of 0). With this fix 254 no longer panics. Thanks, Signed-off-by: Josef Bacik Tested-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/inode-map.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 04f7199facb4..2d0d50067a7b 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -394,6 +394,11 @@ int btrfs_save_ino_cache(struct btrfs_root *root, root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID)) return 0; + /* Don't save inode cache if we are deleting this root */ + if (btrfs_root_refs(&root->root_item) == 0 && + root != root->fs_info->tree_root) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; -- cgit v1.2.2 From a4689d2bd3b00dcf5c4320f06e0ab88810fbff9c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 31 May 2011 17:08:14 +0000 Subject: btrfs: use btrfs_ino to access inode number commit 4cb5300bc ("Btrfs: add mount -o auto_defrag") accesses the inode number directly while it should use the helper with the new inode number allocator.
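Illustrated with a hypothetical caller (btrfs_ino() is the real helper; only this example function is made up):

        static void example_defrag_setup(struct inode *inode,
                                         struct inode_defrag *defrag)
        {
                /* wrong with the new allocator: inode->i_ino is the VFS
                 * inode number, not necessarily the btrfs objectid */
                /* defrag->ino = inode->i_ino; */

                /* right: btrfs_ino() returns the objectid to key on */
                defrag->ino = btrfs_ino(inode);
        }
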
Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/file.c | 2 +- fs/btrfs/ioctl.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e3a1b0c2394c..982b5ea9762f 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (!defrag) return -ENOMEM; - defrag->ino = inode->i_ino; + defrag->ino = btrfs_ino(inode); defrag->transid = transid; defrag->root = root->root_key.objectid; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 74c80595d707..ac37040e426a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -706,16 +706,17 @@ static int find_new_extents(struct btrfs_root *root, struct btrfs_file_extent_item *extent; int type; int ret; + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = BTRFS_EXTENT_DATA_KEY; min_key.offset = *off; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; max_key.type = (u8)-1; max_key.offset = (u64)-1; @@ -726,7 +727,7 @@ static int find_new_extents(struct btrfs_root *root, path, 0, newer_than); if (ret != 0) goto none; - if (min_key.objectid != inode->i_ino) + if (min_key.objectid != ino) goto none; if (min_key.type != BTRFS_EXTENT_DATA_KEY) goto none; -- cgit v1.2.2 From e7786c3ae517b2c433edc91714e86be770e9f1ce Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Sat, 28 May 2011 20:58:38 +0000 Subject: btrfs: scrub: add explicit plugging With the removal of the implicit plugging scrub ends up doing more and smaller I/O than necessary. This patch adds explicit plugging per chunk. Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2d1f8909a8e1..1204eab94028 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -348,9 +348,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, int ret; DECLARE_COMPLETION_ONSTACK(complete); - /* we are going to wait on this IO */ - rw |= REQ_SYNC; - bio = bio_alloc(GFP_NOFS, 1); bio->bi_bdev = bdev; bio->bi_sector = sector; @@ -359,6 +356,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, bio->bi_private = &complete; submit_bio(rw, bio); + /* this will also unplug the queue */ wait_for_completion(&complete); ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); @@ -743,6 +741,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; + struct blk_plug plug; u64 flags; int ret; int slot; @@ -847,6 +846,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, * the scrub. This might currently (crc32) end up to be about 1MB */ start_stripe = 0; + blk_start_plug(&plug); again: logical = base + offset + start_stripe * increment; for (i = start_stripe; i < nstripes; ++i) { @@ -988,6 +988,7 @@ next: scrub_submit(sdev); out: + blk_finish_plug(&plug); btrfs_free_path(path); return ret < 0 ? 
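The mechanism is the stock block-layer on-stack plug; roughly (the exact placement in scrub_stripe() is in the diff below):

        struct blk_plug plug;

        blk_start_plug(&plug);
        /* queue up all bios for one chunk via submit_bio() ... */
        blk_finish_plug(&plug);        /* flush the batched requests */
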
ret : 0; } -- cgit v1.2.2 From 4b9465cb9e3859186eefa1ca3b990a5849386320 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Jun 2011 09:36:29 -0400 Subject: Btrfs: add mount -o inode_cache This makes the inode map cache default to off until we fix the overflow problem when the free space crcs don't fit inside a single page. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/free-space-cache.c | 6 ++++++ fs/btrfs/inode-map.c | 20 ++++++++++++++++++++ fs/btrfs/super.c | 8 +++++++- 4 files changed, 34 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f98c2005715..4958ef5417d6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1340,6 +1340,7 @@ struct btrfs_ioctl_defrag_range_args { #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) +#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 1cb72394498c..bffa5c4a633b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2536,6 +2536,9 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) int ret = 0; u64 root_gen = btrfs_root_generation(&root->root_item); + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + /* * If we're unmounting then just return, since this does a search on the * normal root and not the commit root and we could deadlock. @@ -2575,6 +2578,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, struct inode *inode; int ret; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + inode = lookup_free_ino_inode(root, path); if (IS_ERR(inode)) return 0; diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 2d0d50067a7b..cb79b8975c9f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -38,6 +38,9 @@ static int caching_kthread(void *data) int slot; int ret; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -141,6 +144,9 @@ static void start_caching(struct btrfs_root *root) int ret; u64 objectid; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return; + spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { spin_unlock(&root->cache_lock); @@ -178,6 +184,9 @@ static void start_caching(struct btrfs_root *root) int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) { + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return btrfs_find_free_objectid(root, objectid); + again: *objectid = btrfs_find_ino_for_alloc(root); @@ -201,6 +210,10 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid) { struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; + + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return; + again: if (root->cached == BTRFS_CACHE_FINISHED) { __btrfs_add_free_space(ctl, objectid, 1); @@ -250,6 +263,9 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) struct rb_node *n; u64 count; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return; + while (1) { n = rb_first(rbroot); if (!n) @@ -399,9 +415,13 @@ int btrfs_save_ino_cache(struct btrfs_root *root, root != root->fs_info->tree_root) return 0; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; + again: inode = 
lookup_free_ino_inode(root, path); if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 28e3cb2607ff..3559d0b3518a 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -160,7 +160,8 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, + Opt_inode_cache, Opt_err, }; static match_table_t tokens = { @@ -192,6 +193,7 @@ static match_table_t tokens = { {Opt_enospc_debug, "enospc_debug"}, {Opt_subvolrootid, "subvolrootid=%d"}, {Opt_defrag, "autodefrag"}, + {Opt_inode_cache, "inode_cache"}, {Opt_err, NULL}, }; @@ -360,6 +362,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) printk(KERN_INFO "btrfs: enabling disk space caching\n"); btrfs_set_opt(info->mount_opt, SPACE_CACHE); break; + case Opt_inode_cache: + printk(KERN_INFO "btrfs: enabling inode map caching\n"); + btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE); + break; case Opt_clear_cache: printk(KERN_INFO "btrfs: force clearing of disk cache\n"); btrfs_set_opt(info->mount_opt, CLEAR_CACHE); -- cgit v1.2.2 From 7841cb2898f66a73062c64d0ef5733dde7279e46 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 31 May 2011 18:07:27 +0200 Subject: btrfs: add helper for fs_info->closing wrap checking of filesystem 'closing' flag and fix a few missing memory barriers. Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 9 +++++++++ fs/btrfs/extent-tree.c | 3 +-- fs/btrfs/file.c | 4 ++-- fs/btrfs/free-space-cache.c | 10 ++++------ fs/btrfs/inode-map.c | 3 +-- fs/btrfs/inode.c | 3 +-- fs/btrfs/scrub.c | 2 +- fs/btrfs/transaction.c | 2 +- 8 files changed, 20 insertions(+), 16 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4958ef5417d6..8490ee063709 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2354,6 +2354,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, struct extent_buffer *parent); +static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) +{ + /* + * Get synced with close_ctree() + */ + smp_mb(); + return fs_info->closing; +} + /* root-item.c */ int btrfs_find_root_ref(struct btrfs_root *tree_root, struct btrfs_path *path, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c9173a7827b0..5b9b6b6df242 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -366,8 +366,7 @@ again: nritems = btrfs_header_nritems(leaf); while (1) { - smp_mb(); - if (fs_info->closing > 1) { + if (btrfs_fs_closing(fs_info) > 1) { last = (u64)-1; break; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 982b5ea9762f..fa4ef18b66b1 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (!btrfs_test_opt(root, AUTO_DEFRAG)) return 0; - if (root->fs_info->closing) + if (btrfs_fs_closing(root->fs_info)) return 0; if (BTRFS_I(inode)->in_defrag) @@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) first_ino = defrag->ino + 1; rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) goto next_free; spin_unlock(&fs_info->defrag_inodes_lock); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index bffa5c4a633b..ad144736a5fd 100644 
--- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return inode; spin_lock(&block_group->lock); - if (!root->fs_info->closing) { + if (!btrfs_fs_closing(root->fs_info)) { block_group->inode = igrab(inode); block_group->iref = 1; } @@ -493,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, * If we're unmounting then just return, since this does a search on the * normal root and not the commit root and we could deadlock. */ - smp_mb(); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) return 0; /* @@ -2513,7 +2512,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root, return inode; spin_lock(&root->cache_lock); - if (!root->fs_info->closing) + if (!btrfs_fs_closing(root->fs_info)) root->cache_inode = igrab(inode); spin_unlock(&root->cache_lock); @@ -2543,8 +2542,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) * If we're unmounting then just return, since this does a search on the * normal root and not the commit root and we could deadlock. */ - smp_mb(); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) return 0; path = btrfs_alloc_path(); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index cb79b8975c9f..b4087e0fa871 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -62,8 +62,7 @@ again: goto out; while (1) { - smp_mb(); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) goto out; leaf = path->nodes[0]; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a83e44bf3206..02ff4a1b968b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4266,8 +4266,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (BTRFS_I(inode)->dummy_inode) return 0; - smp_mb(); - if (root->fs_info->closing && is_free_space_inode(root, inode)) + if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode)) nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 1204eab94028..df50fd1eca8f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1183,7 +1183,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, int ret; struct btrfs_device *dev; - if (root->fs_info->closing) + if (btrfs_fs_closing(root->fs_info)) return -EINVAL; /* diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2d5c6d2aa4e4..dd719662340e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -817,7 +817,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) btrfs_btree_balance_dirty(info->tree_root, nr); cond_resched(); - if (root->fs_info->closing || ret != -EAGAIN) + if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN) break; } root->defrag_running = 0; -- cgit v1.2.2 From aa0467d8d2a00e75b2bb6a56a4ee6d70c5d1928f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 3 Jun 2011 16:29:08 +0200 Subject: btrfs: fix uninitialized variable warning With Linus' tree, today's linux-next build (powerpc ppc64_defconfig) produced this warning: fs/btrfs/delayed-inode.c: In function 'btrfs_delayed_update_inode': fs/btrfs/delayed-inode.c:1598:6: warning: 'ret' may be used uninitialized in this function Introduced by commit 16cdcec736cd ("btrfs: implement delayed inode items operation").
This fixes a bug in btrfs_update_inode(): if the returned value from btrfs_delayed_update_inode is nonzero garbage, inode stat data are not updated and several call paths may hit a BUG_ON or fail with strange code. Reported-by: Stephen Rothwell Signed-off-by: David Sterba --- fs/btrfs/delayed-inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index c61c32cf0f71..6462c29d2d37 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1595,7 +1595,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { struct btrfs_delayed_node *delayed_node; - int ret; + int ret = 0; delayed_node = btrfs_get_or_create_delayed_node(inode); if (IS_ERR(delayed_node)) -- cgit v1.2.2 From 86d4a77ba3dc4ace238a0556541a41df2bd71d49 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 25 May 2011 13:03:16 -0400 Subject: Btrfs: cache bitmaps when searching for a cluster If we are looking for a cluster in a particularly sparse or fragmented block group, we will do a lot of looping through the free space tree looking for various things, and if we need to look at bitmaps we will end up doing the whole dance twice. So instead add the bitmap entries to a temporary list so if we have to do the bitmap search we can just look through the list of entries we've found quickly instead of having to loop through the entire tree again. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 54 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 49 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ad144736a5fd..930c07f79b3d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2144,6 +2144,7 @@ again: */ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2166,6 +2167,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, * extent entry. */ while (entry->bitmap) { + if (list_empty(&entry->list)) + list_add_tail(&entry->list, bitmaps); node = rb_next(&entry->offset_index); if (!node) return -ENOSPC; @@ -2185,8 +2188,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, return -ENOSPC; entry = rb_entry(node, struct btrfs_free_space, offset_index); - if (entry->bitmap) + if (entry->bitmap) { + if (list_empty(&entry->list)) + list_add_tail(&entry->list, bitmaps); continue; + } + /* * we haven't filled the empty size and the window is * very large. reset and try again @@ -2240,6 +2247,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, */ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2250,10 +2258,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, if (ctl->total_bitmaps == 0) return -ENOSPC; + /* + * First check our cached list of bitmaps and see if there is an entry + * here that will work.
+ */ + list_for_each_entry(entry, bitmaps, list) { + if (entry->bytes < min_bytes) + continue; + ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, + bytes, min_bytes); + if (!ret) + return 0; + } + + /* + * If we do have entries on our list and we are here then we didn't find + * anything, so go ahead and get the next entry after the last entry in + * this list and start the search from there. + */ + if (!list_empty(bitmaps)) { + entry = list_entry(bitmaps->prev, struct btrfs_free_space, + list); + node = rb_next(&entry->offset_index); + if (!node) + return -ENOSPC; + entry = rb_entry(node, struct btrfs_free_space, offset_index); + goto search; + } + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); if (!entry) return -ENOSPC; +search: node = &entry->offset_index; do { entry = rb_entry(node, struct btrfs_free_space, offset_index); @@ -2284,6 +2321,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, u64 offset, u64 bytes, u64 empty_size) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct list_head bitmaps; + struct btrfs_free_space *entry, *tmp; u64 min_bytes; int ret; @@ -2322,11 +2361,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, goto out; } - ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes, - min_bytes); + INIT_LIST_HEAD(&bitmaps); + ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, + bytes, min_bytes); if (ret) - ret = setup_cluster_bitmap(block_group, cluster, offset, - bytes, min_bytes); + ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, + offset, bytes, min_bytes); + + /* Clear our temporary list */ + list_for_each_entry_safe(entry, tmp, &bitmaps, list) + list_del_init(&entry->list); if (!ret) { atomic_inc(&block_group->count); -- cgit v1.2.2 From 3de85bb95cc50d0977cbb7a0c605e894be4c790d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 25 May 2011 13:07:37 -0400 Subject: Btrfs: noinline the cluster searching functions When profiling the find cluster code it's hard to tell where we are spending our time because the bitmap and non-bitmap functions get inlined by the compiler, so make that not happen. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 930c07f79b3d..f56caacfd8ad 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2142,10 +2142,11 @@ again: /* * This searches the block group for just extents to fill the cluster with. */ -static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_cluster *cluster, - struct list_head *bitmaps, - u64 offset, u64 bytes, u64 min_bytes) +static noinline int +setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, + u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *first = NULL; @@ -2245,10 +2246,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, * This specifically looks for bitmaps that may work in the cluster, we assume * that we have already failed to find extents that will work. 
*/ -static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_cluster *cluster, - struct list_head *bitmaps, - u64 offset, u64 bytes, u64 min_bytes) +static noinline int +setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, + u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; -- cgit v1.2.2 From f2bb8f5cfb3bce595b2de251ed7638047fc4e530 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 25 May 2011 13:10:16 -0400 Subject: Btrfs: don't commit the transaction if we don't have enough pinned bytes I noticed when running an enospc test that we would get stuck committing the transaction in check_data_space even though we truly didn't have enough space. So check to see if bytes_pinned is bigger than num_bytes; if it's not, don't commit the transaction. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5b9b6b6df242..0d0a3fe77bb7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3089,6 +3089,13 @@ alloc: } goto again; } + + /* + * If we have less pinned bytes than we want to allocate then + * don't bother committing the transaction, it won't help us. + */ + if (data_sinfo->bytes_pinned < bytes) + committed = 1; spin_unlock(&data_sinfo->lock); /* commit the current transaction and try again */ -- cgit v1.2.2 From 2cdc342c204dba69ca3b2ec43d8e6ff41ed920b8 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 27 May 2011 14:07:49 -0400 Subject: Btrfs: fix bitmap regression In cleaning up the clustering code I accidentally introduced a regression by adding bitmap entries to the cluster rb tree. The problem is if we've maxed out the number of bitmaps we can have for the block group we can only add free space to the bitmaps, but since the bitmap is on the cluster we can't find it and we try to create another one. This would result in a panic because the total bitmaps was bigger than the max bitmaps that were allowed. This patch fixes this by checking to see if we have a cluster, and then looking at the cluster rb tree to see if it has a bitmap entry and if it does and that space belongs to that bitmap, go ahead and add it to that bitmap. I could hit this panic every time with an fs_mark test within a couple of minutes. With this patch I no longer hit the panic and fs_mark goes to completion.
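Condensed, the new lookup does roughly this (a sketch only; the real patch below also loops so a range spanning several bitmaps is handled):

        if (block_group && !list_empty(&block_group->cluster_list)) {
                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);
                spin_lock(&cluster->lock);
                node = rb_first(&cluster->root);
                entry = node ? rb_entry(node, struct btrfs_free_space,
                                        offset_index) : NULL;
                /* if the cluster owns the bitmap covering this offset,
                 * account the free space there instead of creating a
                 * new bitmap */
                if (entry && entry->bitmap &&
                    entry->offset == offset_to_bitmap(ctl, offset))
                        bytes -= add_bytes_to_bitmap(ctl, entry, offset, bytes);
                spin_unlock(&cluster->lock);
        }
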
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 88 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 69 insertions(+), 19 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f56caacfd8ad..8258ccf85dbd 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1417,6 +1417,23 @@ again: return 0; } +static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info, u64 offset, + u64 bytes) +{ + u64 bytes_to_set = 0; + u64 end; + + end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); + + bytes_to_set = min(end - offset, bytes); + + bitmap_set_bits(ctl, info, offset, bytes_to_set); + + return bytes_to_set; + +} + static bool use_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { @@ -1453,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl, return true; } +static struct btrfs_free_space_op free_space_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { struct btrfs_free_space *bitmap_info; + struct btrfs_block_group_cache *block_group = NULL; int added = 0; - u64 bytes, offset, end; + u64 bytes, offset, bytes_added; int ret; bytes = info->bytes; @@ -1467,6 +1490,47 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, if (!ctl->op->use_bitmap(ctl, info)) return 0; + if (ctl->op == &free_space_op) + block_group = ctl->private; + + /* + * Since we link bitmaps right into the cluster we need to see if we + * have a cluster here, and if so and it has our bitmap we need to add + * the free space to that bitmap. + */ + if (block_group && !list_empty(&block_group->cluster_list)) { + struct btrfs_free_cluster *cluster; + struct rb_node *node; + struct btrfs_free_space *entry; + + cluster = list_entry(block_group->cluster_list.next, + struct btrfs_free_cluster, + block_group_list); + spin_lock(&cluster->lock); + node = rb_first(&cluster->root); + if (!node) { + spin_unlock(&cluster->lock); + goto again; + } + + entry = rb_entry(node, struct btrfs_free_space, offset_index); + if (!entry->bitmap) { + spin_unlock(&cluster->lock); + goto again; + } + + if (entry->offset == offset_to_bitmap(ctl, offset)) { + bytes_added = add_bytes_to_bitmap(ctl, entry, + offset, bytes); + bytes -= bytes_added; + offset += bytes_added; + } + spin_unlock(&cluster->lock); + if (!bytes) { + ret = 1; + goto out; + } + } again: bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); @@ -1475,19 +1539,10 @@ again: goto new_bitmap; } - end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); - - if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(ctl, bitmap_info, offset, end - offset); - bytes -= end - offset; - offset = end; - added = 0; - } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(ctl, bitmap_info, offset, bytes); - bytes = 0; - } else { - BUG(); - } + bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); + bytes -= bytes_added; + offset += bytes_added; + added = 0; if (!bytes) { ret = 1; @@ -1766,11 +1821,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } -static struct btrfs_free_space_op free_space_op = { - .recalc_thresholds = recalculate_thresholds, - .use_bitmap = use_bitmap, -}; - void btrfs_init_free_space_ctl(struct 
btrfs_block_group_cache *block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; -- cgit v1.2.2 From 723bda2083d44edbd6be0f0b09f902120dc07442 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 27 May 2011 16:11:38 -0400 Subject: Btrfs: fix the allocator loop logic I was testing with empty_cluster = 0 to try and reproduce a problem and kept hitting early enospc panics. This was because our loop logic was a little confused. So this is what I did: 1) Make the loop variable the ultimate decider on whether we should loop again instead of checking to see if we had an uncached bg, empty size or empty cluster. 2) Increment loop before checking to see what we are on to make the loop definitions make more sense. 3) If we are on the chunk alloc loop don't set empty_size/empty_cluster to 0 unless we didn't actually allocate a chunk. If we did allocate a chunk we should be able to easily setup a new cluster so clearing empty_size/empty_cluster makes us less efficient. This kept me from hitting panics while trying to reproduce the other problem. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 48 +++++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 23 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0d0a3fe77bb7..b42efc2ded51 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5218,9 +5218,7 @@ loop: * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try * again */ - if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && - (found_uncached_bg || empty_size || empty_cluster || - allowed_chunk_alloc)) { + if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) { index = 0; if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { found_uncached_bg = false; @@ -5260,32 +5258,36 @@ loop: goto search; } - if (loop < LOOP_CACHING_WAIT) { - loop++; - goto search; - } + loop++; if (loop == LOOP_ALLOC_CHUNK) { - empty_size = 0; - empty_cluster = 0; - } + if (allowed_chunk_alloc) { + ret = do_chunk_alloc(trans, root, num_bytes + + 2 * 1024 * 1024, data, + CHUNK_ALLOC_LIMITED); + allowed_chunk_alloc = 0; + if (ret == 1) + done_chunk_alloc = 1; + } else if (!done_chunk_alloc && + space_info->force_alloc == + CHUNK_ALLOC_NO_FORCE) { + space_info->force_alloc = CHUNK_ALLOC_LIMITED; + } - if (allowed_chunk_alloc) { - ret = do_chunk_alloc(trans, root, num_bytes + - 2 * 1024 * 1024, data, - CHUNK_ALLOC_LIMITED); - allowed_chunk_alloc = 0; - done_chunk_alloc = 1; - } else if (!done_chunk_alloc && - space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) { - space_info->force_alloc = CHUNK_ALLOC_LIMITED; + /* + * We didn't allocate a chunk, go ahead and drop the + * empty size and loop again. + */ + if (!done_chunk_alloc) + loop = LOOP_NO_EMPTY_SIZE; } - if (loop < LOOP_NO_EMPTY_SIZE) { - loop++; - goto search; + if (loop == LOOP_NO_EMPTY_SIZE) { + empty_size = 0; + empty_cluster = 0; } - ret = -ENOSPC; + + goto search; } else if (!ins->objectid) { ret = -ENOSPC; } else if (ins->objectid) { -- cgit v1.2.2 From f6a398298d34af66ec3a2d82a44a4dbc5277357d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 6 Jun 2011 10:50:35 -0400 Subject: Btrfs: fix duplicate checking logic When merging my code into the integration test the second check for duplicate entries got screwed up. This patch fixes it by dropping ret2 and just using ret for the return value, and checking if we got an error before adding the bitmap to the local list.
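The corrected ordering is, in outline (the diff below is the real change):

        ret = link_free_space(ctl, e);
        ctl->total_bitmaps++;
        ctl->op->recalc_thresholds(ctl);
        if (ret) {
                /* error: the entry was never put on the local list */
                goto free_cache;
        }
        /* only entries that linked cleanly are tracked for later freeing */
        list_add_tail(&e->list, &bitmaps);
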
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 8258ccf85dbd..38f3fd923043 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, pgoff_t index = 0; unsigned long first_page_offset; int num_checksums; - int ret = 0, ret2; + int ret = 0; INIT_LIST_HEAD(&bitmaps); @@ -421,11 +421,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, goto free_cache; } spin_lock(&ctl->tree_lock); - ret2 = link_free_space(ctl, e); + ret = link_free_space(ctl, e); ctl->total_bitmaps++; ctl->op->recalc_thresholds(ctl); spin_unlock(&ctl->tree_lock); - list_add_tail(&e->list, &bitmaps); if (ret) { printk(KERN_ERR "Duplicate entries in " "free space cache, dumping\n"); @@ -434,6 +433,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, page_cache_release(page); goto free_cache; } + list_add_tail(&e->list, &bitmaps); } num_entries--; -- cgit v1.2.2 From 25b8b936ed44814a5ce6fc3b2a21401f33cd56f6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 8 Jun 2011 14:36:54 -0400 Subject: Btrfs: don't map extent buffer if path->skip_locking is set Arne's scrub stuff exposed a problem with mapping the extent buffer in reada_for_search. He searches the commit root with multiple threads and with skip_locking set, so we can race and overwrite node->map_token since node isn't locked. So fix this so that we only map the extent buffer if we don't already have a map_token and skip_locking isn't set. Without this patch scrub would panic almost immediately, with the patch it doesn't panic anymore. 
Thanks, Reported-by: Arne Jansen Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d84089349c82..2e667868e0d2 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root, u32 nr; u32 blocksize; u32 nscan = 0; + bool map = true; if (level != 1) return; @@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; + if (node->map_token || path->skip_locking) + map = false; + while (1) { - if (!node->map_token) { + if (map && !node->map_token) { unsigned long offset = btrfs_node_key_ptr_offset(nr); map_private_extent_buffer(node, offset, sizeof(struct btrfs_key_ptr), @@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root, if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { gen = btrfs_node_ptr_generation(node, nr); - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; @@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root, if ((nread > 65536 || nscan > 32)) break; } - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; } -- cgit v1.2.2 From 3473f3c06a36865ae05993041fff35ee928342a7 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 9 Jun 2011 10:15:17 -0400 Subject: Btrfs: unlock the trans lock properly In btrfs_wait_for_commit if we came upon a transaction that had committed we just exited, but that's bad since we are holding the trans_lock. So break instead so that the lock is dropped. Thanks, Reported-by: David Sterba Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index dd719662340e..6b2e4786d189 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -349,7 +349,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) list) { if (t->in_commit) { if (t->commit_done) - goto out; + break; cur_trans = t; atomic_inc(&cur_trans->use_count); break; -- cgit v1.2.2 From ad3e34bba4b64ab8e1f5ea1a17768e1a0d9648ea Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 8 Jun 2011 14:45:50 -0400 Subject: Btrfs: don't map extent buffer if path->skip_locking is set Arne's scrub stuff exposed a problem with mapping the extent buffer in reada_for_search. He searches the commit root with multiple threads and with skip_locking set, so we can race and overwrite node->map_token since node isn't locked. So fix this so that we only map the extent buffer if we don't already have a map_token and skip_locking isn't set. Without this patch scrub would panic almost immediately, with the patch it doesn't panic anymore. 
Thanks, Reported-by: Arne Jansen Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d84089349c82..2e667868e0d2 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root, u32 nr; u32 blocksize; u32 nscan = 0; + bool map = true; if (level != 1) return; @@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; + if (node->map_token || path->skip_locking) + map = false; + while (1) { - if (!node->map_token) { + if (map && !node->map_token) { unsigned long offset = btrfs_node_key_ptr_offset(nr); map_private_extent_buffer(node, offset, sizeof(struct btrfs_key_ptr), @@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root, if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { gen = btrfs_node_ptr_generation(node, nr); - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; @@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root, if ((nread > 65536 || nscan > 32)) break; } - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; } -- cgit v1.2.2 From 8c51032f978bac5bec5dae0c5de4f85db97c1cc9 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Fri, 3 Jun 2011 10:09:26 +0200 Subject: btrfs: scrub: errors in tree enumeration due to the semantics of btrfs_search_slot the path can point to an invalid slot when ret > 0. This condition went unnoticed, which in turn could have led to an incomplete scrubbing. Signed-off-by: Arne Jansen --- fs/btrfs/scrub.c | 57 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 23 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index df50fd1eca8f..d5a4108cedaf 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -804,18 +804,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) - goto out; - - l = path->nodes[0]; - slot = path->slots[0]; - btrfs_item_key_to_cpu(l, &key, slot); - if (key.objectid != logical) { - ret = btrfs_previous_item(root, path, 0, - BTRFS_EXTENT_ITEM_KEY); - if (ret < 0) - goto out; - } + goto out_noplug; + /* + * we might miss half an extent here, but that doesn't matter, + * as it's only the prefetch + */ while (1) { l = path->nodes[0]; slot = path->slots[0]; @@ -824,7 +818,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, if (ret == 0) continue; if (ret < 0) - goto out; + goto out_noplug; break; } @@ -906,15 +900,20 @@ again: ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; - - l = path->nodes[0]; - slot = path->slots[0]; - btrfs_item_key_to_cpu(l, &key, slot); - if (key.objectid != logical) { + if (ret > 0) { ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY); if (ret < 0) goto out; + if (ret > 0) { + /* there's no smaller item, so stick with the + * larger one */ + btrfs_release_path(path); + ret = btrfs_search_slot(NULL, root, &key, + path, 0, 0); + if (ret < 0) + goto out; + } } while (1) { @@ -989,6 +988,7 @@ next: out: blk_finish_plug(&plug); +out_noplug: btrfs_free_path(path); return ret < 0 ? 
ret : 0; } @@ -1064,8 +1064,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) - goto out; - ret = 0; + break; + if (ret > 0) { + if (path->slots[0] >= + btrfs_header_nritems(path->nodes[0])) { + ret = btrfs_next_leaf(root, path); + if (ret) + break; + } + } l = path->nodes[0]; slot = path->slots[0]; @@ -1075,7 +1082,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) if (found_key.objectid != sdev->dev->devid) break; - if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY) break; if (found_key.offset >= end) @@ -1104,7 +1111,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) cache = btrfs_lookup_block_group(fs_info, chunk_offset); if (!cache) { ret = -ENOENT; - goto out; + break; } ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, chunk_offset, length); @@ -1116,9 +1123,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) btrfs_release_path(path); } -out: btrfs_free_path(path); - return ret; + + /* + * ret can still be 1 from search_slot or next_leaf, + * that's not an error + */ + return ret < 0 ? ret : 0; } static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) -- cgit v1.2.2 From 632dd772fcbde2ba37c0e8983bd38ef4a1eac906 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Fri, 10 Jun 2011 12:07:07 +0200 Subject: btrfs: reinitialize scrub workers Scrub starts the workers each time a scrub starts and stops them after it finished. This patch adds an initialization for the workers before each start, otherwise the workers behave strangely. Signed-off-by: Arne Jansen --- fs/btrfs/disk-io.c | 2 -- fs/btrfs/scrub.c | 6 +++++- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a203d363184d..7bbbfebe47e4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1668,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, init_waitqueue_head(&fs_info->scrub_pause_wait); init_rwsem(&fs_info->scrub_super_lock); fs_info->scrub_workers_refcnt = 0; - btrfs_init_workers(&fs_info->scrub_workers, "scrub", - fs_info->thread_pool_size, &fs_info->generic_worker); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index d5a4108cedaf..92cac19388ed 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1166,8 +1166,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) struct btrfs_fs_info *fs_info = root->fs_info; mutex_lock(&fs_info->scrub_lock); - if (fs_info->scrub_workers_refcnt == 0) + if (fs_info->scrub_workers_refcnt == 0) { + btrfs_init_workers(&fs_info->scrub_workers, "scrub", + fs_info->thread_pool_size, &fs_info->generic_worker); + fs_info->scrub_workers.idle_thresh = 4; btrfs_start_workers(&fs_info->scrub_workers, 1); + } ++fs_info->scrub_workers_refcnt; mutex_unlock(&fs_info->scrub_lock); -- cgit v1.2.2 From 6eef3125886df260ca0e8758d141308152226f6a Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Fri, 10 Jun 2011 13:04:58 +0200 Subject: btrfs: remove unneeded includes from scrub.c Signed-off-by: Arne Jansen --- fs/btrfs/scrub.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 92cac19388ed..a8d03d5efb5d 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -16,13 +16,7 @@ * Boston, MA 021110-1307, USA. 
*/ -#include <linux/sched.h> -#include <linux/pagemap.h> -#include <linux/writeback.h> #include <linux/blkdev.h> -#include <linux/rbtree.h> -#include <linux/slab.h> -#include <linux/workqueue.h> #include "ctree.h" #include "volumes.h" #include "disk-io.h" -- cgit v1.2.2 From 38e87880666091fe9c572a7a2ed2e771d97ca5aa Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 10 Jun 2011 16:36:57 -0400 Subject: Btrfs: make sure to recheck for bitmaps in clusters Josef recently changed the free extent cache to look in the block group cluster for any bitmaps before trying to add a new bitmap for the same offset. This avoids BUG_ON()s due to covering duplicate ranges. But it didn't go quite far enough. A given free range might span between one or more bitmaps or free space entries. The code has looping to cover this, but it doesn't check for clustered bitmaps every time. This shuffles our gotos to check for a bitmap in the cluster for every new bitmap entry we try to add. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 38f3fd923043..9f985a429877 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1492,7 +1492,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, if (ctl->op == &free_space_op) block_group = ctl->private; - +again: /* * Since we link bitmaps right into the cluster we need to see if we * have a cluster here, and if so and it has our bitmap we need to add @@ -1510,13 +1510,13 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, node = rb_first(&cluster->root); if (!node) { spin_unlock(&cluster->lock); - goto again; + goto no_cluster_bitmap; } entry = rb_entry(node, struct btrfs_free_space, offset_index); if (!entry->bitmap) { spin_unlock(&cluster->lock); - goto again; + goto no_cluster_bitmap; } if (entry->offset == offset_to_bitmap(ctl, offset)) { @@ -1531,7 +1531,8 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, goto out; } } -again: + +no_cluster_bitmap: bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!bitmap_info) { -- cgit v1.2.2 From 38e880540f983045da7a00fbc50daad238207fc5 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 10 Jun 2011 18:43:13 +0000 Subject: Btrfs: clear current->journal_info on async transaction commit Normally current->journal_info is cleared by commit_transaction. For an async snap or subvol creation, though, it runs in a work queue. Clear it in btrfs_commit_transaction_async() to avoid leaking a non-NULL journal_info when we return to userspace. When the actual commit runs in the other thread it won't care that its current->journal_info is already NULL.
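In outline the fix is just (full context in the hunk below):

        /* the commit itself finishes in a worker thread, so this task
         * must not keep pointing at the transaction handle */
        if (current->journal_info == trans)
                current->journal_info = NULL;
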
Signed-off-by: Sage Weil Tested-by: Jim Schutt Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 6b2e4786d189..2b3590b9fe98 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1118,8 +1118,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, wait_current_trans_commit_start_and_unblock(root, cur_trans); else wait_current_trans_commit_start(root, cur_trans); - put_transaction(cur_trans); + if (current->journal_info == trans) + current->journal_info = NULL; + + put_transaction(cur_trans); return 0; } -- cgit v1.2.2 From 9eb9104c665aae2401a1723c044669eb10240072 Mon Sep 17 00:00:00 2001 From: richard kennedy Date: Tue, 7 Jun 2011 10:46:32 +0000 Subject: btrfs: remove 64bit alignment padding to allow extent_buffer to fit into one fewer cacheline Reorder extent_buffer to remove 8 bytes of alignment padding on 64 bit builds. This shrinks its size to 128 bytes allowing it to fit into one fewer cache lines and allows more objects per slab in its kmem_cache. slabinfo extent_buffer reports :-

before:-
    Sizes (bytes)     Slabs
    ----------------------------------
    Object :  136     Total  :  123
    SlabObj:  136     Full   :  121
    SlabSiz: 4096     Partial:    0
    Loss   :    0     CpuSlab:    2
    Align  :    8     Objects:   30

after :-
    Object :  128     Total  :    4
    SlabObj:  128     Full   :    2
    SlabSiz: 4096     Partial:    0
    Loss   :    0     CpuSlab:    2
    Align  :    8     Objects:   32

Signed-off-by: Richard Kennedy Signed-off-by: Chris Mason --- fs/btrfs/extent_io.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 4e8445a4757c..a11a92ee2d30 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -126,9 +126,9 @@ struct extent_buffer { unsigned long map_len; struct page *first_page; unsigned long bflags; - atomic_t refs; struct list_head leak_list; struct rcu_head rcu_head; + atomic_t refs; /* the spinlock is used to protect most operations */ spinlock_t lock; -- cgit v1.2.2 From 027ed2f0044e95a97ed34db2d55a9ca95ba84385 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 8 Jun 2011 08:27:56 +0000 Subject: Btrfs: avoid stack bloat in btrfs_ioctl_fs_info() The size of struct btrfs_ioctl_fs_info_args is as big as 1KB, so don't declare the variable on stack.
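The generic pattern being applied (a sketch; the complete conversion is in the diff below) is to move the large ioctl argument struct to the heap:

        struct btrfs_ioctl_fs_info_args *fi_args;
        int ret = 0;

        fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
        if (!fi_args)
                return -ENOMEM;
        /* ... fill in *fi_args ... */
        if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
                ret = -EFAULT;
        kfree(fi_args);
        return ret;
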
Signed-off-by: Li Zefan Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ac37040e426a..b793d112d1f6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2054,29 +2054,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg) static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) { - struct btrfs_ioctl_fs_info_args fi_args; + struct btrfs_ioctl_fs_info_args *fi_args; struct btrfs_device *device; struct btrfs_device *next; struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; + int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - fi_args.num_devices = fs_devices->num_devices; - fi_args.max_id = 0; - memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid)); + fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL); + if (!fi_args) + return -ENOMEM; + + fi_args->num_devices = fs_devices->num_devices; + memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid)); mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { - if (device->devid > fi_args.max_id) - fi_args.max_id = device->devid; + if (device->devid > fi_args->max_id) + fi_args->max_id = device->devid; } mutex_unlock(&fs_devices->device_list_mutex); - if (copy_to_user(arg, &fi_args, sizeof(fi_args))) - return -EFAULT; + if (copy_to_user(arg, fi_args, sizeof(*fi_args))) + ret = -EFAULT; - return 0; + kfree(fi_args); + return ret; } static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) -- cgit v1.2.2 From 5be76758f35ec6578e5b9b150aa513ac26bd9c54 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 9 Jun 2011 10:02:51 +0000 Subject: btrfs: fix unlocked access of delalloc_inodes list_splice_init will make delalloc_inodes empty, but without a spinlock around it, this may produce a corrupted list head, accessed in many places. The race window is very tight and nobody seems to have hit it so far. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a203d363184d..33b744a5ac03 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2911,9 +2911,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) INIT_LIST_HEAD(&splice); - list_splice_init(&root->fs_info->delalloc_inodes, &splice); - spin_lock(&root->fs_info->delalloc_lock); + list_splice_init(&root->fs_info->delalloc_inodes, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, -- cgit v1.2.2 From 08d2f347e877e489ca098c87a6fd2e872fef9767 Mon Sep 17 00:00:00 2001 From: Jan Schmidt Date: Wed, 4 May 2011 16:18:50 +0200 Subject: Btrfs: fix extent state leak on failed nodatasum reads When encountering an EIO while reading from a nodatasum extent, we insert an error record into the inode's failure tree. btrfs_readpage_end_io_hook returns early for nodatasum inodes. We'd better clear the failure tree in that case, otherwise the kernel complains about BUG extent_state: Objects remaining on kmem_cache_close() on rmmod.
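The one-line fix that follows turns an early return into a goto, the standard shared-exit shape: every path that finishes the hook, checksummed or not, falls through the label that clears the failure tree. Schematically (names hypothetical, the real hook is btrfs_readpage_end_io_hook):

	/* Sketch: route the nodatasum early-out through the cleanup label. */
	if (nodatasum)
		goto good;	/* was "return 0", which skipped the cleanup */
	/* ... checksum verification, may jump to an error path ... */
good:
	clear_failure_record(inode, start);	/* drops any stale failure-tree entry */
	return 0;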
Signed-off-by: Jan Schmidt Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 02ff4a1b968b..113913ae36e0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1986,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) - return 0; + goto good; if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { -- cgit v1.2.2 From 22b63a2971c5657dfc1bf4514f9410fc90c8b2c2 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Wed, 9 Feb 2011 16:05:31 +0200 Subject: Btrfs - use %pU to print fsid Get rid of FIXME comment. Uuids from dmesg are now the same as uuids given by btrfs-progs. Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index da541dfca2e3..1efa56e18f9b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, transid = btrfs_super_generation(disk_super); if (disk_super->label[0]) printk(KERN_INFO "device label %s ", disk_super->label); - else { - /* FIXME, make a readl uuid parser */ - printk(KERN_INFO "device fsid %llx-%llx ", - *(unsigned long long *)disk_super->fsid, - *(unsigned long long *)(disk_super->fsid + 8)); - } + else + printk(KERN_INFO "device fsid %pU ", disk_super->fsid); printk(KERN_CONT "devid %llu transid %llu %s\n", (unsigned long long)devid, (unsigned long long)transid, path); ret = device_list_add(path, disk_super, devid, fs_devices_ret); -- cgit v1.2.2 From 30b4caf5d73af5c99cf1b2b46496d8bc35330992 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 8 Jun 2011 03:56:44 +0000 Subject: Btrfs: use join_transaction in btrfs_evict_inode() The WARN_ON() in start_transaction() was triggered while balancing. The cause is btrfs_relocate_chunk() started a transaction and then called iput() on the inode that stores free space cache, and iput() called btrfs_start_transaction() again. Reported-by: Tsutomu Itoh Signed-off-by: Li Zefan Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 113913ae36e0..c15636b17874 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3646,7 +3646,7 @@ void btrfs_evict_inode(struct inode *inode) btrfs_i_size_write(inode, 0); while (1) { - trans = btrfs_start_transaction(root, 0); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); trans->block_rsv = root->orphan_block_rsv; -- cgit v1.2.2 From ac08aedfa5d3de0dcb3825b598d16c2e57991f54 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Jun 2011 11:28:50 -0400 Subject: Btrfs: check the return value from set_anon_super Al Viro noticed we weren't checking for set_anon_super failures. This adds the required checks. 
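The checks themselves are one-liners, but the shape is worth noting: once a call can fail, its result has to flow into the function's existing unwind path rather than being dropped on the floor. Roughly (the fail label and ret variable already exist in the surrounding function):

	ret = set_anon_super(&root->anon_super, NULL);
	if (ret)
		goto fail;	/* unwind through the existing error path */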
Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9f68c6898653..20c111b3fa0d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1312,7 +1312,9 @@ again: spin_lock_init(&root->cache_lock); init_waitqueue_head(&root->cache_wait); - set_anon_super(&root->anon_super, NULL); + ret = set_anon_super(&root->anon_super, NULL); + if (ret) + goto fail; if (btrfs_root_refs(&root->root_item) == 0) { ret = -ENOENT; -- cgit v1.2.2 From f4c44016218a6fce357715b9bbabbbbe1f69853c Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Jun 2011 11:30:47 -0400 Subject: Btrfs: drop the delalloc_bytes check in shrink_delalloc Even when delalloc_bytes is zero, we may need to sleep while waiting for delalloc space. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b42efc2ded51..1f61bf5b4960 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3314,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, if (reserved == 0) return 0; - /* nothing to shrink - nothing to reclaim */ - if (root->fs_info->delalloc_bytes == 0) - return 0; - max_reclaim = min(reserved, to_reclaim); while (loops < 1024) { -- cgit v1.2.2 From 71d7aed014457147e8f71a843d5fbf03235e4a85 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 14 Jun 2011 14:24:32 -0400 Subject: Btrfs: fix path leakage on subvol deletion The delayed ref patch accidentally removed the btrfs_free_path() call in btrfs_unlink_subvol; this puts it back so we don't leak a path. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c15636b17874..5813dec5101c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3076,6 +3076,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); + btrfs_free_path(path); return 0; } -- cgit v1.2.2 From 8351583e3f6e430ce8f71913909a96ad5cc6a2f6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 14 Jun 2011 15:16:14 -0400 Subject: Btrfs: protect the pending_snapshots list with trans_lock Currently there is nothing protecting the pending_snapshots list on the transaction. We only hold the mutex of the directory we are snapshotting and a read lock on the subvol_sem, so we could race with somebody else creating a snapshot in a different directory and end up with list corruption. So protect this list with the trans_lock.
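The fix below is the minimal version of that rule: every mutation of the shared list happens inside the same spinlock, since two unsynchronized list_add() calls on one list head can interleave their prev/next pointer updates and corrupt the list. A sketch, with lock and field names as in btrfs:

	spin_lock(&root->fs_info->trans_lock);
	/* safe: no other task can add to or splice the list meanwhile */
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&root->fs_info->trans_lock);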
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ioctl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b793d112d1f6..a3c4751e07db 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -482,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); BUG_ON(ret); + spin_lock(&root->fs_info->trans_lock); list_add(&pending_snapshot->list, &trans->transaction->pending_snapshots); + spin_unlock(&root->fs_info->trans_lock); if (async_transid) { *async_transid = trans->transid; ret = btrfs_commit_transaction_async(trans, -- cgit v1.2.2 From ed0ca14021e5ae3147602128641aa7f742ab227c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 14 Jun 2011 16:22:15 -0400 Subject: Btrfs: set no_trans_join after trying to expand the transaction We can lock up if we allow new writers to join the transaction while we have flushoncommit set or have a pending snapshot. This is because we set no_trans_join and then loop around and try to wait for ordered extents again. The problem is that the ordered endio code needs to join the transaction, which it can't do because no_trans_join is set. So instead wait until after this loop to set no_trans_join and then make sure to wait for num_writers == 1 in case anybody got started in between us exiting the loop and setting no_trans_join. This could easily be reproduced by mounting -o flushoncommit and running xfstest 13. It cannot be reproduced with this patch. Thanks, Reported-by: Jim Schutt Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2b3590b9fe98..56695595e036 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1241,12 +1241,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, schedule_timeout(1); finish_wait(&cur_trans->writer_wait, &wait); - spin_lock(&root->fs_info->trans_lock); - root->fs_info->trans_no_join = 1; - spin_unlock(&root->fs_info->trans_lock); } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); + /* + * Ok now we need to make sure to block out any other joins while we + * commit the transaction. We could have started a join before setting + * no_join so make sure to wait for num_writers to == 1 again. + */ + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); + wait_event(cur_trans->writer_wait, + atomic_read(&cur_trans->num_writers) == 1); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); -- cgit v1.2.2 From 7585717f304f5ed005cc4ad933a69aab3efbd136 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Jun 2011 20:00:16 -0400 Subject: Btrfs: fix relocation races The recent commit to get rid of our trans_mutex introduced some races with block group relocation. The problem is that relocation needs to do some record keeping about each root, and it was relying on the transaction mutex to coordinate things in subtle ways. This fix adds a mutex just for the relocation code and makes sure it doesn't have a big impact on normal operations. The race is really fixed in btrfs_record_root_in_trans, which is where we step back and wait for the relocation code to finish accounting setup.
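Condensed from the diff that follows, the core of the fix is a publish/consume flag ordered with memory barriers: the writer raises in_trans_setup before touching last_trans, and lock-free readers check the flag after a read barrier, falling back to the new reloc_mutex only when setup may still be in flight.

	/* Writer, serialized by reloc_mutex: */
	root->in_trans_setup = 1;
	smp_wmb();			/* flag is visible before last_trans */
	root->last_trans = trans->transid;
	btrfs_init_reloc_root(trans, root);
	smp_wmb();
	root->in_trans_setup = 0;	/* setup finished, fast path is safe */

	/* Reader, lock-free fast path: */
	smp_rmb();
	if (root->last_trans == trans->transid && !root->in_trans_setup)
		return 0;		/* already recorded this transaction */
	/* otherwise take reloc_mutex and run the slow path */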
Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 14 ++++++++++ fs/btrfs/disk-io.c | 1 + fs/btrfs/relocation.c | 30 ++++++++++++++------- fs/btrfs/transaction.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++--- 4 files changed, 105 insertions(+), 13 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8490ee063709..a2c91a102b72 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -967,6 +967,12 @@ struct btrfs_fs_info { struct srcu_struct subvol_srcu; spinlock_t trans_lock; + /* + * the reloc mutex goes with the trans lock, it is taken + * during commit to protect us from the relocation code + */ + struct mutex reloc_mutex; + struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; @@ -1172,6 +1178,14 @@ struct btrfs_root { u32 type; u64 highest_objectid; + + /* btrfs_record_root_in_trans is a multi-step process, + * and it can race with the balancing code. But the + * race is very small, and only the first time the root + * is added to each transaction. So in_trans_setup + * is used to tell us when more checks are required + */ + unsigned long in_trans_setup; int ref_cows; int track_dirty; int in_radix; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 20c111b3fa0d..0b2b4b759136 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1620,6 +1620,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); + mutex_init(&fs_info->reloc_mutex); init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f25b10a22a0a..086b1e6b8614 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, int ret; if (!root->reloc_root) - return 0; + goto out; reloc_root = root->reloc_root; root_item = &reloc_root->root_item; @@ -1390,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, ret = btrfs_update_root(trans, root->fs_info->tree_root, &reloc_root->root_key, root_item); BUG_ON(ret); + +out: return 0; } @@ -2142,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err) u64 num_bytes = 0; int ret; - spin_lock(&root->fs_info->trans_lock); + mutex_lock(&root->fs_info->reloc_mutex); rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; rc->merging_rsv_size += rc->nodes_relocated * 2; - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->reloc_mutex); + again: if (!err) { num_bytes = rc->merging_rsv_size; @@ -2214,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc) int ret; again: root = rc->extent_root; - spin_lock(&root->fs_info->trans_lock); + + /* + * this serializes us with btrfs_record_root_in_transaction, + * we have to make sure nobody is in the middle of + * adding their roots to the list while we are + * doing this splice + */ + mutex_lock(&root->fs_info->reloc_mutex); list_splice_init(&rc->reloc_roots, &reloc_roots); - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->reloc_mutex); while (!list_empty(&reloc_roots)) { found = 1; @@ -3590,17 +3600,19 @@ next: static void set_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - spin_lock(&fs_info->trans_lock); + + mutex_lock(&fs_info->reloc_mutex); fs_info->reloc_ctl = rc; - spin_unlock(&fs_info->trans_lock); + 
mutex_unlock(&fs_info->reloc_mutex); } static void unset_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - spin_lock(&fs_info->trans_lock); + + mutex_lock(&fs_info->reloc_mutex); fs_info->reloc_ctl = NULL; - spin_unlock(&fs_info->trans_lock); + mutex_unlock(&fs_info->reloc_mutex); } static int check_extent_flags(u64 flags) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2b3590b9fe98..833996a0c628 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -126,28 +126,85 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail) * to make sure the old root from before we joined the transaction is deleted * when the transaction commits */ -int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, +static int record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { if (root->ref_cows && root->last_trans < trans->transid) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); + /* + * see below for in_trans_setup usage rules + * we have the reloc mutex held now, so there + * is only one writer in this function + */ + root->in_trans_setup = 1; + + /* make sure readers find in_trans_setup before + * they find our root->last_trans update + */ + smp_wmb(); + spin_lock(&root->fs_info->fs_roots_radix_lock); if (root->last_trans == trans->transid) { spin_unlock(&root->fs_info->fs_roots_radix_lock); return 0; } - root->last_trans = trans->transid; radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); spin_unlock(&root->fs_info->fs_roots_radix_lock); + root->last_trans = trans->transid; + + /* this is pretty tricky. We don't want to + * take the relocation lock in btrfs_record_root_in_trans + * unless we're really doing the first setup for this root in + * this transaction. + * + * Normally we'd use root->last_trans as a flag to decide + * if we want to take the expensive mutex. + * + * But, we have to set root->last_trans before we + * init the relocation root, otherwise, we trip over warnings + * in ctree.c. The solution used here is to flag ourselves + * with root->in_trans_setup. When this is 1, we're still + * fixing up the reloc trees and everyone must wait. + * + * When this is zero, they can trust root->last_trans and fly + * through btrfs_record_root_in_trans without having to take the + * lock. smp_wmb() makes sure that all the writes above are + * done before we pop in the zero below + */ btrfs_init_reloc_root(trans, root); + smp_wmb(); + root->in_trans_setup = 0; } return 0; } + +int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + if (!root->ref_cows) + return 0; + + /* + * see record_root_in_trans for comments about in_trans_setup usage + * and barriers + */ + smp_rmb(); + if (root->last_trans == trans->transid && + !root->in_trans_setup) + return 0; + + mutex_lock(&root->fs_info->reloc_mutex); + record_root_in_trans(trans, root); + mutex_unlock(&root->fs_info->reloc_mutex); + + return 0; +} + /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. 
@@ -882,7 +939,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent = dget_parent(dentry); parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; - btrfs_record_root_in_trans(trans, parent_root); + record_root_in_trans(trans, parent_root); /* * insert the directory item @@ -900,7 +957,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); - btrfs_record_root_in_trans(trans, root); + record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -1247,6 +1304,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); + /* + * the reloc mutex makes sure that we stop + * the balancing code from coming in and moving + * extents around in the middle of the commit + */ + mutex_lock(&root->fs_info->reloc_mutex); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); @@ -1312,6 +1376,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, root->fs_info->running_transaction = NULL; root->fs_info->trans_no_join = 0; spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->reloc_mutex); wake_up(&root->fs_info->transaction_wait); -- cgit v1.2.2 From 3ed4498caf381a73d6259d3ffacc914b17a507ec Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 13 Jun 2011 17:54:22 +0000 Subject: btrfs: fix dereference of ERR_PTR value smatch reports: btrfs_recover_log_trees error: 'wc.replay_dest' dereferencing possible ERR_PTR() Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 592396c6dc47..4ce8a9f41d1e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3177,7 +3177,7 @@ again: tmp_key.offset = (u64)-1; wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); - BUG_ON(!wc.replay_dest); + BUG_ON(IS_ERR_OR_NULL(wc.replay_dest)); wc.replay_dest->log_root = log; btrfs_record_root_in_trans(trans, wc.replay_dest); -- cgit v1.2.2 From 9fe6a50fb764f508dd2de47a66e62e51388791fb Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 16 Jun 2011 09:04:57 +0000 Subject: btrfs: Remove unused sysfs code Removes code no longer used. The sysfs file itself is kept, because the btrfs developers expressed interest in putting new entries to sysfs. 
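All of the removed plumbing followed the stock sysfs dispatch idiom: recover the containing object with container_of(), then forward to a per-attribute show/store hook. One representative function from the deleted code, for reference:

	static ssize_t btrfs_super_attr_show(struct kobject *kobj,
					     struct attribute *attr, char *buf)
	{
		struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
							super_kobj);
		struct btrfs_super_attr *a = container_of(attr,
							  struct btrfs_super_attr,
							  attr);

		return a->show ? a->show(fs, buf) : 0;
	}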
Signed-off-by: Maarten Lankhorst Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 - fs/btrfs/disk-io.c | 1 - fs/btrfs/sysfs.c | 146 ----------------------------------------------------- 3 files changed, 148 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a2c91a102b72..8e948ec1ee6b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1195,7 +1195,6 @@ struct btrfs_root { struct btrfs_key defrag_max; int defrag_running; char *name; - int in_sysfs; /* the dirty list is only used by non-reference counted roots */ struct list_head dirty_list; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0b2b4b759136..c25ef5a0ccd6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->last_trans = 0; root->highest_objectid = 0; root->name = NULL; - root->in_sysfs = 0; root->inode_tree = RB_ROOT; INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index c3c223ae6691..daac9ae6d731 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -28,152 +28,6 @@ #include "disk-io.h" #include "transaction.h" -static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_root_used(&root->root_item)); -} - -static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_root_limit(&root->root_item)); -} - -static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf) -{ - - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_super_bytes_used(&fs->super_copy)); -} - -static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_super_total_bytes(&fs->super_copy)); -} - -static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_super_sectorsize(&fs->super_copy)); -} - -/* this is for root attrs (subvols/snapshots) */ -struct btrfs_root_attr { - struct attribute attr; - ssize_t (*show)(struct btrfs_root *, char *); - ssize_t (*store)(struct btrfs_root *, const char *, size_t); -}; - -#define ROOT_ATTR(name, mode, show, store) \ -static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \ - show, store) - -ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL); -ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL); - -static struct attribute *btrfs_root_attrs[] = { - &btrfs_root_attr_blocks_used.attr, - &btrfs_root_attr_block_limit.attr, - NULL, -}; - -/* this is for super attrs (actual full fs) */ -struct btrfs_super_attr { - struct attribute attr; - ssize_t (*show)(struct btrfs_fs_info *, char *); - ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t); -}; - -#define SUPER_ATTR(name, mode, show, store) \ -static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \ - show, store) - -SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL); -SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL); -SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL); - -static struct attribute *btrfs_super_attrs[] = { - &btrfs_super_attr_blocks_used.attr, - &btrfs_super_attr_total_blocks.attr, - &btrfs_super_attr_blocksize.attr, - NULL, -}; - -static ssize_t 
btrfs_super_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, - super_kobj); - struct btrfs_super_attr *a = container_of(attr, - struct btrfs_super_attr, - attr); - - return a->show ? a->show(fs, buf) : 0; -} - -static ssize_t btrfs_super_attr_store(struct kobject *kobj, - struct attribute *attr, - const char *buf, size_t len) -{ - struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, - super_kobj); - struct btrfs_super_attr *a = container_of(attr, - struct btrfs_super_attr, - attr); - - return a->store ? a->store(fs, buf, len) : 0; -} - -static ssize_t btrfs_root_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct btrfs_root *root = container_of(kobj, struct btrfs_root, - root_kobj); - struct btrfs_root_attr *a = container_of(attr, - struct btrfs_root_attr, - attr); - - return a->show ? a->show(root, buf) : 0; -} - -static ssize_t btrfs_root_attr_store(struct kobject *kobj, - struct attribute *attr, - const char *buf, size_t len) -{ - struct btrfs_root *root = container_of(kobj, struct btrfs_root, - root_kobj); - struct btrfs_root_attr *a = container_of(attr, - struct btrfs_root_attr, - attr); - return a->store ? a->store(root, buf, len) : 0; -} - -static void btrfs_super_release(struct kobject *kobj) -{ - struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, - super_kobj); - complete(&fs->kobj_unregister); -} - -static void btrfs_root_release(struct kobject *kobj) -{ - struct btrfs_root *root = container_of(kobj, struct btrfs_root, - root_kobj); - complete(&root->kobj_unregister); -} - -static const struct sysfs_ops btrfs_super_attr_ops = { - .show = btrfs_super_attr_show, - .store = btrfs_super_attr_store, -}; - -static const struct sysfs_ops btrfs_root_attr_ops = { - .show = btrfs_root_attr_show, - .store = btrfs_root_attr_store, -}; - /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; -- cgit v1.2.2 From 19fd294957e426bfdd8e19085096467ec18df5c4 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 15 Jun 2011 10:47:30 +0000 Subject: btrfs: fix wrong reservation when doing delayed inode operations We have migrated the space for the delayed inode items from trans_block_rsv to global_block_rsv, but we forgot to set trans->block_rsv to global_block_rsv when doing delayed inode operations, and the following Oops happened: [ 9792.654889] ------------[ cut here ]------------ [ 9792.654898] WARNING: at fs/btrfs/extent-tree.c:5681 btrfs_alloc_free_block+0xca/0x27c [btrfs]() [ 9792.654899] Hardware name: To Be Filled By O.E.M. [ 9792.654900] Modules linked in: btrfs zlib_deflate libcrc32c ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables arc4 rt61pci rt2x00pci rt2x00lib snd_hda_codec_hdmi mac80211 snd_hda_codec_realtek cfg80211 snd_hda_intel edac_core snd_seq rfkill pcspkr serio_raw snd_hda_codec eeprom_93cx6 edac_mce_amd sp5100_tco i2c_piix4 k10temp snd_hwdep snd_seq_device snd_pcm floppy r8169 xhci_hcd mii snd_timer snd soundcore snd_page_alloc ipv6 firewire_ohci pata_acpi ata_generic firewire_core pata_via crc_itu_t radeon ttm drm_kms_helper drm i2c_algo_bit i2c_core [last unloaded: scsi_wait_scan] [ 9792.654919] Pid: 2762, comm: rm Tainted: G W 2.6.39+ #1 [ 9792.654920] Call Trace: [ 9792.654922] [] warn_slowpath_common+0x83/0x9b [ 9792.654925] [] warn_slowpath_null+0x1a/0x1c [ 9792.654933] [] btrfs_alloc_free_block+0xca/0x27c [btrfs] [ 9792.654945] [] ? 
map_extent_buffer+0x6e/0xa8 [btrfs] [ 9792.654953] [] __btrfs_cow_block+0xfc/0x30c [btrfs] [ 9792.654963] [] ? btrfs_buffer_uptodate+0x47/0x58 [btrfs] [ 9792.654970] [] ? read_block_for_search+0x94/0x368 [btrfs] [ 9792.654978] [] btrfs_cow_block+0xfe/0x146 [btrfs] [ 9792.654986] [] btrfs_search_slot+0x14d/0x4b6 [btrfs] [ 9792.654997] [] ? map_extent_buffer+0x6e/0xa8 [btrfs] [ 9792.655022] [] btrfs_lookup_inode+0x2f/0x8f [btrfs] [ 9792.655025] [] ? _cond_resched+0xe/0x22 [ 9792.655027] [] ? mutex_lock+0x29/0x50 [ 9792.655039] [] btrfs_update_delayed_inode+0x72/0x137 [btrfs] [ 9792.655051] [] btrfs_run_delayed_items+0x90/0xdb [btrfs] [ 9792.655062] [] btrfs_commit_transaction+0x228/0x654 [btrfs] [ 9792.655064] [] ? remove_wait_queue+0x3a/0x3a [ 9792.655075] [] btrfs_evict_inode+0x14d/0x202 [btrfs] [ 9792.655077] [] evict+0x71/0x111 [ 9792.655079] [] iput+0x12a/0x132 [ 9792.655081] [] do_unlinkat+0x106/0x155 [ 9792.655083] [] ? path_put+0x1f/0x23 [ 9792.655085] [] ? audit_syscall_entry+0x145/0x171 [ 9792.655087] [] ? putname+0x34/0x36 [ 9792.655090] [] sys_unlinkat+0x29/0x2b [ 9792.655092] [] system_call_fastpath+0x16/0x1b [ 9792.655093] ---[ end trace 02b696eb02b3f768 ]--- This patch fixes it by setting the reservation of the transaction handle to the correct one. Reported-by: Josef Bacik Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 25 ++++++++++++++++++++----- fs/btrfs/delayed-inode.h | 1 - 2 files changed, 20 insertions(+), 6 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 6462c29d2d37..fc515b787e8c 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -297,7 +297,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) item->data_len = data_len; item->ins_or_del = 0; item->bytes_reserved = 0; - item->block_rsv = NULL; item->delayed_node = NULL; atomic_set(&item->refs, 1); } @@ -593,10 +592,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, num_bytes = btrfs_calc_trans_metadata_size(root, 1); ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); - if (!ret) { + if (!ret) item->bytes_reserved = num_bytes; - item->block_rsv = dst_rsv; - } return ret; } @@ -604,10 +601,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, struct btrfs_delayed_item *item) { + struct btrfs_block_rsv *rsv; + if (!item->bytes_reserved) return; - btrfs_block_rsv_release(root, item->block_rsv, + rsv = &root->fs_info->global_block_rsv; + btrfs_block_rsv_release(root, rsv, item->bytes_reserved); } @@ -1014,6 +1014,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_delayed_root *delayed_root; struct btrfs_delayed_node *curr_node, *prev_node; struct btrfs_path *path; + struct btrfs_block_rsv *block_rsv; int ret = 0; path = btrfs_alloc_path(); @@ -1021,6 +1022,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; + block_rsv = trans->block_rsv; + trans->block_rsv = &root->fs_info->global_block_rsv; + delayed_root = btrfs_get_delayed_root(root); curr_node = btrfs_first_delayed_node(delayed_root); @@ -1045,6 +1049,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, } btrfs_free_path(path); + trans->block_rsv = block_rsv; return ret; } @@ -1052,6 +1057,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, struct
btrfs_delayed_node *node) { struct btrfs_path *path; + struct btrfs_block_rsv *block_rsv; int ret; path = btrfs_alloc_path(); @@ -1059,6 +1065,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; + block_rsv = trans->block_rsv; + trans->block_rsv = &node->root->fs_info->global_block_rsv; + ret = btrfs_insert_delayed_items(trans, path, node->root, node); if (!ret) ret = btrfs_delete_delayed_items(trans, path, node->root, node); @@ -1066,6 +1075,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, ret = btrfs_update_delayed_inode(trans, node->root, path, node); btrfs_free_path(path); + trans->block_rsv = block_rsv; return ret; } @@ -1116,6 +1126,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) struct btrfs_path *path; struct btrfs_delayed_node *delayed_node = NULL; struct btrfs_root *root; + struct btrfs_block_rsv *block_rsv; unsigned long nr = 0; int need_requeue = 0; int ret; @@ -1134,6 +1145,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) if (IS_ERR(trans)) goto free_path; + block_rsv = trans->block_rsv; + trans->block_rsv = &root->fs_info->global_block_rsv; + ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); if (!ret) ret = btrfs_delete_delayed_items(trans, path, root, @@ -1176,6 +1190,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) nr = trans->blocks_used; + trans->block_rsv = block_rsv; btrfs_end_transaction_dmeta(trans, root); __btrfs_btree_balance_dirty(root, nr); free_path: diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index eb7d240aa648..cb79b6771e82 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -75,7 +75,6 @@ struct btrfs_delayed_item { struct list_head tree_list; /* used for batch insert/delete items */ struct list_head readdir_list; /* used for readdir items */ u64 bytes_reserved; - struct btrfs_block_rsv *block_rsv; struct btrfs_delayed_node *delayed_node; atomic_t refs; int ins_or_del; -- cgit v1.2.2 From 35a30d7ce54e087d8025a725d4e5a2fdee723a9f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 13 Jun 2011 15:18:23 +0000 Subject: btrfs: fix uninitialized return value When allocation fails in btrfs_read_fs_root_no_name, ret is not set although it is returned, holding a garbage value. Signed-off-by: David Sterba Reviewed-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index c25ef5a0ccd6..1ac8db5dc0a3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1299,12 +1299,12 @@ again: return root; root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); - if (!root->free_ino_ctl) - goto fail; root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), GFP_NOFS); - if (!root->free_ino_pinned) + if (!root->free_ino_pinned || !root->free_ino_ctl) { + ret = -ENOMEM; goto fail; + } btrfs_init_free_ino_ctl(root); mutex_init(&root->fs_commit_mutex); -- cgit v1.2.2 From e999376f094162aa425ae749aa1df95ab928d010 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 17 Jun 2011 16:14:09 -0400 Subject: Btrfs: avoid delayed metadata items during commits Snapshot creation has two phases. One is the initial snapshot setup, and the second is done during commit, while nobody is allowed to modify the root we are snapshotting. 
The delayed metadata insertion code can break that rule: it does a delayed inode update on the parent inode of the snapshot, and a delayed directory item insertion. This patch makes sure to run the pending delayed operations before we record the snapshot root, which avoids corruption. Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 7 +++++++ fs/btrfs/delayed-inode.h | 4 ++++ fs/btrfs/transaction.c | 27 +++++++++++++++++---------- 3 files changed, 28 insertions(+), 10 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index fc515b787e8c..f1cbd028f7b3 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1237,6 +1237,13 @@ again: return 0; } +void btrfs_assert_delayed_root_empty(struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + delayed_root = btrfs_get_delayed_root(root); + WARN_ON(btrfs_first_delayed_node(delayed_root)); +} + void btrfs_balance_delayed_items(struct btrfs_root *root) { struct btrfs_delayed_root *delayed_root; diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index cb79b6771e82..d1a6a2915c66 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -137,4 +137,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, /* for init */ int __init btrfs_delayed_inode_init(void); void btrfs_delayed_inode_exit(void); + +/* for debugging */ +void btrfs_assert_delayed_root_empty(struct btrfs_root *root); + #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c073d85e14f3..51dcec86757f 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -957,6 +957,15 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); + /* + * pull in the delayed directory update + * and the delayed inode item + * otherwise we corrupt the FS during + * snapshot + */ + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); @@ -1018,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { - /* - * We must deal with the delayed items before creating - * snapshots, or we will create a snapthot with inconsistent - * information. 
- */ - ret = btrfs_run_delayed_items(trans, fs_info->fs_root); - BUG_ON(ret); - ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1319,15 +1320,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, */ mutex_lock(&root->fs_info->reloc_mutex); - ret = create_pending_snapshots(trans, root->fs_info); + ret = btrfs_run_delayed_items(trans, root); BUG_ON(ret); - ret = btrfs_run_delayed_items(trans, root); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); + /* + * make sure none of the code above managed to slip in a + * delayed item + */ + btrfs_assert_delayed_root_empty(root); + WARN_ON(cur_trans != trans->transaction); btrfs_scrub_pause(root); -- cgit v1.2.2 From 46e4edbf7ea9cf26665eb9f90c0fc7688d1a51ed Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 23 Jun 2011 23:59:32 +0200 Subject: Remove unneeded version.h includes from fs/ It was pointed out by 'make versioncheck' that some includes of linux/version.h were not needed in fs/ (fs/btrfs/ctree.h and fs/omfs/file.c). This patch removes them. Signed-off-by: Jesper Juhl Acked-by: Bob Copeland Signed-off-by: Linus Torvalds --- fs/btrfs/ctree.h | 1 - 1 file changed, 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 300628795fdb..f30ac05dbda7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -19,7 +19,6 @@ #ifndef __BTRFS_CTREE__ #define __BTRFS_CTREE__ -#include #include #include #include -- cgit v1.2.2 From 1973f0faeb4a5f35597793c65d3c94d8fd386e10 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 24 Jun 2011 13:13:29 -0400 Subject: Btrfs: make sure to record the transid in new inodes When we create a new inode, we aren't filling in the field that records the transaction that last changed this inode. If we then go to fsync that inode, it will be skipped because the field isn't filled in. Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5813dec5101c..87f1e0cf26f8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4520,6 +4520,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode_tree_add(inode); trace_btrfs_inode_new(inode); + btrfs_set_inode_last_trans(trans, inode); return inode; fail: -- cgit v1.2.2 From e0f5406727f1dfdc47b8ba4a0ff6eae4b0b5ed4c Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sat, 18 Jun 2011 20:26:38 +0000 Subject: Btrfs: fix type mismatch in find_free_extent() data parameter should be u64 because a full-sized chunk flags field is passed instead of 0/1 for distinguishing data from metadata. All underlying functions expect u64. 
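The format string is the other half of the type fix: a u64 travels through varargs as a 64-bit value, so %d can misalign the remaining arguments on 32-bit and truncates the value everywhere, while %llu (with an unsigned long long cast where kernel style asks for one) prints it intact. A minimal illustration with a hypothetical value:

	u64 data = (1ULL << 35) | 1;	/* hypothetical chunk-flags word */

	printk(KERN_ERR "No space info for %llu\n",
	       (unsigned long long)data);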
Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1f61bf5b4960..71cd456fdb60 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4842,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, u64 num_bytes, u64 empty_size, u64 search_start, u64 search_end, u64 hint_byte, struct btrfs_key *ins, - int data) + u64 data) { int ret = 0; struct btrfs_root *root = orig_root->fs_info->extent_root; @@ -4869,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, space_info = __find_space_info(root->fs_info, data); if (!space_info) { - printk(KERN_ERR "No space info for %d\n", data); + printk(KERN_ERR "No space info for %llu\n", data); return -ENOSPC; } -- cgit v1.2.2 From 9b90f5135320bc74dc6c9a8c74d69fd4821d9282 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 24 Jun 2011 16:02:51 +0000 Subject: Btrfs: make sure to update total_bitmaps when freeing cache V3 A user reported this bug again where we have more bitmaps than we are supposed to. This is because we failed to load the free space cache, but don't update the ctl->total_bitmaps counter when we remove entries from the tree. This patch fixes this problem and we should be good to go again. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 9f985a429877..bf0d61567f3d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1893,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) while ((node = rb_last(&ctl->free_space_offset)) != NULL) { info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(ctl, info); - kfree(info->bitmap); - kmem_cache_free(btrfs_free_space_cachep, info); + if (!info->bitmap) { + unlink_free_space(ctl, info); + kmem_cache_free(btrfs_free_space_cachep, info); + } else { + free_bitmap(ctl, info); + } if (need_resched()) { spin_unlock(&ctl->tree_lock); cond_resched(); -- cgit v1.2.2 From 2f7e33d432d097a2a7f467b031bf18be91cb3d49 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Thu, 23 Jun 2011 07:27:13 +0000 Subject: btrfs: fix inconsonant inode information When iputting the inode, we may leave the delayed nodes if they have some delayed items that have not been dealt with. So when the inode is read again, we must look up the corresponding delayed node, and use the information in it to initialize the inode. Otherwise we will get inconsistent inode information, which may cause the same directory index number to be allocated again, hitting the following oops: [ 5447.554187] err add delayed dir index item(name: pglog_0.965_0) into the insertion tree of the delayed node(root id: 262, inode id: 258, errno: -17) [ 5447.569766] ------------[ cut here ]------------ [ 5447.575361] kernel BUG at fs/btrfs/delayed-inode.c:1301! [SNIP] [ 5447.790721] Call Trace: [ 5447.793191] [] btrfs_insert_dir_item+0x189/0x1bb [btrfs] [ 5447.800156] [] btrfs_add_link+0x12b/0x191 [btrfs] [ 5447.806517] [] btrfs_add_nondir+0x31/0x58 [btrfs] [ 5447.812876] [] btrfs_create+0xf9/0x197 [btrfs] [ 5447.818961] [] vfs_create+0x72/0x92 [ 5447.824090] [] do_last+0x22c/0x40b [ 5447.829133] [] path_openat+0xc0/0x2ef [ 5447.834438] [] ? 
__perf_event_task_sched_out+0x24/0x44 [ 5447.841216] [] ? perf_event_task_sched_out+0x59/0x67 [ 5447.847846] [] do_filp_open+0x3d/0x87 [ 5447.853156] [] ? strncpy_from_user+0x43/0x4d [ 5447.859072] [] ? getname_flags+0x2e/0x80 [ 5447.864636] [] ? do_getname+0x14b/0x173 [ 5447.870112] [] ? audit_getname+0x16/0x26 [ 5447.875682] [] ? spin_lock+0xe/0x10 [ 5447.880882] [] do_sys_open+0x69/0xae [ 5447.886153] [] sys_open+0x20/0x22 [ 5447.891114] [] system_call_fastpath+0x16/0x1b Fix it by reusing the old delayed node. Reported-by: Jim Schutt Signed-off-by: Miao Xie Tested-by: Jim Schutt Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 104 ++++++++++++++++++++++++++++++++++++----------- fs/btrfs/delayed-inode.h | 1 + fs/btrfs/inode.c | 12 +++++- 3 files changed, 91 insertions(+), 26 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index f1cbd028f7b3..98c68e658a9b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root( return root->fs_info->delayed_root; } -static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( - struct inode *inode) +static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode) { - struct btrfs_delayed_node *node; struct btrfs_inode *btrfs_inode = BTRFS_I(inode); struct btrfs_root *root = btrfs_inode->root; u64 ino = btrfs_ino(inode); - int ret; + struct btrfs_delayed_node *node; -again: node = ACCESS_ONCE(btrfs_inode->delayed_node); if (node) { - atomic_inc(&node->refs); /* can be accessed */ + atomic_inc(&node->refs); return node; } @@ -102,8 +99,10 @@ again: node = radix_tree_lookup(&root->delayed_nodes_tree, ino); if (node) { if (btrfs_inode->delayed_node) { + atomic_inc(&node->refs); /* can be accessed */ + BUG_ON(btrfs_inode->delayed_node != node); spin_unlock(&root->inode_lock); - goto again; + return node; } btrfs_inode->delayed_node = node; atomic_inc(&node->refs); /* can be accessed */ @@ -113,6 +112,23 @@ again: } spin_unlock(&root->inode_lock); + return NULL; +} + +static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( + struct inode *inode) +{ + struct btrfs_delayed_node *node; + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_root *root = btrfs_inode->root; + u64 ino = btrfs_ino(inode); + int ret; + +again: + node = btrfs_get_delayed_node(inode); + if (node) + return node; + node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); if (!node) return ERR_PTR(-ENOMEM); @@ -548,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item( return next; } -static inline struct btrfs_delayed_node *btrfs_get_delayed_node( - struct inode *inode) -{ - struct btrfs_inode *btrfs_inode = BTRFS_I(inode); - struct btrfs_delayed_node *delayed_node; - - delayed_node = btrfs_inode->delayed_node; - if (delayed_node) - atomic_inc(&delayed_node->refs); - - return delayed_node; -} - static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, u64 root_id) { @@ -1404,8 +1407,7 @@ end: int btrfs_inode_delayed_dir_index_count(struct inode *inode) { - struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; - int ret = 0; + struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); if (!delayed_node) return -ENOENT; @@ -1415,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode) * a new directory index is added into the delayed node and index_cnt * is updated now. So we needn't lock the delayed node. 
*/ - if (!delayed_node->index_cnt) + if (!delayed_node->index_cnt) { + btrfs_release_delayed_node(delayed_node); return -EINVAL; + } BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; - return ret; + btrfs_release_delayed_node(delayed_node); + return 0; } void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, @@ -1613,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, inode->i_ctime.tv_nsec); } +int btrfs_fill_inode(struct inode *inode, u32 *rdev) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_inode_item *inode_item; + struct btrfs_timespec *tspec; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return -ENOENT; + + mutex_lock(&delayed_node->mutex); + if (!delayed_node->inode_dirty) { + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return -ENOENT; + } + + inode_item = &delayed_node->inode_item; + + inode->i_uid = btrfs_stack_inode_uid(inode_item); + inode->i_gid = btrfs_stack_inode_gid(inode_item); + btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); + inode->i_mode = btrfs_stack_inode_mode(inode_item); + inode->i_nlink = btrfs_stack_inode_nlink(inode_item); + inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); + BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); + BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item); + inode->i_rdev = 0; + *rdev = btrfs_stack_inode_rdev(inode_item); + BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); + + tspec = btrfs_inode_atime(inode_item); + inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec); + inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + + tspec = btrfs_inode_mtime(inode_item); + inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec); + inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + + tspec = btrfs_inode_ctime(inode_item); + inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec); + inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + + inode->i_generation = BTRFS_I(inode)->generation; + BTRFS_I(inode)->index_cnt = (u64)-1; + + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return 0; +} + int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index d1a6a2915c66..8d27af4bd8b9 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -119,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode); int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); +int btrfs_fill_inode(struct inode *inode, u32 *rdev); /* Used for drop dead root */ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 87f1e0cf26f8..447612d3a16a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode) int maybe_acls; u32 rdev; int ret; + bool filled = false; + + ret = btrfs_fill_inode(inode, &rdev); + if (!ret) + filled = true; path = btrfs_alloc_path(); BUG_ON(!path); @@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode) goto make_bad; leaf = path->nodes[0]; + + if (filled) + goto cache_acl; + inode_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); if (!leaf->map_token) @@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct 
inode *inode) BTRFS_I(inode)->index_cnt = (u64)-1; BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); - +cache_acl: /* * try to precache a NULL acl entry for files that don't have * any xattrs or acls @@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode) } btrfs_free_path(path); - inode_item = NULL; switch (inode->i_mode & S_IFMT) { case S_IFREG: -- cgit v1.2.2 From 0942caa373c676dca614ea8352ac77e0270aba73 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 28 Jun 2011 15:10:37 +0000 Subject: btrfs: add missing options displayed in mount output There are three missed mount options settable by user which are not currently displayed in mount output. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 5 +++++ fs/btrfs/super.c | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'fs/btrfs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8e948ec1ee6b..60e13ef23a5e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1336,6 +1336,11 @@ struct btrfs_ioctl_defrag_range_args { */ #define BTRFS_STRING_ITEM_KEY 253 +/* + * Flags for mount options. + * + * Note: don't forget to add new options to btrfs_show_options() + */ #define BTRFS_MOUNT_NODATASUM (1 << 0) #define BTRFS_MOUNT_NODATACOW (1 << 1) #define BTRFS_MOUNT_NOBARRIER (1 << 2) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3559d0b3518a..5746081199ee 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -721,6 +721,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_puts(seq, ",clear_cache"); if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) seq_puts(seq, ",user_subvol_rm_allowed"); + if (btrfs_test_opt(root, ENOSPC_DEBUG)) + seq_puts(seq, ",enospc_debug"); + if (btrfs_test_opt(root, AUTO_DEFRAG)) + seq_puts(seq, ",autodefrag"); + if (btrfs_test_opt(root, INODE_MAP_CACHE)) + seq_puts(seq, ",inode_cache"); return 0; } -- cgit v1.2.2 From 508794eb5ec2a2b832742e78c6766844b10c0c94 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Sat, 2 Jul 2011 21:24:41 +0000 Subject: Btrfs: don't panic if we get an error while balancing V2 A user reported an error where if we try to balance an fs after a device has been removed it will blow up. This is because we get an EIO back and this is where BUG_ON(ret) bites us in the ass. To fix we just exit. Thanks, Reported-by: Anand Jain Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 1efa56e18f9b..19450bc53632 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2098,7 +2098,8 @@ int btrfs_balance(struct btrfs_root *dev_root) chunk_root->root_key.objectid, found_key.objectid, found_key.offset); - BUG_ON(ret && ret != -ENOSPC); + if (ret && ret != -ENOSPC) + goto error; key.offset = found_key.offset - 1; } ret = 0; -- cgit v1.2.2 From 149e2d76b4886c4c7ff5e077646a8ba3563c8026 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 6 Jul 2011 18:51:53 -0400 Subject: btrfs: fix oops when doing space balance We need to make sure the data relocation inode doesn't go through the delayed metadata updates, otherwise we get an oops during balance: kernel BUG at fs/btrfs/relocation.c:4303! [SNIP] Call Trace: [] ? update_ref_for_cow+0x22d/0x330 [btrfs] [] __btrfs_cow_block+0x451/0x5e0 [btrfs] [] ? 
read_block_for_search+0x14d/0x4d0 [btrfs] [] btrfs_cow_block+0x10b/0x240 [btrfs] [] btrfs_search_slot+0x49e/0x7a0 [btrfs] [] btrfs_lookup_inode+0x2f/0xa0 [btrfs] [] ? mutex_lock+0x1e/0x50 [] btrfs_update_delayed_inode+0x71/0x160 [btrfs] [] ? __btrfs_release_delayed_node+0x67/0x190 [btrfs] [] btrfs_run_delayed_items+0xe8/0x120 [btrfs] [] btrfs_commit_transaction+0x250/0x850 [btrfs] [] ? find_get_pages+0x39/0x130 [] ? join_transaction+0x25/0x250 [btrfs] [] ? wake_up_bit+0x40/0x40 [] prepare_to_relocate+0xda/0xf0 [btrfs] [] relocate_block_group+0x4b/0x620 [btrfs] [] ? btrfs_clean_old_snapshots+0x35/0x150 [btrfs] [] btrfs_relocate_block_group+0x1b3/0x2e0 [btrfs] [] ? btrfs_tree_unlock+0x50/0x50 [btrfs] [] btrfs_relocate_chunk+0x8b/0x670 [btrfs] [] ? btrfs_set_path_blocking+0x3d/0x50 [btrfs] [] ? read_extent_buffer+0xd8/0x1d0 [btrfs] [] ? btrfs_previous_item+0xb1/0x150 [btrfs] [] ? read_extent_buffer+0xd8/0x1d0 [btrfs] [] btrfs_balance+0x21a/0x2b0 [btrfs] [] btrfs_ioctl+0x798/0xd20 [btrfs] [] ? handle_mm_fault+0x148/0x270 [] ? do_page_fault+0x1d8/0x4b0 [] do_vfs_ioctl+0x9a/0x540 [] sys_ioctl+0xa1/0xb0 [] system_call_fastpath+0x16/0x1b [SNIP] RIP [] btrfs_reloc_cow_block+0x22c/0x270 [btrfs] Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'fs/btrfs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 447612d3a16a..4a1373083747 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2678,12 +2678,14 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, int ret; /* - * If root is tree root, it means this inode is used to - * store free space information. And these inodes are updated - * when committing the transaction, so they needn't delaye to - * be updated, or deadlock will occured. + * If the inode is a free space inode, we can deadlock during commit + * if we put it into the delayed code. + * + * The data relocation inode should also be directly updated + * without delay */ - if (!is_free_space_inode(root, inode)) { + if (!is_free_space_inode(root, inode) + && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { ret = btrfs_delayed_update_inode(trans, root, inode); if (!ret) btrfs_set_inode_last_trans(trans, inode); -- cgit v1.2.2
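Combined with the earlier free-space-inode check, the update path now reduces to a guard like the following sketch (condensed from the diff above): inodes that the commit path itself must update go straight to the on-disk inode item instead of through the delayed machinery.

	/* Sketch: bypass delayed updates for inodes the commit path touches. */
	if (!is_free_space_inode(root, inode) &&
	    root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}
	/* fall through: update the inode item directly, without delay */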