Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/acl.c               6
-rw-r--r--  fs/btrfs/btrfs_inode.h       6
-rw-r--r--  fs/btrfs/ctree.h             4
-rw-r--r--  fs/btrfs/disk-io.c           2
-rw-r--r--  fs/btrfs/extent-tree.c     130
-rw-r--r--  fs/btrfs/extent_map.c        2
-rw-r--r--  fs/btrfs/file.c             41
-rw-r--r--  fs/btrfs/free-space-cache.c  2
-rw-r--r--  fs/btrfs/inode.c           128
-rw-r--r--  fs/btrfs/root-tree.c         2
-rw-r--r--  fs/btrfs/super.c             9
-rw-r--r--  fs/btrfs/transaction.c      64
-rw-r--r--  fs/btrfs/transaction.h       5
-rw-r--r--  fs/btrfs/tree-log.c         48
-rw-r--r--  fs/btrfs/tree-log.h          3
-rw-r--r--  fs/btrfs/xattr.c             2
16 files changed, 362 insertions(+), 92 deletions(-)
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 69b355ae7f49..361604244271 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -27,7 +27,7 @@
 #include "btrfs_inode.h"
 #include "xattr.h"
 
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 
 static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 {
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
         .set    = btrfs_xattr_acl_access_set,
 };
 
-#else /* CONFIG_BTRFS_POSIX_ACL */
+#else /* CONFIG_BTRFS_FS_POSIX_ACL */
 
 int btrfs_acl_chmod(struct inode *inode)
 {
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
         return 0;
 }
 
-#endif /* CONFIG_BTRFS_POSIX_ACL */
+#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index c71abec0ab90..f6783a42f010 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -86,6 +86,12 @@ struct btrfs_inode {
          * transid of the trans_handle that last modified this inode
          */
         u64 last_trans;
+
+        /*
+         * log transid when this inode was last modified
+         */
+        u64 last_sub_trans;
+
         /*
          * transid that last logged this inode
          */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 1bb897ecdeeb..444b3e9b92a4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1009,6 +1009,7 @@ struct btrfs_root {
         atomic_t log_writers;
         atomic_t log_commit[2];
         unsigned long log_transid;
+        unsigned long last_log_commit;
         unsigned long log_batch;
         pid_t log_start_pid;
         bool log_multiple_pids;
@@ -1152,6 +1153,7 @@ struct btrfs_root {
 #define BTRFS_MOUNT_FLUSHONCOMMIT      (1 << 7)
 #define BTRFS_MOUNT_SSD_SPREAD         (1 << 8)
 #define BTRFS_MOUNT_NOSSD              (1 << 9)
+#define BTRFS_MOUNT_DISCARD            (1 << 10)
 
 #define btrfs_clear_opt(o, opt)        ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
@@ -2373,7 +2375,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
 
 /* acl.c */
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 int btrfs_check_acl(struct inode *inode, int mask);
 #else
 #define btrfs_check_acl NULL
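
A note on the new option bit: BTRFS_MOUNT_DISCARD slots into the same bitmask scheme as the existing BTRFS_MOUNT_* flags and is manipulated through the token-pasting macros shown above. The following standalone sketch is illustrative userspace C only: btrfs_test_opt_bits() and main() are invented names for the demo, and the real in-kernel btrfs_test_opt() takes a root and reads the mount_opt field out of fs_info.

#include <stdio.h>

/* same bit layout as the hunk above */
#define BTRFS_MOUNT_NOSSD       (1 << 9)
#define BTRFS_MOUNT_DISCARD     (1 << 10)

/* ## pastes the short option name onto the BTRFS_MOUNT_ prefix */
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)   ((o) |= BTRFS_MOUNT_##opt)
#define btrfs_test_opt_bits(o, opt)     ((o) & BTRFS_MOUNT_##opt)

int main(void)
{
        unsigned long mount_opt = 0;

        btrfs_set_opt(mount_opt, DISCARD);
        printf("discard: %d\n", btrfs_test_opt_bits(mount_opt, DISCARD) != 0); /* 1 */
        btrfs_clear_opt(mount_opt, DISCARD);
        printf("discard: %d\n", btrfs_test_opt_bits(mount_opt, DISCARD) != 0); /* 0 */
        return 0;
}
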
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 100551a66c46..02b6afbd7450 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -917,6 +917,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
         atomic_set(&root->log_writers, 0);
         root->log_batch = 0;
         root->log_transid = 0;
+        root->last_log_commit = 0;
         extent_io_tree_init(&root->dirty_log_pages,
                             fs_info->btree_inode->i_mapping, GFP_NOFS);
 
@@ -1087,6 +1088,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
         WARN_ON(root->log_root);
         root->log_root = log_root;
         root->log_transid = 0;
+        root->last_log_commit = 0;
         return 0;
 }
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d0c4d584efad..94627c4cc193 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1568,23 +1568,23 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
         return ret;
 }
 
-#ifdef BIO_RW_DISCARD
 static void btrfs_issue_discard(struct block_device *bdev,
                                 u64 start, u64 len)
 {
         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
                              DISCARD_FL_BARRIER);
 }
-#endif
 
 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                                 u64 num_bytes)
 {
-#ifdef BIO_RW_DISCARD
         int ret;
         u64 map_length = num_bytes;
         struct btrfs_multi_bio *multi = NULL;
 
+        if (!btrfs_test_opt(root, DISCARD))
+                return 0;
+
         /* Tell the block device(s) that the sectors can be discarded */
         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
                               bytenr, &map_length, &multi, 0);
@@ -1604,9 +1604,6 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
         }
 
         return ret;
-#else
-        return 0;
-#endif
 }
 
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
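
The start >> 9 and len >> 9 shifts in btrfs_issue_discard() above convert byte offsets into the 512-byte sector units that blkdev_issue_discard() expects. A minimal standalone sketch of just that conversion, with an invented helper name:

#include <stdio.h>

typedef unsigned long long u64;

/* bytes -> 512-byte sectors, mirroring the >> 9 shifts above */
static void byte_range_to_sectors(u64 start, u64 len, u64 *sector, u64 *nr_sects)
{
        *sector = start >> 9;
        *nr_sects = len >> 9;
}

int main(void)
{
        u64 sector, nr;

        byte_range_to_sectors(1 << 20, 128 << 10, &sector, &nr);
        printf("sector %llu, count %llu\n", sector, nr); /* 2048, 256 */
        return 0;
}
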
@@ -2980,10 +2977,10 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 
         free_space = btrfs_super_total_bytes(disk_super);
         /*
-         * we allow the metadata to grow to a max of either 5gb or 5% of the
+         * we allow the metadata to grow to a max of either 10gb or 5% of the
          * space in the volume.
          */
-        min_metadata = min((u64)5 * 1024 * 1024 * 1024,
+        min_metadata = min((u64)10 * 1024 * 1024 * 1024,
                            div64_u64(free_space * 5, 100));
         if (info->total_bytes >= min_metadata) {
                 spin_unlock(&info->lock);
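
For scale, here is a worked example of the raised cap, min(10GiB, 5% of the volume); the volume sizes below are hypothetical and not taken from the patch:

#include <stdio.h>

typedef unsigned long long u64;

static u64 min_u64(u64 a, u64 b)
{
        return a < b ? a : b;
}

int main(void)
{
        u64 cap = (u64)10 * 1024 * 1024 * 1024;        /* 10GiB */
        u64 vol_small = (u64)100 << 30;                /* 100GiB volume */
        u64 vol_large = (u64)2048 << 30;               /* 2TiB volume */

        /* 5% of 100GiB is 5GiB, so the percentage term wins */
        printf("%llu\n", min_u64(cap, vol_small * 5 / 100));
        /* 5% of 2TiB is ~102GiB, so the 10GiB cap wins */
        printf("%llu\n", min_u64(cap, vol_large * 5 / 100));
        return 0;
}
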
@@ -3690,6 +3687,14 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
         if (is_data)
                 goto pinit;
 
+        /*
+         * discard is sloooow, and so triggering discards on
+         * individual btree blocks isn't a good plan.  Just
+         * pin everything in discard mode.
+         */
+        if (btrfs_test_opt(root, DISCARD))
+                goto pinit;
+
         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
         if (!buf)
                 goto pinit;
@@ -4097,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 }
 
 enum btrfs_loop_type {
-        LOOP_CACHED_ONLY = 0,
+        LOOP_FIND_IDEAL = 0,
         LOOP_CACHING_NOWAIT = 1,
         LOOP_CACHING_WAIT = 2,
         LOOP_ALLOC_CHUNK = 3,
@@ -4126,12 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
         struct btrfs_block_group_cache *block_group = NULL;
         int empty_cluster = 2 * 1024 * 1024;
         int allowed_chunk_alloc = 0;
+        int done_chunk_alloc = 0;
         struct btrfs_space_info *space_info;
         int last_ptr_loop = 0;
         int loop = 0;
         bool found_uncached_bg = false;
         bool failed_cluster_refill = false;
         bool failed_alloc = false;
+        u64 ideal_cache_percent = 0;
+        u64 ideal_cache_offset = 0;
 
         WARN_ON(num_bytes < root->sectorsize);
         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4167,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                 empty_cluster = 0;
 
         if (search_start == hint_byte) {
+ideal_cache:
                 block_group = btrfs_lookup_block_group(root->fs_info,
                                                        search_start);
                 /*
                  * we don't want to use the block group if it doesn't match our
                  * allocation bits, or if its not cached.
+                 *
+                 * However if we are re-searching with an ideal block group
+                 * picked out then we don't care that the block group is cached.
                  */
                 if (block_group && block_group_bits(block_group, data) &&
-                    block_group_cache_done(block_group)) {
+                    (block_group->cached != BTRFS_CACHE_NO ||
+                     search_start == ideal_cache_offset)) {
                         down_read(&space_info->groups_sem);
                         if (list_empty(&block_group->list) ||
                             block_group->ro) {
@@ -4186,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                          */
                         btrfs_put_block_group(block_group);
                         up_read(&space_info->groups_sem);
-                } else
+                } else {
                         goto have_block_group;
+                }
                 } else if (block_group) {
                         btrfs_put_block_group(block_group);
                 }
         }
-
 search:
         down_read(&space_info->groups_sem);
         list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -4204,28 +4217,45 @@ search:
 
 have_block_group:
                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+                        u64 free_percent;
+
+                        free_percent = btrfs_block_group_used(&block_group->item);
+                        free_percent *= 100;
+                        free_percent = div64_u64(free_percent,
+                                                 block_group->key.offset);
+                        free_percent = 100 - free_percent;
+                        if (free_percent > ideal_cache_percent &&
+                            likely(!block_group->ro)) {
+                                ideal_cache_offset = block_group->key.objectid;
+                                ideal_cache_percent = free_percent;
+                        }
+
                         /*
-                         * we want to start caching kthreads, but not too many
-                         * right off the bat so we don't overwhelm the system,
-                         * so only start them if there are less than 2 and we're
-                         * in the initial allocation phase.
+                         * We only want to start kthread caching if we are at
+                         * the point where we will wait for caching to make
+                         * progress, or if our ideal search is over and we've
+                         * found somebody to start caching.
                          */
                         if (loop > LOOP_CACHING_NOWAIT ||
-                            atomic_read(&space_info->caching_threads) < 2) {
+                            (loop > LOOP_FIND_IDEAL &&
+                             atomic_read(&space_info->caching_threads) < 2)) {
                                 ret = cache_block_group(block_group);
                                 BUG_ON(ret);
                         }
-                }
-
-                cached = block_group_cache_done(block_group);
-                if (unlikely(!cached)) {
                         found_uncached_bg = true;
 
-                        /* if we only want cached bgs, loop */
-                        if (loop == LOOP_CACHED_ONLY)
+                        /*
+                         * If loop is set for cached only, try the next block
+                         * group.
+                         */
+                        if (loop == LOOP_FIND_IDEAL)
                                 goto loop;
                 }
 
+                cached = block_group_cache_done(block_group);
+                if (unlikely(!cached))
+                        found_uncached_bg = true;
+
                 if (unlikely(block_group->ro))
                         goto loop;
 
@@ -4405,9 +4435,11 @@ loop:
         }
         up_read(&space_info->groups_sem);
 
-        /* LOOP_CACHED_ONLY, only search fully cached block groups
-         * LOOP_CACHING_NOWAIT, search partially cached block groups, but
-         * dont wait foR them to finish caching
+        /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
+         * for them to make caching progress.  Also
+         * determine the best possible bg to cache
+         * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
+         * caching kthreads as we move along
          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
@@ -4416,12 +4448,47 @@ loop:
         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
             (found_uncached_bg || empty_size || empty_cluster ||
              allowed_chunk_alloc)) {
-                if (found_uncached_bg) {
+                if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
                         found_uncached_bg = false;
-                        if (loop < LOOP_CACHING_WAIT) {
-                                loop++;
+                        loop++;
+                        if (!ideal_cache_percent &&
+                            atomic_read(&space_info->caching_threads))
                                 goto search;
-                        }
+
+                        /*
+                         * 1 of the following 2 things have happened so far
+                         *
+                         * 1) We found an ideal block group for caching that
+                         * is mostly full and will cache quickly, so we might
+                         * as well wait for it.
+                         *
+                         * 2) We searched for cached only and we didn't find
+                         * anything, and we didn't start any caching kthreads
+                         * either, so chances are we will loop through and
+                         * start a couple caching kthreads, and then come back
+                         * around and just wait for them.  This will be slower
+                         * because we will have 2 caching kthreads reading at
+                         * the same time when we could have just started one
+                         * and waited for it to get far enough to give us an
+                         * allocation, so go ahead and go to the wait caching
+                         * loop.
+                         */
+                        loop = LOOP_CACHING_WAIT;
+                        search_start = ideal_cache_offset;
+                        ideal_cache_percent = 0;
+                        goto ideal_cache;
+                } else if (loop == LOOP_FIND_IDEAL) {
+                        /*
+                         * Didn't find a uncached bg, wait on anything we find
+                         * next.
+                         */
+                        loop = LOOP_CACHING_WAIT;
+                        goto search;
+                }
+
+                if (loop < LOOP_CACHING_WAIT) {
+                        loop++;
+                        goto search;
                 }
 
                 if (loop == LOOP_ALLOC_CHUNK) {
@@ -4433,7 +4500,8 @@ loop:
                         ret = do_chunk_alloc(trans, root, num_bytes +
                                              2 * 1024 * 1024, data, 1);
                         allowed_chunk_alloc = 0;
-                } else {
+                        done_chunk_alloc = 1;
+                } else if (!done_chunk_alloc) {
                         space_info->force_alloc = 1;
                 }
 
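
The "ideal" score computed in the have_block_group hunk above is simply the block group's free percentage in integer math: free = 100 - used * 100 / size. A standalone sketch of that formula with hypothetical sizes:

#include <stdio.h>

typedef unsigned long long u64;

/* integer-math free percentage, as in the have_block_group hunk */
static u64 free_percent(u64 used, u64 size)
{
        return 100 - used * 100 / size;
}

int main(void)
{
        u64 one_gib = (u64)1 << 30;

        /* 900MiB used out of a 1GiB group -> 13% free (integer division) */
        printf("%llu%% free\n", free_percent((u64)900 << 20, one_gib));
        /* completely empty group -> 100% free */
        printf("%llu%% free\n", free_percent(0, one_gib));
        return 0;
}
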
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2c726b7b9faa..ccbdcb54ec5d 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -208,7 +208,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
         write_lock(&tree->lock);
         em = lookup_extent_mapping(tree, start, len);
 
-        WARN_ON(em->start != start || !em);
+        WARN_ON(!em || em->start != start);
 
         if (!em)
                 goto out;
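
The one-line change above fixes an evaluation-order bug: || short-circuits left to right, so the old WARN_ON(em->start != start || !em) dereferenced em before the NULL test could run. A hypothetical standalone demo of the corrected ordering:

#include <stdio.h>
#include <stddef.h>

struct extent_map {
        unsigned long long start;
};

/* safe: !em is tested first, guarding the dereference */
static int bad_mapping(struct extent_map *em, unsigned long long start)
{
        return !em || em->start != start;
}

int main(void)
{
        struct extent_map em = { .start = 4096 };

        printf("%d\n", bad_mapping(&em, 4096));        /* 0: found and matching */
        printf("%d\n", bad_mapping(NULL, 4096));       /* 1: no crash on NULL */
        return 0;
}
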
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 2d623aa0625f..06550affbd27 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1086,8 +1086,10 @@ out_nolock:
                                 btrfs_end_transaction(trans, root);
                         else
                                 btrfs_commit_transaction(trans, root);
-                } else {
+                } else if (ret != BTRFS_NO_LOG_SYNC) {
                         btrfs_commit_transaction(trans, root);
+                } else {
+                        btrfs_end_transaction(trans, root);
                 }
         }
         if (file->f_flags & O_DIRECT) {
@@ -1137,6 +1139,13 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
         int ret = 0;
         struct btrfs_trans_handle *trans;
 
+
+        /* we wait first, since the writeback may change the inode */
+        root->log_batch++;
+        /* the VFS called filemap_fdatawrite for us */
+        btrfs_wait_ordered_range(inode, 0, (u64)-1);
+        root->log_batch++;
+
         /*
          * check the transaction that last modified this inode
          * and see if its already been committed
@@ -1144,6 +1153,11 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
         if (!BTRFS_I(inode)->last_trans)
                 goto out;
 
+        /*
+         * if the last transaction that changed this file was before
+         * the current transaction, we can bail out now without any
+         * syncing
+         */
         mutex_lock(&root->fs_info->trans_mutex);
         if (BTRFS_I(inode)->last_trans <=
             root->fs_info->last_trans_committed) {
@@ -1153,13 +1167,6 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
         }
         mutex_unlock(&root->fs_info->trans_mutex);
 
-        root->log_batch++;
-        filemap_fdatawrite(inode->i_mapping);
-        btrfs_wait_ordered_range(inode, 0, (u64)-1);
-        root->log_batch++;
-
-        if (datasync && !(inode->i_state & I_DIRTY_PAGES))
-                goto out;
         /*
          * ok we haven't committed the transaction yet, lets do a commit
          */
@@ -1188,14 +1195,18 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
          */
         mutex_unlock(&dentry->d_inode->i_mutex);
 
-        if (ret > 0) {
-                ret = btrfs_commit_transaction(trans, root);
-        } else {
-                ret = btrfs_sync_log(trans, root);
-                if (ret == 0)
-                        ret = btrfs_end_transaction(trans, root);
-                else
+        if (ret != BTRFS_NO_LOG_SYNC) {
+                if (ret > 0) {
                         ret = btrfs_commit_transaction(trans, root);
+                } else {
+                        ret = btrfs_sync_log(trans, root);
+                        if (ret == 0)
+                                ret = btrfs_end_transaction(trans, root);
+                        else
+                                ret = btrfs_commit_transaction(trans, root);
+                }
+        } else {
+                ret = btrfs_end_transaction(trans, root);
         }
         mutex_lock(&dentry->d_inode->i_mutex);
 out:
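
Taken together, the file.c hunks give the fsync path a three-way decision on the value returned from logging: a positive return means the log can't be used and a full transaction commit is needed, zero means the tree log alone can be synced, and BTRFS_NO_LOG_SYNC means the inode is already safely in the log. A sketch of just that ladder; the enum and helper are invented for illustration, the constant comes from the tree-log.h hunk below:

#include <stdio.h>

#define BTRFS_NO_LOG_SYNC 256   /* from tree-log.h below */

enum fsync_action {
        COMMIT_TRANSACTION,     /* logging refused: full transaction commit */
        SYNC_LOG,               /* log succeeded: flush only the tree log */
        END_TRANSACTION,        /* inode already in the log: nothing to sync */
};

static enum fsync_action fsync_action_for(int log_ret)
{
        if (log_ret == BTRFS_NO_LOG_SYNC)
                return END_TRANSACTION;
        if (log_ret > 0)
                return COMMIT_TRANSACTION;
        return SYNC_LOG;
}

int main(void)
{
        printf("%d %d %d\n", fsync_action_for(1),
               fsync_action_for(0), fsync_action_for(BTRFS_NO_LOG_SYNC));
        return 0;
}
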
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5c2caad76212..cb2849f03251 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1296,7 +1296,7 @@ again:
                         window_start = entry->offset;
                         window_free = entry->bytes;
                         last = entry;
-                        max_extent = 0;
+                        max_extent = entry->bytes;
                 } else {
                         last = next;
                         window_free += next->bytes;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9e138b793dc7..b3ad168a0bfc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -538,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
         struct btrfs_root *root = BTRFS_I(inode)->root;
         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
         struct extent_io_tree *io_tree;
-        int ret;
+        int ret = 0;
 
         if (list_empty(&async_cow->extents))
                 return 0;
@@ -552,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 
         io_tree = &BTRFS_I(inode)->io_tree;
 
+retry:
         /* did the compression code fall back to uncompressed IO? */
         if (!async_extent->pages) {
                 int page_started = 0;
@@ -562,11 +563,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
                         async_extent->ram_size - 1, GFP_NOFS);
 
                 /* allocate blocks */
-                cow_file_range(inode, async_cow->locked_page,
+                ret = cow_file_range(inode, async_cow->locked_page,
                                async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                &page_started, &nr_written, 0);
 
                 /*
                  * if page_started, cow_file_range inserted an
@@ -574,7 +575,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
                  * and IO for us. Otherwise, we need to submit
                  * all those pages down to the drive.
                  */
-                if (!page_started)
+                if (!page_started && !ret)
                         extent_write_locked_range(io_tree,
                                 inode, async_extent->start,
                                 async_extent->start +
@@ -602,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
                                    async_extent->compressed_size,
                                    0, alloc_hint,
                                    (u64)-1, &ins, 1);
-                BUG_ON(ret);
+                if (ret) {
+                        int i;
+                        for (i = 0; i < async_extent->nr_pages; i++) {
+                                WARN_ON(async_extent->pages[i]->mapping);
+                                page_cache_release(async_extent->pages[i]);
+                        }
+                        kfree(async_extent->pages);
+                        async_extent->nr_pages = 0;
+                        async_extent->pages = NULL;
+                        unlock_extent(io_tree, async_extent->start,
+                                      async_extent->start +
+                                      async_extent->ram_size - 1, GFP_NOFS);
+                        goto retry;
+                }
+
                 em = alloc_extent_map(GFP_NOFS);
                 em->start = async_extent->start;
                 em->len = async_extent->ram_size;
@@ -743,8 +758,22 @@ static noinline int cow_file_range(struct inode *inode,
         em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
                                    start, num_bytes);
         if (em) {
-                alloc_hint = em->block_start;
-                free_extent_map(em);
+                /*
+                 * if block start isn't an actual block number then find the
+                 * first block in this inode and use that as a hint.  If that
+                 * block is also bogus then just don't worry about it.
+                 */
+                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+                        free_extent_map(em);
+                        em = search_extent_mapping(em_tree, 0, 0);
+                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
+                                alloc_hint = em->block_start;
+                        if (em)
+                                free_extent_map(em);
+                } else {
+                        alloc_hint = em->block_start;
+                        free_extent_map(em);
+                }
         }
         read_unlock(&BTRFS_I(inode)->extent_tree.lock);
         btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@ -2474,7 +2503,19 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 
         root = BTRFS_I(dir)->root;
 
+        /*
+         * 5 items for unlink inode
+         * 1 for orphan
+         */
+        ret = btrfs_reserve_metadata_space(root, 6);
+        if (ret)
+                return ret;
+
         trans = btrfs_start_transaction(root, 1);
+        if (IS_ERR(trans)) {
+                btrfs_unreserve_metadata_space(root, 6);
+                return PTR_ERR(trans);
+        }
 
         btrfs_set_trans_block_group(trans, dir);
 
@@ -2489,6 +2530,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
         nr = trans->blocks_used;
 
         btrfs_end_transaction_throttle(trans, root);
+        btrfs_unreserve_metadata_space(root, 6);
         btrfs_btree_balance_dirty(root, nr);
         return ret;
 }
@@ -2569,7 +2611,16 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
             inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                 return -ENOTEMPTY;
 
+        ret = btrfs_reserve_metadata_space(root, 5);
+        if (ret)
+                return ret;
+
         trans = btrfs_start_transaction(root, 1);
+        if (IS_ERR(trans)) {
+                btrfs_unreserve_metadata_space(root, 5);
+                return PTR_ERR(trans);
+        }
+
         btrfs_set_trans_block_group(trans, dir);
 
         if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -2592,6 +2643,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 out:
         nr = trans->blocks_used;
         ret = btrfs_end_transaction_throttle(trans, root);
+        btrfs_unreserve_metadata_space(root, 5);
         btrfs_btree_balance_dirty(root, nr);
 
         if (ret && !err)
@@ -3032,12 +3084,22 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 
         if ((offset & (blocksize - 1)) == 0)
                 goto out;
+        ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
+        if (ret)
+                goto out;
+
+        ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
+        if (ret)
+                goto out;
 
         ret = -ENOMEM;
again:
         page = grab_cache_page(mapping, index);
-        if (!page)
+        if (!page) {
+                btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
+                btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
                 goto out;
+        }
 
         page_start = page_offset(page);
         page_end = page_start + PAGE_CACHE_SIZE - 1;
@@ -3070,6 +3132,10 @@ again:
                 goto again;
         }
 
+        clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
+                          EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
+                          GFP_NOFS);
+
         ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
         if (ret) {
                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
@@ -3088,6 +3154,9 @@ again:
         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
 
 out_unlock:
+        if (ret)
+                btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
+        btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
         unlock_page(page);
         page_cache_release(page);
 out:
@@ -3111,7 +3180,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
         if (size <= hole_start)
                 return 0;
 
-        btrfs_truncate_page(inode->i_mapping, inode->i_size);
+        err = btrfs_truncate_page(inode->i_mapping, inode->i_size);
+        if (err)
+                return err;
 
         while (1) {
                 struct btrfs_ordered_extent *ordered;
@@ -3480,6 +3551,7 @@ static noinline void init_btrfs_i(struct inode *inode)
         bi->generation = 0;
         bi->sequence = 0;
         bi->last_trans = 0;
+        bi->last_sub_trans = 0;
         bi->logged_trans = 0;
         bi->delalloc_bytes = 0;
         bi->reserved_bytes = 0;
@@ -4980,7 +5052,9 @@ again:
         set_page_dirty(page);
         SetPageUptodate(page);
 
-        BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
+        BTRFS_I(inode)->last_trans = root->fs_info->generation;
+        BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
+
         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
 
 out_unlock:
@@ -5005,7 +5079,9 @@ static void btrfs_truncate(struct inode *inode)
         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                 return;
 
-        btrfs_truncate_page(inode->i_mapping, inode->i_size);
+        ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
+        if (ret)
+                return;
         btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
 
         trans = btrfs_start_transaction(root, 1);
@@ -5100,9 +5176,11 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
         if (!ei)
                 return NULL;
         ei->last_trans = 0;
+        ei->last_sub_trans = 0;
         ei->logged_trans = 0;
         ei->outstanding_extents = 0;
         ei->reserved_extents = 0;
+        ei->root = NULL;
         spin_lock_init(&ei->accounting_lock);
         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
         INIT_LIST_HEAD(&ei->i_orphan);
@@ -5119,6 +5197,14 @@ void btrfs_destroy_inode(struct inode *inode)
         WARN_ON(inode->i_data.nrpages);
 
         /*
+         * This can happen where we create an inode, but somebody else also
+         * created the same inode and we need to destroy the one we already
+         * created.
+         */
+        if (!root)
+                goto free;
+
+        /*
          * Make sure we're properly removed from the ordered operation
          * lists.
          */
@@ -5153,6 +5239,7 @@ void btrfs_destroy_inode(struct inode *inode)
         }
         inode_tree_del(inode);
         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+free:
         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 }
 
@@ -5258,11 +5345,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                 return -ENOTEMPTY;
 
         /*
-         * 2 items for dir items
-         * 1 item for orphan entry
-         * 1 item for ref
+         * We want to reserve the absolute worst case amount of items.  So if
+         * both inodes are subvols and we need to unlink them then that would
+         * require 4 item modifications, but if they are both normal inodes it
+         * would require 5 item modifications, so we'll assume their normal
+         * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
+         * should cover the worst case number of items we'll modify.
          */
-        ret = btrfs_reserve_metadata_space(root, 4);
+        ret = btrfs_reserve_metadata_space(root, 11);
         if (ret)
                 return ret;
 
@@ -5378,7 +5468,7 @@ out_fail:
         if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                 up_read(&root->fs_info->subvol_sem);
 
-        btrfs_unreserve_metadata_space(root, 4);
+        btrfs_unreserve_metadata_space(root, 11);
         return ret;
 }
 
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 9351428f30e2..67fa2d29d663 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -159,7 +159,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
         write_extent_buffer(l, item, ptr, sizeof(*item));
         btrfs_mark_buffer_dirty(path->nodes[0]);
 out:
-        btrfs_release_path(root, path);
         btrfs_free_path(path);
         return ret;
 }
@@ -332,7 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
         BUG_ON(refs != 0);
         ret = btrfs_del_item(trans, root, path);
 out:
-        btrfs_release_path(root, path);
         btrfs_free_path(path);
         return ret;
 }
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9de9b2236419..752a5463bf53 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,7 +66,8 @@ enum {
         Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
         Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
         Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
-        Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_err,
+        Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
+        Opt_discard, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -88,6 +89,7 @@ static match_table_t tokens = {
         {Opt_notreelog, "notreelog"},
         {Opt_flushoncommit, "flushoncommit"},
         {Opt_ratio, "metadata_ratio=%d"},
+        {Opt_discard, "discard"},
         {Opt_err, NULL},
 };
 
@@ -257,6 +259,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                                   info->metadata_ratio);
                         }
                         break;
+                case Opt_discard:
+                        btrfs_set_opt(info->mount_opt, DISCARD);
+                        break;
                 default:
                         break;
                 }
@@ -344,7 +349,7 @@ static int btrfs_fill_super(struct super_block *sb,
         sb->s_export_op = &btrfs_export_ops;
         sb->s_xattr = btrfs_xattr_handlers;
         sb->s_time_gran = 1;
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
         sb->s_flags |= MS_POSIXACL;
 #endif
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 0b8f36d4400a..c207e8c32c9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -163,8 +163,14 @@ static void wait_current_trans(struct btrfs_root *root)
         }
 }
 
+enum btrfs_trans_type {
+        TRANS_START,
+        TRANS_JOIN,
+        TRANS_USERSPACE,
+};
+
 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
-                                             int num_blocks, int wait)
+                                             int num_blocks, int type)
 {
         struct btrfs_trans_handle *h =
                 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
@@ -172,7 +178,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 
         mutex_lock(&root->fs_info->trans_mutex);
         if (!root->fs_info->log_root_recovering &&
-            ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
+            ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
+             type == TRANS_USERSPACE))
                 wait_current_trans(root);
         ret = join_transaction(root);
         BUG_ON(ret);
@@ -186,7 +193,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
         h->alloc_exclude_start = 0;
         h->delayed_ref_updates = 0;
 
-        if (!current->journal_info)
+        if (!current->journal_info && type != TRANS_USERSPACE)
                 current->journal_info = h;
 
         root->fs_info->running_transaction->use_count++;
@@ -198,18 +205,18 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                    int num_blocks)
 {
-        return start_transaction(root, num_blocks, 1);
+        return start_transaction(root, num_blocks, TRANS_START);
 }
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                    int num_blocks)
 {
-        return start_transaction(root, num_blocks, 0);
+        return start_transaction(root, num_blocks, TRANS_JOIN);
 }
 
 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
                                                    int num_blocks)
 {
-        return start_transaction(r, num_blocks, 2);
+        return start_transaction(r, num_blocks, TRANS_USERSPACE);
 }
 
 /* wait for a transaction commit to be fully complete */
215/* wait for a transaction commit to be fully complete */ 222/* wait for a transaction commit to be fully complete */
@@ -344,10 +351,10 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
344/* 351/*
345 * when btree blocks are allocated, they have some corresponding bits set for 352 * when btree blocks are allocated, they have some corresponding bits set for
346 * them in one of two extent_io trees. This is used to make sure all of 353 * them in one of two extent_io trees. This is used to make sure all of
347 * those extents are on disk for transaction or log commit 354 * those extents are sent to disk but does not wait on them
348 */ 355 */
349int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 356int btrfs_write_marked_extents(struct btrfs_root *root,
350 struct extent_io_tree *dirty_pages) 357 struct extent_io_tree *dirty_pages)
351{ 358{
352 int ret; 359 int ret;
353 int err = 0; 360 int err = 0;
@@ -394,6 +401,29 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                         page_cache_release(page);
                 }
         }
+        if (err)
+                werr = err;
+        return werr;
+}
+
+/*
+ * when btree blocks are allocated, they have some corresponding bits set for
+ * them in one of two extent_io trees.  This is used to make sure all of
+ * those extents are on disk for transaction or log commit.  We wait
+ * on all the pages and clear them from the dirty pages state tree
+ */
+int btrfs_wait_marked_extents(struct btrfs_root *root,
+                              struct extent_io_tree *dirty_pages)
+{
+        int ret;
+        int err = 0;
+        int werr = 0;
+        struct page *page;
+        struct inode *btree_inode = root->fs_info->btree_inode;
+        u64 start = 0;
+        u64 end;
+        unsigned long index;
+
         while (1) {
                 ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                             EXTENT_DIRTY);
@@ -424,6 +454,22 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
         return werr;
 }
 
+/*
+ * when btree blocks are allocated, they have some corresponding bits set for
+ * them in one of two extent_io trees.  This is used to make sure all of
+ * those extents are on disk for transaction or log commit
+ */
+int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
+                                        struct extent_io_tree *dirty_pages)
+{
+        int ret;
+        int ret2;
+
+        ret = btrfs_write_marked_extents(root, dirty_pages);
+        ret2 = btrfs_wait_marked_extents(root, dirty_pages);
+        return ret || ret2;
+}
+
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root)
 {
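
The point of the split is that the log-sync path below can start IO on its dirty log pages, do other commit work, and only wait afterwards, while the combined helper survives as a thin wrapper. A minimal sketch of the wrapper's error folding, with invented stand-in functions; note both steps always run, so a write error does not skip the wait:

#include <stdio.h>

static int write_marked(void) { return 0; }    /* start IO, don't wait */
static int wait_marked(void)  { return 0; }    /* wait for that IO */

/* run both steps unconditionally; report failure if either failed */
static int write_and_wait_marked(void)
{
        int ret = write_marked();
        int ret2 = wait_marked();

        return ret || ret2;
}

int main(void)
{
        printf("%d\n", write_and_wait_marked());       /* 0 on success */
        return 0;
}
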
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 663c67404918..d4e3e7a6938c 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -79,6 +79,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
                                               struct inode *inode)
 {
         BTRFS_I(inode)->last_trans = trans->transaction->transid;
+        BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
 }
 
 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
@@ -107,5 +108,9 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root);
 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                         struct extent_io_tree *dirty_pages);
+int btrfs_write_marked_extents(struct btrfs_root *root,
+                               struct extent_io_tree *dirty_pages);
+int btrfs_wait_marked_extents(struct btrfs_root *root,
+                              struct extent_io_tree *dirty_pages);
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
 #endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 4edfdc2acc5f..741666a7676a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1980,6 +1980,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         int ret;
         struct btrfs_root *log = root->log_root;
         struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
+        u64 log_transid = 0;
 
         mutex_lock(&root->log_mutex);
         index1 = root->log_transid % 2;
@@ -1994,12 +1995,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
                 wait_log_commit(trans, root, root->log_transid - 1);
 
-        while (root->log_multiple_pids) {
+        while (1) {
                 unsigned long batch = root->log_batch;
-                mutex_unlock(&root->log_mutex);
-                schedule_timeout_uninterruptible(1);
-                mutex_lock(&root->log_mutex);
-
+                if (root->log_multiple_pids) {
+                        mutex_unlock(&root->log_mutex);
+                        schedule_timeout_uninterruptible(1);
+                        mutex_lock(&root->log_mutex);
+                }
                 wait_for_writer(trans, root);
                 if (batch == root->log_batch)
                         break;
@@ -2012,12 +2014,16 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                 goto out;
         }
 
-        ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
+        /* we start IO on all the marked extents here, but we don't actually
+         * wait for them until later.
+         */
+        ret = btrfs_write_marked_extents(log, &log->dirty_log_pages);
         BUG_ON(ret);
 
         btrfs_set_root_node(&log->root_item, log->node);
 
         root->log_batch = 0;
+        log_transid = root->log_transid;
         root->log_transid++;
         log->log_transid = root->log_transid;
         root->log_start_pid = 0;
@@ -2046,6 +2052,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
         index2 = log_root_tree->log_transid % 2;
         if (atomic_read(&log_root_tree->log_commit[index2])) {
+                btrfs_wait_marked_extents(log, &log->dirty_log_pages);
                 wait_log_commit(trans, log_root_tree,
                                 log_root_tree->log_transid);
                 mutex_unlock(&log_root_tree->log_mutex);
@@ -2065,6 +2072,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
          * check the full commit flag again
          */
         if (root->fs_info->last_trans_log_full_commit == trans->transid) {
+                btrfs_wait_marked_extents(log, &log->dirty_log_pages);
                 mutex_unlock(&log_root_tree->log_mutex);
                 ret = -EAGAIN;
                 goto out_wake_log_root;
@@ -2073,6 +2081,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         ret = btrfs_write_and_wait_marked_extents(log_root_tree,
                                                   &log_root_tree->dirty_log_pages);
         BUG_ON(ret);
+        btrfs_wait_marked_extents(log, &log->dirty_log_pages);
 
         btrfs_set_super_log_root(&root->fs_info->super_for_commit,
                                  log_root_tree->node->start);
@@ -2092,9 +2101,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
          * the running transaction open, so a full commit can't hop
          * in and cause problems either.
          */
-        write_ctree_super(trans, root->fs_info->tree_root, 2);
+        write_ctree_super(trans, root->fs_info->tree_root, 1);
         ret = 0;
 
+        mutex_lock(&root->log_mutex);
+        if (root->last_log_commit < log_transid)
+                root->last_log_commit = log_transid;
+        mutex_unlock(&root->log_mutex);
+
 out_wake_log_root:
         atomic_set(&log_root_tree->log_commit[index2], 0);
         smp_mb();
@@ -2862,6 +2876,21 @@ out:
         return ret;
 }
 
+static int inode_in_log(struct btrfs_trans_handle *trans,
+                 struct inode *inode)
+{
+        struct btrfs_root *root = BTRFS_I(inode)->root;
+        int ret = 0;
+
+        mutex_lock(&root->log_mutex);
+        if (BTRFS_I(inode)->logged_trans == trans->transid &&
+            BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
+                ret = 1;
+        mutex_unlock(&root->log_mutex);
+        return ret;
+}
+
+
 /*
  * helper function around btrfs_log_inode to make sure newly created
  * parent directories also end up in the log.  A minimal inode and backref
@@ -2901,6 +2930,11 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
         if (ret)
                 goto end_no_trans;
 
+        if (inode_in_log(trans, inode)) {
+                ret = BTRFS_NO_LOG_SYNC;
+                goto end_no_trans;
+        }
+
         start_log_trans(trans, root);
 
         ret = btrfs_log_inode(trans, root, inode, inode_only);
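
inode_in_log() is the fast path that the new last_sub_trans/last_log_commit bookkeeping enables: an fsync can skip logging entirely when the inode's last modification is already covered by the last log commit. A standalone sketch of that comparison, with a pared-down struct and hypothetical values:

#include <stdio.h>

typedef unsigned long long u64;

struct demo_inode {
        u64 logged_trans;       /* transid that last logged this inode */
        u64 last_sub_trans;     /* log transid of the last modification */
};

static int inode_in_log(const struct demo_inode *inode,
                        u64 transid, u64 last_log_commit)
{
        return inode->logged_trans == transid &&
               inode->last_sub_trans <= last_log_commit;
}

int main(void)
{
        struct demo_inode inode = { .logged_trans = 7, .last_sub_trans = 3 };

        /* logged in this transaction, and log commit 4 covers sub trans 3 */
        printf("in log: %d\n", inode_in_log(&inode, 7, 4));    /* 1 */
        /* modified again (sub trans 5) after the last log commit */
        inode.last_sub_trans = 5;
        printf("in log: %d\n", inode_in_log(&inode, 7, 4));    /* 0 */
        return 0;
}
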
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index d09c7609e16b..0776eacb5083 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -19,6 +19,9 @@
 #ifndef __TREE_LOG_
 #define __TREE_LOG_
 
+/* return value for btrfs_log_dentry_safe that means we don't need to log it at all */
+#define BTRFS_NO_LOG_SYNC 256
+
 int btrfs_sync_log(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root);
 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index b0fc93f95fd0..b6dd5967c48a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -260,7 +260,7 @@ err:
  * attributes are handled directly.
  */
 struct xattr_handler *btrfs_xattr_handlers[] = {
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
         &btrfs_xattr_acl_access_handler,
         &btrfs_xattr_acl_default_handler,
 #endif