Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/acl.c                 6
-rw-r--r--  fs/btrfs/async-thread.c       81
-rw-r--r--  fs/btrfs/async-thread.h       10
-rw-r--r--  fs/btrfs/btrfs_inode.h        16
-rw-r--r--  fs/btrfs/ctree.h              38
-rw-r--r--  fs/btrfs/disk-io.c            60
-rw-r--r--  fs/btrfs/extent-tree.c       724
-rw-r--r--  fs/btrfs/extent_io.c         134
-rw-r--r--  fs/btrfs/extent_io.h          31
-rw-r--r--  fs/btrfs/extent_map.c          2
-rw-r--r--  fs/btrfs/file.c               79
-rw-r--r--  fs/btrfs/free-space-cache.c    2
-rw-r--r--  fs/btrfs/inode.c             442
-rw-r--r--  fs/btrfs/ioctl.c              69
-rw-r--r--  fs/btrfs/ordered-data.c       99
-rw-r--r--  fs/btrfs/ordered-data.h        4
-rw-r--r--  fs/btrfs/relocation.c          4
-rw-r--r--  fs/btrfs/root-tree.c           2
-rw-r--r--  fs/btrfs/super.c               9
-rw-r--r--  fs/btrfs/transaction.c        72
-rw-r--r--  fs/btrfs/transaction.h         5
-rw-r--r--  fs/btrfs/tree-log.c           56
-rw-r--r--  fs/btrfs/tree-log.h            3
-rw-r--r--  fs/btrfs/volumes.c             4
-rw-r--r--  fs/btrfs/xattr.c               2
25 files changed, 1507 insertions(+), 447 deletions(-)
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index f128427b995b..361604244271 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -27,7 +27,7 @@
 #include "btrfs_inode.h"
 #include "xattr.h"
 
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 
 static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 {
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
 	.set	= btrfs_xattr_acl_access_set,
 };
 
-#else /* CONFIG_FS_POSIX_ACL */
+#else /* CONFIG_BTRFS_FS_POSIX_ACL */
 
 int btrfs_acl_chmod(struct inode *inode)
 {
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
 	return 0;
 }
 
-#endif /* CONFIG_FS_POSIX_ACL */
+#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 282ca085c2fb..c0861e781cdb 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,51 @@ struct btrfs_worker_thread {
 };
 
 /*
+ * btrfs_start_workers uses kthread_run, which can block waiting for memory
+ * for a very long time.  It will actually throttle on page writeback,
+ * and so it may not make progress until after our btrfs worker threads
+ * process all of the pending work structs in their queue
+ *
+ * This means we can't use btrfs_start_workers from inside a btrfs worker
+ * thread that is used as part of cleaning dirty memory, which pretty much
+ * involves all of the worker threads.
+ *
+ * Instead we have a helper queue that never has more than one thread,
+ * where we schedule thread start operations.  This worker_start struct
+ * is used to contain the work and hold a pointer to the queue that needs
+ * another worker.
+ */
+struct worker_start {
+	struct btrfs_work work;
+	struct btrfs_workers *queue;
+};
+
+static void start_new_worker_func(struct btrfs_work *work)
+{
+	struct worker_start *start;
+	start = container_of(work, struct worker_start, work);
+	btrfs_start_workers(start->queue, 1);
+	kfree(start);
+}
+
+static int start_new_worker(struct btrfs_workers *queue)
+{
+	struct worker_start *start;
+	int ret;
+
+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return -ENOMEM;
+
+	start->work.func = start_new_worker_func;
+	start->queue = queue;
+	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
+	if (ret)
+		kfree(start);
+	return ret;
+}
+
+/*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
  */
@@ -118,11 +163,13 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 		goto out;
 
 	workers->atomic_start_pending = 0;
-	if (workers->num_workers >= workers->max_workers)
+	if (workers->num_workers + workers->num_workers_starting >=
+	    workers->max_workers)
 		goto out;
 
+	workers->num_workers_starting += 1;
 	spin_unlock_irqrestore(&workers->lock, flags);
-	btrfs_start_workers(workers, 1);
+	start_new_worker(workers);
 	return;
 
 out:
@@ -390,9 +437,11 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 /*
  * simple init on struct btrfs_workers
  */
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+			struct btrfs_workers *async_helper)
 {
 	workers->num_workers = 0;
+	workers->num_workers_starting = 0;
 	INIT_LIST_HEAD(&workers->worker_list);
 	INIT_LIST_HEAD(&workers->idle_list);
 	INIT_LIST_HEAD(&workers->order_list);
@@ -404,14 +453,15 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
 	workers->name = name;
 	workers->ordered = 0;
 	workers->atomic_start_pending = 0;
-	workers->atomic_worker_start = 0;
+	workers->atomic_worker_start = async_helper;
 }
 
 /*
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers,
+				 int num_workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
@@ -444,6 +494,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 		list_add_tail(&worker->worker_list, &workers->idle_list);
 		worker->idle = 1;
 		workers->num_workers++;
+		workers->num_workers_starting--;
+		WARN_ON(workers->num_workers_starting < 0);
 		spin_unlock_irq(&workers->lock);
 	}
 	return 0;
@@ -452,6 +504,14 @@ fail:
 	return ret;
 }
 
+int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+{
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting += num_workers;
+	spin_unlock_irq(&workers->lock);
+	return __btrfs_start_workers(workers, num_workers);
+}
+
 /*
  * run through the list and find a worker thread that doesn't have a lot
  * to do right now.  This can return null if we aren't yet at the thread
@@ -461,7 +521,10 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	struct list_head *next;
-	int enforce_min = workers->num_workers < workers->max_workers;
+	int enforce_min;
+
+	enforce_min = (workers->num_workers + workers->num_workers_starting) <
+		workers->max_workers;
 
 	/*
 	 * if we find an idle thread, don't move it to the end of the
@@ -509,15 +572,17 @@ again:
 	worker = next_worker(workers);
 
 	if (!worker) {
-		if (workers->num_workers >= workers->max_workers) {
+		if (workers->num_workers + workers->num_workers_starting >=
+		    workers->max_workers) {
 			goto fallback;
 		} else if (workers->atomic_worker_start) {
 			workers->atomic_start_pending = 1;
 			goto fallback;
 		} else {
+			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			btrfs_start_workers(workers, 1);
+			__btrfs_start_workers(workers, 1);
 			goto again;
 		}
 	}
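
The deferred-start machinery above is wired up in the disk-io.c hunks below; as a minimal sketch of the intended usage pattern (pool names and sizes here are illustrative, not from the patch):

	/* chain a pool to a one-thread helper so that thread creation
	 * requested from atomic context is deferred to the helper, which
	 * can safely sleep in kthread_run() */
	static struct btrfs_workers helper;
	static struct btrfs_workers endio_pool;

	static void example_setup(int pool_size)
	{
		btrfs_init_workers(&helper, "genwork", 1, NULL);
		btrfs_init_workers(&endio_pool, "endio", pool_size, &helper);
		btrfs_start_workers(&helper, 1);
		btrfs_start_workers(&endio_pool, 1);
	}
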
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index fc089b95ec14..5077746cf85e 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -64,6 +64,8 @@ struct btrfs_workers {
 	/* current number of running workers */
 	int num_workers;
 
+	int num_workers_starting;
+
 	/* max number of workers allowed.  changed by btrfs_start_workers */
 	int max_workers;
 
@@ -78,9 +80,10 @@ struct btrfs_workers {
 
 	/*
 	 * are we allowed to sleep while starting workers or are we required
-	 * to start them at a later time?
+	 * to start them at a later time?  If we can't sleep, this indicates
+	 * which queue we need to use to schedule thread creation.
 	 */
-	int atomic_worker_start;
+	struct btrfs_workers *atomic_worker_start;
 
 	/* list with all the work threads.  The workers on the idle thread
 	 * may be actively servicing jobs, but they haven't yet hit the
@@ -109,7 +112,8 @@ struct btrfs_workers {
 int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
 int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+			struct btrfs_workers *async_starter);
 int btrfs_requeue_work(struct btrfs_work *work);
 void btrfs_set_work_high_prio(struct btrfs_work *work);
 #endif
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 82ee56bba299..f6783a42f010 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -86,6 +86,12 @@ struct btrfs_inode {
 	 * transid of the trans_handle that last modified this inode
 	 */
 	u64 last_trans;
+
+	/*
+	 * log transid when this inode was last modified
+	 */
+	u64 last_sub_trans;
+
 	/*
 	 * transid that last logged this inode
 	 */
@@ -128,6 +134,16 @@ struct btrfs_inode {
 	u64 last_unlink_trans;
 
 	/*
+	 * Counters to keep track of the number of extent items we may use due
+	 * to delalloc and such.  outstanding_extents is the number of extent
+	 * items we think we'll end up using, and reserved_extents is the number
+	 * of extent items we've reserved metadata for.
+	 */
+	spinlock_t accounting_lock;
+	int reserved_extents;
+	int outstanding_extents;
+
+	/*
 	 * ordered_data_close is set by truncate when a file that used
 	 * to have good data has been truncated to zero.  When it is set
 	 * the btrfs file release call will add this inode to the
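
The extent-tree.c hunks below consume these counters: a delalloc reservation is only released while reserved_extents exceeds outstanding_extents. A condensed sketch of that invariant (hypothetical helper, locking elided):

	static int can_unreserve_extent_item(struct btrfs_inode *bi)
	{
		/* keep one reserved extent item per outstanding extent */
		return bi->reserved_extents > bi->outstanding_extents;
	}
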
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80599b4e42bd..444b3e9b92a4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -675,21 +675,28 @@ struct btrfs_space_info {
 				   current allocations */
 	u64 bytes_readonly;	/* total bytes that are read only */
 	u64 bytes_super;	/* total bytes reserved for the super blocks */
-
-	/* delalloc accounting */
-	u64 bytes_delalloc;	/* number of bytes reserved for allocation,
-				   this space is not necessarily reserved yet
-				   by the allocator */
+	u64 bytes_root;		/* the number of bytes needed to commit a
+				   transaction */
 	u64 bytes_may_use;	/* number of bytes that may be used for
-				   delalloc */
+				   delalloc/allocations */
+	u64 bytes_delalloc;	/* number of bytes currently reserved for
+				   delayed allocation */
 
 	int full;		/* indicates that we cannot allocate any more
 				   chunks for this space */
 	int force_alloc;	/* set if we need to force a chunk alloc for
 				   this space */
+	int force_delalloc;	/* make people start doing filemap_flush until
+				   we're under a threshold */
 
 	struct list_head list;
 
+	/* for controlling how we free up space for allocations */
+	wait_queue_head_t allocate_wait;
+	wait_queue_head_t flush_wait;
+	int allocating_chunk;
+	int flushing;
+
 	/* for block groups in our same type */
 	struct list_head block_groups;
 	spinlock_t lock;
@@ -903,6 +910,7 @@ struct btrfs_fs_info {
 	 * A third pool does submit_bio to avoid deadlocking with the other
 	 * two
 	 */
+	struct btrfs_workers generic_worker;
 	struct btrfs_workers workers;
 	struct btrfs_workers delalloc_workers;
 	struct btrfs_workers endio_workers;
@@ -910,6 +918,7 @@ struct btrfs_fs_info {
 	struct btrfs_workers endio_meta_write_workers;
 	struct btrfs_workers endio_write_workers;
 	struct btrfs_workers submit_workers;
+	struct btrfs_workers enospc_workers;
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
 	 * the cow mechanism and make them safe to write.  It happens
@@ -1000,7 +1009,10 @@ struct btrfs_root {
 	atomic_t log_writers;
 	atomic_t log_commit[2];
 	unsigned long log_transid;
+	unsigned long last_log_commit;
 	unsigned long log_batch;
+	pid_t log_start_pid;
+	bool log_multiple_pids;
 
 	u64 objectid;
 	u64 last_trans;
@@ -1141,6 +1153,7 @@ struct btrfs_root {
 #define BTRFS_MOUNT_FLUSHONCOMMIT	(1 << 7)
 #define BTRFS_MOUNT_SSD_SPREAD		(1 << 8)
 #define BTRFS_MOUNT_NOSSD		(1 << 9)
+#define BTRFS_MOUNT_DISCARD		(1 << 10)
 
 #define btrfs_clear_opt(o, opt)	((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)	((o) |= BTRFS_MOUNT_##opt)
@@ -2022,7 +2035,12 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
-int btrfs_check_metadata_free_space(struct btrfs_root *root);
+int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items);
+int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items);
+int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
+					  struct inode *inode, int num_items);
+int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
+					struct inode *inode, int num_items);
 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 				u64 bytes);
 void btrfs_free_reserved_data_space(struct btrfs_root *root,
@@ -2314,7 +2332,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
 void btrfs_orphan_cleanup(struct btrfs_root *root);
 int btrfs_cont_expand(struct inode *inode, loff_t size);
 int btrfs_invalidate_inodes(struct btrfs_root *root);
-extern struct dentry_operations btrfs_dentry_operations;
+extern const struct dentry_operations btrfs_dentry_operations;
 
 /* ioctl.c */
 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -2326,7 +2344,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 			    int skip_pinned);
 int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
-extern struct file_operations btrfs_file_operations;
+extern const struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
 		       u64 start, u64 end, u64 locked_end,
@@ -2357,7 +2375,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
 
 /* acl.c */
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 int btrfs_check_acl(struct inode *inode, int mask);
 #else
 #define btrfs_check_acl NULL
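
Several of the extent-tree.c functions below compare the same sum of btrfs_space_info counters against total_bytes; written out once as a sketch (hypothetical helper mirroring the patch):

	static u64 space_info_used(struct btrfs_space_info *si)
	{
		/* bytes already spoken for, including the new bytes_root
		 * and bytes_delalloc reservations */
		return si->bytes_used + si->bytes_reserved + si->bytes_pinned +
		       si->bytes_readonly + si->bytes_super + si->bytes_root +
		       si->bytes_may_use + si->bytes_delalloc;
	}
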
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 644e796fd643..02b6afbd7450 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -822,14 +822,14 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
-	return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
-				      buf->start + buf->len - 1, WB_SYNC_ALL);
+	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+					buf->start + buf->len - 1);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-	return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
-						  buf->start, buf->start + buf->len - 1);
+	return filemap_fdatawait_range(buf->first_page->mapping,
+				       buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
@@ -917,6 +917,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	atomic_set(&root->log_writers, 0);
 	root->log_batch = 0;
 	root->log_transid = 0;
+	root->last_log_commit = 0;
 	extent_io_tree_init(&root->dirty_log_pages,
 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
 
@@ -1087,6 +1088,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 	WARN_ON(root->log_root);
 	root->log_root = log_root;
 	root->log_transid = 0;
+	root->last_log_commit = 0;
 	return 0;
 }
 
@@ -1630,7 +1632,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->sb = sb;
 	fs_info->max_extent = (u64)-1;
 	fs_info->max_inline = 8192 * 1024;
-	fs_info->metadata_ratio = 8;
+	fs_info->metadata_ratio = 0;
 
 	fs_info->thread_pool_size = min_t(unsigned long,
 					  num_online_cpus() + 2, 8);
@@ -1746,21 +1748,25 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 		err = -EINVAL;
 		goto fail_iput;
 	}
-printk("thread pool is %d\n", fs_info->thread_pool_size);
-	/*
-	 * we need to start all the end_io workers up front because the
-	 * queue work function gets called at interrupt time, and so it
-	 * cannot dynamically grow.
-	 */
+
+	btrfs_init_workers(&fs_info->generic_worker,
+			   "genwork", 1, NULL);
+
 	btrfs_init_workers(&fs_info->workers, "worker",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	btrfs_init_workers(&fs_info->submit_workers, "submit",
 			   min_t(u64, fs_devices->num_devices,
-			   fs_info->thread_pool_size));
+			   fs_info->thread_pool_size),
+			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->enospc_workers, "enospc",
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	/* a higher idle thresh on the submit workers makes it much more
 	 * likely that bios will be send down in a sane order to the
@@ -1774,15 +1780,20 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
 	fs_info->delalloc_workers.idle_thresh = 2;
 	fs_info->delalloc_workers.ordered = 1;
 
-	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
+	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_workers, "endio",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_meta_write_workers,
-			   "endio-meta-write", fs_info->thread_pool_size);
+			   "endio-meta-write", fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
-			   fs_info->thread_pool_size);
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	/*
 	 * endios are largely parallel and should have a very
@@ -1794,12 +1805,8 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
 	fs_info->endio_write_workers.idle_thresh = 2;
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
 
-	fs_info->endio_workers.atomic_worker_start = 1;
-	fs_info->endio_meta_workers.atomic_worker_start = 1;
-	fs_info->endio_write_workers.atomic_worker_start = 1;
-	fs_info->endio_meta_write_workers.atomic_worker_start = 1;
-
 	btrfs_start_workers(&fs_info->workers, 1);
+	btrfs_start_workers(&fs_info->generic_worker, 1);
 	btrfs_start_workers(&fs_info->submit_workers, 1);
 	btrfs_start_workers(&fs_info->delalloc_workers, 1);
 	btrfs_start_workers(&fs_info->fixup_workers, 1);
@@ -1807,6 +1814,7 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
 	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
 	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
 	btrfs_start_workers(&fs_info->endio_write_workers, 1);
+	btrfs_start_workers(&fs_info->enospc_workers, 1);
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2012,6 +2020,7 @@ fail_chunk_root:
 	free_extent_buffer(chunk_root->node);
 	free_extent_buffer(chunk_root->commit_root);
 fail_sb_buffer:
+	btrfs_stop_workers(&fs_info->generic_worker);
 	btrfs_stop_workers(&fs_info->fixup_workers);
 	btrfs_stop_workers(&fs_info->delalloc_workers);
 	btrfs_stop_workers(&fs_info->workers);
@@ -2020,6 +2029,7 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->submit_workers);
+	btrfs_stop_workers(&fs_info->enospc_workers);
 fail_iput:
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
@@ -2437,6 +2447,7 @@ int close_ctree(struct btrfs_root *root)
 
 	iput(fs_info->btree_inode);
 
+	btrfs_stop_workers(&fs_info->generic_worker);
 	btrfs_stop_workers(&fs_info->fixup_workers);
 	btrfs_stop_workers(&fs_info->delalloc_workers);
 	btrfs_stop_workers(&fs_info->workers);
@@ -2445,6 +2456,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->submit_workers);
+	btrfs_stop_workers(&fs_info->enospc_workers);
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
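
The two writeback hunks above drop btrfs' private range helpers for the generic address_space API; the resulting write-then-wait pattern, as a sketch:

	static int write_and_wait_range(struct address_space *mapping,
					loff_t start, loff_t end)
	{
		int ret;

		/* kick off writeback for the byte range, then wait on it */
		ret = filemap_fdatawrite_range(mapping, start, end);
		if (ret)
			return ret;
		return filemap_fdatawait_range(mapping, start, end);
	}
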
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 993f93ff7ba6..94627c4cc193 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -68,6 +68,8 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
 			  struct extent_buffer **must_clean);
 static int find_next_key(struct btrfs_path *path, int level,
 			 struct btrfs_key *key);
+static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+			    int dump_block_groups);
 
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -1566,23 +1568,23 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-#ifdef BIO_RW_DISCARD
 static void btrfs_issue_discard(struct block_device *bdev,
 				u64 start, u64 len)
 {
 	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
 			     DISCARD_FL_BARRIER);
 }
-#endif
 
 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 				u64 num_bytes)
 {
-#ifdef BIO_RW_DISCARD
 	int ret;
 	u64 map_length = num_bytes;
 	struct btrfs_multi_bio *multi = NULL;
 
+	if (!btrfs_test_opt(root, DISCARD))
+		return 0;
+
 	/* Tell the block device(s) that the sectors can be discarded */
 	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
 			      bytenr, &map_length, &multi, 0);
@@ -1602,9 +1604,6 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 	}
 
 	return ret;
-#else
-	return 0;
-#endif
 }
 
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
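
Discard support is now opt-in: btrfs_discard_extent() bails out unless BTRFS_MOUNT_DISCARD is set. The flag would be set with the existing option helpers from ctree.h; the mount-option parsing itself lives in super.c, which is in the diffstat but not shown in this section, so the option name is an assumption here:

	/* presumably done while parsing a "discard" mount option in super.c */
	btrfs_set_opt(info->mount_opt, DISCARD);

	/* ...and tested wherever a discard might be issued */
	if (btrfs_test_opt(root, DISCARD))
		btrfs_issue_discard(bdev, start, len);
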
@@ -2765,67 +2764,448 @@ void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
 				       alloc_target);
 }
 
+static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
+{
+	u64 num_bytes;
+	int level;
+
+	level = BTRFS_MAX_LEVEL - 2;
+	/*
+	 * NOTE: these calculations are absolutely the worst possible case.
+	 * This assumes that _every_ item we insert will require a new leaf, and
+	 * that the tree has grown to its maximum level size.
+	 */
+
+	/*
+	 * for every item we insert we could insert both an extent item and an
+	 * extent ref item.  Then for every item we insert, we will need to cow
+	 * both the original leaf, plus the leaf to the left and right of it.
+	 *
+	 * Unless we are talking about the extent root, then we just want the
+	 * number of items * 2, since we just need the extent item plus its ref.
+	 */
+	if (root == root->fs_info->extent_root)
+		num_bytes = num_items * 2;
+	else
+		num_bytes = (num_items + (2 * num_items)) * 3;
+
+	/*
+	 * num_bytes is total number of leaves we could need times the leaf
+	 * size, and then for every leaf we could end up cow'ing 2 nodes per
+	 * level, down to the leaf level.
+	 */
+	num_bytes = (num_bytes * root->leafsize) +
+		(num_bytes * (level * 2)) * root->nodesize;
+
+	return num_bytes;
+}
+
 /*
- * for now this just makes sure we have at least 5% of our metadata space free
- * for use.
+ * Unreserve metadata space for delalloc.  If we have fewer reserved credits
+ * than we have extents, this function does nothing.
  */
-int btrfs_check_metadata_free_space(struct btrfs_root *root)
+int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
+					  struct inode *inode, int num_items)
 {
 	struct btrfs_fs_info *info = root->fs_info;
 	struct btrfs_space_info *meta_sinfo;
-	u64 alloc_target, thresh;
-	int committed = 0, ret;
+	u64 num_bytes;
+	u64 alloc_target;
+	bool bug = false;
 
 	/* get the space info for where the metadata will live */
 	alloc_target = btrfs_get_alloc_profile(root, 0);
 	meta_sinfo = __find_space_info(info, alloc_target);
-	if (!meta_sinfo)
-		goto alloc;
 
-again:
+	num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
+					   num_items);
+
 	spin_lock(&meta_sinfo->lock);
-	if (!meta_sinfo->full)
-		thresh = meta_sinfo->total_bytes * 80;
-	else
-		thresh = meta_sinfo->total_bytes * 95;
+	spin_lock(&BTRFS_I(inode)->accounting_lock);
+	if (BTRFS_I(inode)->reserved_extents <=
+	    BTRFS_I(inode)->outstanding_extents) {
+		spin_unlock(&BTRFS_I(inode)->accounting_lock);
+		spin_unlock(&meta_sinfo->lock);
+		return 0;
+	}
+	spin_unlock(&BTRFS_I(inode)->accounting_lock);
+
+	BTRFS_I(inode)->reserved_extents--;
+	BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
+
+	if (meta_sinfo->bytes_delalloc < num_bytes) {
+		bug = true;
+		meta_sinfo->bytes_delalloc = 0;
+	} else {
+		meta_sinfo->bytes_delalloc -= num_bytes;
+	}
+	spin_unlock(&meta_sinfo->lock);
+
+	BUG_ON(bug);
 
+	return 0;
+}
+
+static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
+{
+	u64 thresh;
+
+	thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
+		meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
+		meta_sinfo->bytes_super + meta_sinfo->bytes_root +
+		meta_sinfo->bytes_may_use;
+
+	thresh = meta_sinfo->total_bytes - thresh;
+	thresh *= 80;
 	do_div(thresh, 100);
+	if (thresh <= meta_sinfo->bytes_delalloc)
+		meta_sinfo->force_delalloc = 1;
+	else
+		meta_sinfo->force_delalloc = 0;
+}
 
-	if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
-	    meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
-	    meta_sinfo->bytes_super > thresh) {
-		struct btrfs_trans_handle *trans;
-		if (!meta_sinfo->full) {
-			meta_sinfo->force_alloc = 1;
+struct async_flush {
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+	struct btrfs_work work;
+};
+
+static noinline void flush_delalloc_async(struct btrfs_work *work)
+{
+	struct async_flush *async;
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+
+	async = container_of(work, struct async_flush, work);
+	root = async->root;
+	info = async->info;
+
+	btrfs_start_delalloc_inodes(root);
+	wake_up(&info->flush_wait);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+
+	kfree(async);
+}
+
+static void wait_on_flush(struct btrfs_space_info *info)
+{
+	DEFINE_WAIT(wait);
+	u64 used;
+
+	while (1) {
+		prepare_to_wait(&info->flush_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+		spin_lock(&info->lock);
+		if (!info->flushing) {
+			spin_unlock(&info->lock);
+			break;
+		}
+
+		used = info->bytes_used + info->bytes_reserved +
+			info->bytes_pinned + info->bytes_readonly +
+			info->bytes_super + info->bytes_root +
+			info->bytes_may_use + info->bytes_delalloc;
+		if (used < info->total_bytes) {
+			spin_unlock(&info->lock);
+			break;
+		}
+		spin_unlock(&info->lock);
+		schedule();
+	}
+	finish_wait(&info->flush_wait, &wait);
+}
+
+static void flush_delalloc(struct btrfs_root *root,
+			   struct btrfs_space_info *info)
+{
+	struct async_flush *async;
+	bool wait = false;
+
+	spin_lock(&info->lock);
+
+	if (!info->flushing) {
+		info->flushing = 1;
+		init_waitqueue_head(&info->flush_wait);
+	} else {
+		wait = true;
+	}
+
+	spin_unlock(&info->lock);
+
+	if (wait) {
+		wait_on_flush(info);
+		return;
+	}
+
+	async = kzalloc(sizeof(*async), GFP_NOFS);
+	if (!async)
+		goto flush;
+
+	async->root = root;
+	async->info = info;
+	async->work.func = flush_delalloc_async;
+
+	btrfs_queue_worker(&root->fs_info->enospc_workers,
+			   &async->work);
+	wait_on_flush(info);
+	return;
+
+flush:
+	btrfs_start_delalloc_inodes(root);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+}
+
+static int maybe_allocate_chunk(struct btrfs_root *root,
+				struct btrfs_space_info *info)
+{
+	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
+	struct btrfs_trans_handle *trans;
+	bool wait = false;
+	int ret = 0;
+	u64 min_metadata;
+	u64 free_space;
+
+	free_space = btrfs_super_total_bytes(disk_super);
+	/*
+	 * we allow the metadata to grow to a max of either 10gb or 5% of the
+	 * space in the volume.
+	 */
+	min_metadata = min((u64)10 * 1024 * 1024 * 1024,
+			   div64_u64(free_space * 5, 100));
+	if (info->total_bytes >= min_metadata) {
+		spin_unlock(&info->lock);
+		return 0;
+	}
+
+	if (info->full) {
+		spin_unlock(&info->lock);
+		return 0;
+	}
+
+	if (!info->allocating_chunk) {
+		info->force_alloc = 1;
+		info->allocating_chunk = 1;
+		init_waitqueue_head(&info->allocate_wait);
+	} else {
+		wait = true;
+	}
+
+	spin_unlock(&info->lock);
+
+	if (wait) {
+		wait_event(info->allocate_wait,
+			   !info->allocating_chunk);
+		return 1;
+	}
+
+	trans = btrfs_start_transaction(root, 1);
+	if (!trans) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+			     4096 + 2 * 1024 * 1024,
+			     info->flags, 0);
+	btrfs_end_transaction(trans, root);
+	if (ret)
+		goto out;
+out:
+	spin_lock(&info->lock);
+	info->allocating_chunk = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->allocate_wait);
+
+	if (ret)
+		return 0;
+	return 1;
+}
+
+/*
+ * Reserve metadata space for delalloc.
+ */
+int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
+					struct inode *inode, int num_items)
+{
+	struct btrfs_fs_info *info = root->fs_info;
+	struct btrfs_space_info *meta_sinfo;
+	u64 num_bytes;
+	u64 used;
+	u64 alloc_target;
+	int flushed = 0;
+	int force_delalloc;
+
+	/* get the space info for where the metadata will live */
+	alloc_target = btrfs_get_alloc_profile(root, 0);
+	meta_sinfo = __find_space_info(info, alloc_target);
+
+	num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
+					   num_items);
+again:
+	spin_lock(&meta_sinfo->lock);
+
+	force_delalloc = meta_sinfo->force_delalloc;
+
+	if (unlikely(!meta_sinfo->bytes_root))
+		meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
+
+	if (!flushed)
+		meta_sinfo->bytes_delalloc += num_bytes;
+
+	used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
+		meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
+		meta_sinfo->bytes_super + meta_sinfo->bytes_root +
+		meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
+
+	if (used > meta_sinfo->total_bytes) {
+		flushed++;
+
+		if (flushed == 1) {
+			if (maybe_allocate_chunk(root, meta_sinfo))
+				goto again;
+			flushed++;
+		} else {
 			spin_unlock(&meta_sinfo->lock);
-alloc:
-			trans = btrfs_start_transaction(root, 1);
-			if (!trans)
-				return -ENOMEM;
+		}
 
-			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-					     2 * 1024 * 1024, alloc_target, 0);
-			btrfs_end_transaction(trans, root);
-			if (!meta_sinfo) {
-				meta_sinfo = __find_space_info(info,
-							       alloc_target);
-			}
+		if (flushed == 2) {
+			filemap_flush(inode->i_mapping);
+			goto again;
+		} else if (flushed == 3) {
+			flush_delalloc(root, meta_sinfo);
 			goto again;
 		}
+		spin_lock(&meta_sinfo->lock);
+		meta_sinfo->bytes_delalloc -= num_bytes;
 		spin_unlock(&meta_sinfo->lock);
+		printk(KERN_ERR "enospc, has %d, reserved %d\n",
+		       BTRFS_I(inode)->outstanding_extents,
+		       BTRFS_I(inode)->reserved_extents);
+		dump_space_info(meta_sinfo, 0, 0);
+		return -ENOSPC;
+	}
 
-		if (!committed) {
-			committed = 1;
-			trans = btrfs_join_transaction(root, 1);
-			if (!trans)
-				return -ENOMEM;
-			ret = btrfs_commit_transaction(trans, root);
-			if (ret)
-				return ret;
+	BTRFS_I(inode)->reserved_extents++;
+	check_force_delalloc(meta_sinfo);
+	spin_unlock(&meta_sinfo->lock);
+
+	if (!flushed && force_delalloc)
+		filemap_flush(inode->i_mapping);
+
+	return 0;
+}
+
+/*
+ * unreserve num_items number of items worth of metadata space.  This needs to
+ * be paired with btrfs_reserve_metadata_space.
+ *
+ * NOTE: if you have the option, run this _AFTER_ you do a
+ * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
+ * operations which will result in more used metadata, so we want to make sure
+ * we can do that without issue.
+ */
+int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
+{
+	struct btrfs_fs_info *info = root->fs_info;
+	struct btrfs_space_info *meta_sinfo;
+	u64 num_bytes;
+	u64 alloc_target;
+	bool bug = false;
+
+	/* get the space info for where the metadata will live */
+	alloc_target = btrfs_get_alloc_profile(root, 0);
+	meta_sinfo = __find_space_info(info, alloc_target);
+
+	num_bytes = calculate_bytes_needed(root, num_items);
+
+	spin_lock(&meta_sinfo->lock);
+	if (meta_sinfo->bytes_may_use < num_bytes) {
+		bug = true;
+		meta_sinfo->bytes_may_use = 0;
+	} else {
+		meta_sinfo->bytes_may_use -= num_bytes;
+	}
+	spin_unlock(&meta_sinfo->lock);
+
+	BUG_ON(bug);
+
+	return 0;
+}
+
+/*
+ * Reserve some metadata space for use.  We'll calculate the worst case number
+ * of bytes that would be needed to modify num_items number of items.  If we
+ * have space, fantastic, if not, you get -ENOSPC.  Please call
+ * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
+ * items you reserved, since whatever metadata you needed should have already
+ * been allocated.
+ *
+ * This will commit the transaction to make more space if we don't have enough
+ * metadata space.  The only time we don't do this is if we're reserving space
+ * inside of a transaction, then we will just return -ENOSPC and it is the
+ * caller's responsibility to handle it properly.
+ */
+int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
+{
+	struct btrfs_fs_info *info = root->fs_info;
+	struct btrfs_space_info *meta_sinfo;
+	u64 num_bytes;
+	u64 used;
+	u64 alloc_target;
+	int retries = 0;
+
+	/* get the space info for where the metadata will live */
+	alloc_target = btrfs_get_alloc_profile(root, 0);
+	meta_sinfo = __find_space_info(info, alloc_target);
+
+	num_bytes = calculate_bytes_needed(root, num_items);
+again:
+	spin_lock(&meta_sinfo->lock);
+
+	if (unlikely(!meta_sinfo->bytes_root))
+		meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
+
+	if (!retries)
+		meta_sinfo->bytes_may_use += num_bytes;
+
+	used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
+		meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
+		meta_sinfo->bytes_super + meta_sinfo->bytes_root +
+		meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
+
+	if (used > meta_sinfo->total_bytes) {
+		retries++;
+		if (retries == 1) {
+			if (maybe_allocate_chunk(root, meta_sinfo))
+				goto again;
+			retries++;
+		} else {
+			spin_unlock(&meta_sinfo->lock);
+		}
+
+		if (retries == 2) {
+			flush_delalloc(root, meta_sinfo);
 			goto again;
 		}
+		spin_lock(&meta_sinfo->lock);
+		meta_sinfo->bytes_may_use -= num_bytes;
+		spin_unlock(&meta_sinfo->lock);
+
+		dump_space_info(meta_sinfo, 0, 0);
 		return -ENOSPC;
 	}
+
+	check_force_delalloc(meta_sinfo);
 	spin_unlock(&meta_sinfo->lock);
 
 	return 0;
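
A worked instance of calculate_bytes_needed() above, with assumed parameters (4096-byte leaves and nodes, BTRFS_MAX_LEVEL == 8, so level == 6): reserving one item on a non-extent root gives num_bytes = (1 + 2 * 1) * 3 = 9 potential leaves, and then 9 * 4096 + (9 * (6 * 2)) * 4096 = 479232 bytes, deliberately a worst case. A hypothetical caller brackets its metadata-modifying operation with the new pair:

	ret = btrfs_reserve_metadata_space(root, 1);
	if (ret)
		return ret;	/* -ENOSPC, nothing was reserved */

	ret = modify_one_item(trans, root);	/* hypothetical operation */

	/* unreserve the _SAME_ item count, ideally after ending the
	 * transaction, per the comment above */
	btrfs_unreserve_metadata_space(root, 1);
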
@@ -2888,7 +3268,7 @@ alloc:
 	spin_unlock(&data_sinfo->lock);
 
 	/* commit the current transaction and try again */
-	if (!committed) {
+	if (!committed && !root->fs_info->open_ioctl_trans) {
 		committed = 1;
 		trans = btrfs_join_transaction(root, 1);
 		if (!trans)
@@ -2916,7 +3296,7 @@ alloc:
 	BTRFS_I(inode)->reserved_bytes += bytes;
 	spin_unlock(&data_sinfo->lock);
 
-	return btrfs_check_metadata_free_space(root);
+	return 0;
 }
 
 /*
@@ -3015,17 +3395,15 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	BUG_ON(!space_info);
 
 	spin_lock(&space_info->lock);
-	if (space_info->force_alloc) {
+	if (space_info->force_alloc)
 		force = 1;
-		space_info->force_alloc = 0;
-	}
 	if (space_info->full) {
 		spin_unlock(&space_info->lock);
 		goto out;
 	}
 
 	thresh = space_info->total_bytes - space_info->bytes_readonly;
-	thresh = div_factor(thresh, 6);
+	thresh = div_factor(thresh, 8);
 	if (!force &&
 	    (space_info->bytes_used + space_info->bytes_pinned +
 	     space_info->bytes_reserved + alloc_bytes) < thresh) {
@@ -3039,7 +3417,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	 * we keep a reasonable number of metadata chunks allocated in the
 	 * FS as well.
 	 */
-	if (flags & BTRFS_BLOCK_GROUP_DATA) {
+	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
 		fs_info->data_chunk_allocations++;
 		if (!(fs_info->data_chunk_allocations %
 		      fs_info->metadata_ratio))
@@ -3047,8 +3425,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 
 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
+	spin_lock(&space_info->lock);
 	if (ret)
 		space_info->full = 1;
+	space_info->force_alloc = 0;
+	spin_unlock(&space_info->lock);
 out:
 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 	return ret;
@@ -3306,6 +3687,14 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
 	if (is_data)
 		goto pinit;
 
+	/*
+	 * discard is sloooow, and so triggering discards on
+	 * individual btree blocks isn't a good plan.  Just
+	 * pin everything in discard mode.
+	 */
+	if (btrfs_test_opt(root, DISCARD))
+		goto pinit;
+
 	buf = btrfs_find_tree_block(root, bytenr, num_bytes);
 	if (!buf)
 		goto pinit;
@@ -3713,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 }
 
 enum btrfs_loop_type {
-	LOOP_CACHED_ONLY = 0,
+	LOOP_FIND_IDEAL = 0,
 	LOOP_CACHING_NOWAIT = 1,
 	LOOP_CACHING_WAIT = 2,
 	LOOP_ALLOC_CHUNK = 3,
@@ -3742,11 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *block_group = NULL;
 	int empty_cluster = 2 * 1024 * 1024;
 	int allowed_chunk_alloc = 0;
+	int done_chunk_alloc = 0;
 	struct btrfs_space_info *space_info;
 	int last_ptr_loop = 0;
 	int loop = 0;
 	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
+	bool failed_alloc = false;
+	u64 ideal_cache_percent = 0;
+	u64 ideal_cache_offset = 0;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -3782,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		empty_cluster = 0;
 
 	if (search_start == hint_byte) {
+ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
+		 *
+		 * However if we are re-searching with an ideal block group
+		 * picked out then we don't care that the block group is cached.
 		 */
 		if (block_group && block_group_bits(block_group, data) &&
-		    block_group_cache_done(block_group)) {
+		    (block_group->cached != BTRFS_CACHE_NO ||
+		     search_start == ideal_cache_offset)) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
 			    block_group->ro) {
@@ -3801,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 				 */
 				btrfs_put_block_group(block_group);
 				up_read(&space_info->groups_sem);
-			} else
+			} else {
 				goto have_block_group;
+			}
 		} else if (block_group) {
 			btrfs_put_block_group(block_group);
 		}
 	}
-
 search:
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -3819,28 +4217,45 @@ search:
 
 have_block_group:
 		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+			u64 free_percent;
+
+			free_percent = btrfs_block_group_used(&block_group->item);
+			free_percent *= 100;
+			free_percent = div64_u64(free_percent,
+						 block_group->key.offset);
+			free_percent = 100 - free_percent;
+			if (free_percent > ideal_cache_percent &&
+			    likely(!block_group->ro)) {
+				ideal_cache_offset = block_group->key.objectid;
+				ideal_cache_percent = free_percent;
+			}
+
 			/*
-			 * we want to start caching kthreads, but not too many
-			 * right off the bat so we don't overwhelm the system,
-			 * so only start them if there are less than 2 and we're
-			 * in the initial allocation phase.
+			 * We only want to start kthread caching if we are at
+			 * the point where we will wait for caching to make
+			 * progress, or if our ideal search is over and we've
+			 * found somebody to start caching.
 			 */
 			if (loop > LOOP_CACHING_NOWAIT ||
-			    atomic_read(&space_info->caching_threads) < 2) {
+			    (loop > LOOP_FIND_IDEAL &&
+			     atomic_read(&space_info->caching_threads) < 2)) {
 				ret = cache_block_group(block_group);
 				BUG_ON(ret);
 			}
-		}
-
-		cached = block_group_cache_done(block_group);
-		if (unlikely(!cached)) {
 			found_uncached_bg = true;
 
-			/* if we only want cached bgs, loop */
-			if (loop == LOOP_CACHED_ONLY)
+			/*
+			 * If loop is set for cached only, try the next block
+			 * group.
+			 */
+			if (loop == LOOP_FIND_IDEAL)
 				goto loop;
 		}
 
+		cached = block_group_cache_done(block_group);
+		if (unlikely(!cached))
+			found_uncached_bg = true;
+
 		if (unlikely(block_group->ro))
 			goto loop;
 
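
A worked instance of the free_percent math above (numbers assumed): for a 1GiB block group with 896MiB used, free_percent = 896 * 100 / 1024 = 87, then 100 - 87 = 13 percent free; the candidate with the highest free percentage among uncached, non-read-only groups becomes the ideal_cache_offset target for a later re-search.
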
@@ -3951,14 +4366,23 @@ refill_cluster:
3951 4366
3952 offset = btrfs_find_space_for_alloc(block_group, search_start, 4367 offset = btrfs_find_space_for_alloc(block_group, search_start,
3953 num_bytes, empty_size); 4368 num_bytes, empty_size);
3954 if (!offset && (cached || (!cached && 4369 /*
3955 loop == LOOP_CACHING_NOWAIT))) { 4370 * If we didn't find a chunk, and we haven't failed on this
3956 goto loop; 4371 * block group before, and this block group is in the middle of
3957 } else if (!offset && (!cached && 4372 * caching and we are ok with waiting, then go ahead and wait
3958 loop > LOOP_CACHING_NOWAIT)) { 4373 * for progress to be made, and set failed_alloc to true.
4374 *
4375 * If failed_alloc is true then we've already waited on this
4376 * block group once and should move on to the next block group.
4377 */
4378 if (!offset && !failed_alloc && !cached &&
4379 loop > LOOP_CACHING_NOWAIT) {
3959 wait_block_group_cache_progress(block_group, 4380 wait_block_group_cache_progress(block_group,
3960 num_bytes + empty_size); 4381 num_bytes + empty_size);
4382 failed_alloc = true;
3961 goto have_block_group; 4383 goto have_block_group;
4384 } else if (!offset) {
4385 goto loop;
3962 } 4386 }
3963checks: 4387checks:
3964 search_start = stripe_align(root, offset); 4388 search_start = stripe_align(root, offset);
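
A small userspace model of the failed_alloc handshake introduced above, with stand-in helpers: wait on a still-caching block group at most once, then give up on it and let the caller move to the next group.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct group { bool cached; uint64_t free; };

static bool try_alloc(struct group *g, uint64_t bytes)
{
	if (g->free < bytes)
		return false;
	g->free -= bytes;
	return true;
}

/* stand-in: caching progress reveals more free space */
static void wait_for_progress(struct group *g, uint64_t bytes)
{
	g->free += bytes;
}

static bool alloc_with_one_wait(struct group *g, uint64_t bytes)
{
	bool failed_alloc = false;

again:
	if (try_alloc(g, bytes))
		return true;
	if (!failed_alloc && !g->cached) {
		wait_for_progress(g, bytes);
		failed_alloc = true;	/* wait at most once per group */
		goto again;
	}
	return false;			/* caller tries the next group */
}

int main(void)
{
	struct group g = { .cached = false, .free = 0 };

	printf("%s\n", alloc_with_one_wait(&g, 4096) ? "got it" : "next bg");
	return 0;
}
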
@@ -4006,13 +4430,16 @@ checks:
4006 break; 4430 break;
4007loop: 4431loop:
4008 failed_cluster_refill = false; 4432 failed_cluster_refill = false;
4433 failed_alloc = false;
4009 btrfs_put_block_group(block_group); 4434 btrfs_put_block_group(block_group);
4010 } 4435 }
4011 up_read(&space_info->groups_sem); 4436 up_read(&space_info->groups_sem);
4012 4437
4013 /* LOOP_CACHED_ONLY, only search fully cached block groups 4438 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
4014 * LOOP_CACHING_NOWAIT, search partially cached block groups, but 4439 * for them to make caching progress. Also
4015 * dont wait foR them to finish caching 4440 * determine the best possible bg to cache
4441 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
4442 * caching kthreads as we move along
4016 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 4443 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4017 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again 4444 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4018 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 4445 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
@@ -4021,12 +4448,47 @@ loop:
4021 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && 4448 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4022 (found_uncached_bg || empty_size || empty_cluster || 4449 (found_uncached_bg || empty_size || empty_cluster ||
4023 allowed_chunk_alloc)) { 4450 allowed_chunk_alloc)) {
4024 if (found_uncached_bg) { 4451 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
4025 found_uncached_bg = false; 4452 found_uncached_bg = false;
4026 if (loop < LOOP_CACHING_WAIT) { 4453 loop++;
4027 loop++; 4454 if (!ideal_cache_percent &&
4455 atomic_read(&space_info->caching_threads))
4028 goto search; 4456 goto search;
4029 } 4457
4458 /*
 4459 * One of the following two things has happened so far
4460 *
4461 * 1) We found an ideal block group for caching that
4462 * is mostly full and will cache quickly, so we might
4463 * as well wait for it.
4464 *
4465 * 2) We searched for cached only and we didn't find
4466 * anything, and we didn't start any caching kthreads
4467 * either, so chances are we will loop through and
4468 * start a couple caching kthreads, and then come back
4469 * around and just wait for them. This will be slower
4470 * because we will have 2 caching kthreads reading at
4471 * the same time when we could have just started one
4472 * and waited for it to get far enough to give us an
4473 * allocation, so go ahead and go to the wait caching
4474 * loop.
4475 */
4476 loop = LOOP_CACHING_WAIT;
4477 search_start = ideal_cache_offset;
4478 ideal_cache_percent = 0;
4479 goto ideal_cache;
4480 } else if (loop == LOOP_FIND_IDEAL) {
4481 /*
 4482 * Didn't find an uncached bg, wait on anything we find
4483 * next.
4484 */
4485 loop = LOOP_CACHING_WAIT;
4486 goto search;
4487 }
4488
4489 if (loop < LOOP_CACHING_WAIT) {
4490 loop++;
4491 goto search;
4030 } 4492 }
4031 4493
4032 if (loop == LOOP_ALLOC_CHUNK) { 4494 if (loop == LOOP_ALLOC_CHUNK) {
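
A compact restatement of the escalation ladder the loop label walks; the enumerator names all appear in the hunks above, while their ordering as a C enum is a sketch (numeric values are assumptions):

enum alloc_loop {
	LOOP_FIND_IDEAL = 0,	/* cached/caching bgs only; also pick the
				   best bg to start caching */
	LOOP_CACHING_NOWAIT,	/* use partial caches, kick caching kthreads */
	LOOP_CACHING_WAIT,	/* wait for our bg's caching to progress */
	LOOP_ALLOC_CHUNK,	/* force a chunk allocation and retry */
	LOOP_NO_EMPTY_SIZE,	/* zero empty_size/empty_cluster, last try */
};
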
@@ -4038,7 +4500,8 @@ loop:
4038 ret = do_chunk_alloc(trans, root, num_bytes + 4500 ret = do_chunk_alloc(trans, root, num_bytes +
4039 2 * 1024 * 1024, data, 1); 4501 2 * 1024 * 1024, data, 1);
4040 allowed_chunk_alloc = 0; 4502 allowed_chunk_alloc = 0;
4041 } else { 4503 done_chunk_alloc = 1;
4504 } else if (!done_chunk_alloc) {
4042 space_info->force_alloc = 1; 4505 space_info->force_alloc = 1;
4043 } 4506 }
4044 4507
@@ -4063,21 +4526,32 @@ loop:
4063 return ret; 4526 return ret;
4064} 4527}
4065 4528
4066static void dump_space_info(struct btrfs_space_info *info, u64 bytes) 4529static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4530 int dump_block_groups)
4067{ 4531{
4068 struct btrfs_block_group_cache *cache; 4532 struct btrfs_block_group_cache *cache;
4069 4533
4534 spin_lock(&info->lock);
4070 printk(KERN_INFO "space_info has %llu free, is %sfull\n", 4535 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4071 (unsigned long long)(info->total_bytes - info->bytes_used - 4536 (unsigned long long)(info->total_bytes - info->bytes_used -
4072 info->bytes_pinned - info->bytes_reserved), 4537 info->bytes_pinned - info->bytes_reserved -
4538 info->bytes_super),
4073 (info->full) ? "" : "not "); 4539 (info->full) ? "" : "not ");
4074 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu," 4540 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
4075 " may_use=%llu, used=%llu\n", 4541 " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
4542 "\n",
4076 (unsigned long long)info->total_bytes, 4543 (unsigned long long)info->total_bytes,
4077 (unsigned long long)info->bytes_pinned, 4544 (unsigned long long)info->bytes_pinned,
4078 (unsigned long long)info->bytes_delalloc, 4545 (unsigned long long)info->bytes_delalloc,
4079 (unsigned long long)info->bytes_may_use, 4546 (unsigned long long)info->bytes_may_use,
4080 (unsigned long long)info->bytes_used); 4547 (unsigned long long)info->bytes_used,
4548 (unsigned long long)info->bytes_root,
4549 (unsigned long long)info->bytes_super,
4550 (unsigned long long)info->bytes_reserved);
4551 spin_unlock(&info->lock);
4552
4553 if (!dump_block_groups)
4554 return;
4081 4555
4082 down_read(&info->groups_sem); 4556 down_read(&info->groups_sem);
4083 list_for_each_entry(cache, &info->block_groups, list) { 4557 list_for_each_entry(cache, &info->block_groups, list) {
@@ -4145,7 +4619,7 @@ again:
4145 printk(KERN_ERR "btrfs allocation failed flags %llu, " 4619 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4146 "wanted %llu\n", (unsigned long long)data, 4620 "wanted %llu\n", (unsigned long long)data,
4147 (unsigned long long)num_bytes); 4621 (unsigned long long)num_bytes);
4148 dump_space_info(sinfo, num_bytes); 4622 dump_space_info(sinfo, num_bytes, 1);
4149 } 4623 }
4150 4624
4151 return ret; 4625 return ret;
@@ -4506,6 +4980,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4506 u64 bytenr; 4980 u64 bytenr;
4507 u64 generation; 4981 u64 generation;
4508 u64 refs; 4982 u64 refs;
4983 u64 flags;
4509 u64 last = 0; 4984 u64 last = 0;
4510 u32 nritems; 4985 u32 nritems;
4511 u32 blocksize; 4986 u32 blocksize;
@@ -4543,15 +5018,19 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4543 generation <= root->root_key.offset) 5018 generation <= root->root_key.offset)
4544 continue; 5019 continue;
4545 5020
5021 /* We don't lock the tree block, it's OK to be racy here */
5022 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5023 &refs, &flags);
5024 BUG_ON(ret);
5025 BUG_ON(refs == 0);
5026
4546 if (wc->stage == DROP_REFERENCE) { 5027 if (wc->stage == DROP_REFERENCE) {
4547 ret = btrfs_lookup_extent_info(trans, root,
4548 bytenr, blocksize,
4549 &refs, NULL);
4550 BUG_ON(ret);
4551 BUG_ON(refs == 0);
4552 if (refs == 1) 5028 if (refs == 1)
4553 goto reada; 5029 goto reada;
4554 5030
5031 if (wc->level == 1 &&
5032 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5033 continue;
4555 if (!wc->update_ref || 5034 if (!wc->update_ref ||
4556 generation <= root->root_key.offset) 5035 generation <= root->root_key.offset)
4557 continue; 5036 continue;
@@ -4560,6 +5039,10 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4560 &wc->update_progress); 5039 &wc->update_progress);
4561 if (ret < 0) 5040 if (ret < 0)
4562 continue; 5041 continue;
5042 } else {
5043 if (wc->level == 1 &&
5044 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5045 continue;
4563 } 5046 }
4564reada: 5047reada:
4565 ret = readahead_tree_block(root, bytenr, blocksize, 5048 ret = readahead_tree_block(root, bytenr, blocksize,
@@ -4583,7 +5066,7 @@ reada:
4583static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 5066static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4584 struct btrfs_root *root, 5067 struct btrfs_root *root,
4585 struct btrfs_path *path, 5068 struct btrfs_path *path,
4586 struct walk_control *wc) 5069 struct walk_control *wc, int lookup_info)
4587{ 5070{
4588 int level = wc->level; 5071 int level = wc->level;
4589 struct extent_buffer *eb = path->nodes[level]; 5072 struct extent_buffer *eb = path->nodes[level];
@@ -4598,8 +5081,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4598 * when reference count of tree block is 1, it won't increase 5081 * when reference count of tree block is 1, it won't increase
4599 * again. once full backref flag is set, we never clear it. 5082 * again. once full backref flag is set, we never clear it.
4600 */ 5083 */
4601 if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 5084 if (lookup_info &&
4602 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) { 5085 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5086 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
4603 BUG_ON(!path->locks[level]); 5087 BUG_ON(!path->locks[level]);
4604 ret = btrfs_lookup_extent_info(trans, root, 5088 ret = btrfs_lookup_extent_info(trans, root,
4605 eb->start, eb->len, 5089 eb->start, eb->len,
@@ -4660,7 +5144,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4660static noinline int do_walk_down(struct btrfs_trans_handle *trans, 5144static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4661 struct btrfs_root *root, 5145 struct btrfs_root *root,
4662 struct btrfs_path *path, 5146 struct btrfs_path *path,
4663 struct walk_control *wc) 5147 struct walk_control *wc, int *lookup_info)
4664{ 5148{
4665 u64 bytenr; 5149 u64 bytenr;
4666 u64 generation; 5150 u64 generation;
@@ -4680,8 +5164,10 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4680 * for the subtree 5164 * for the subtree
4681 */ 5165 */
4682 if (wc->stage == UPDATE_BACKREF && 5166 if (wc->stage == UPDATE_BACKREF &&
4683 generation <= root->root_key.offset) 5167 generation <= root->root_key.offset) {
5168 *lookup_info = 1;
4684 return 1; 5169 return 1;
5170 }
4685 5171
4686 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 5172 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
4687 blocksize = btrfs_level_size(root, level - 1); 5173 blocksize = btrfs_level_size(root, level - 1);
@@ -4694,14 +5180,19 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4694 btrfs_tree_lock(next); 5180 btrfs_tree_lock(next);
4695 btrfs_set_lock_blocking(next); 5181 btrfs_set_lock_blocking(next);
4696 5182
4697 if (wc->stage == DROP_REFERENCE) { 5183 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
4698 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 5184 &wc->refs[level - 1],
4699 &wc->refs[level - 1], 5185 &wc->flags[level - 1]);
4700 &wc->flags[level - 1]); 5186 BUG_ON(ret);
4701 BUG_ON(ret); 5187 BUG_ON(wc->refs[level - 1] == 0);
4702 BUG_ON(wc->refs[level - 1] == 0); 5188 *lookup_info = 0;
4703 5189
5190 if (wc->stage == DROP_REFERENCE) {
4704 if (wc->refs[level - 1] > 1) { 5191 if (wc->refs[level - 1] > 1) {
5192 if (level == 1 &&
5193 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5194 goto skip;
5195
4705 if (!wc->update_ref || 5196 if (!wc->update_ref ||
4706 generation <= root->root_key.offset) 5197 generation <= root->root_key.offset)
4707 goto skip; 5198 goto skip;
@@ -4715,12 +5206,17 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4715 wc->stage = UPDATE_BACKREF; 5206 wc->stage = UPDATE_BACKREF;
4716 wc->shared_level = level - 1; 5207 wc->shared_level = level - 1;
4717 } 5208 }
5209 } else {
5210 if (level == 1 &&
5211 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5212 goto skip;
4718 } 5213 }
4719 5214
4720 if (!btrfs_buffer_uptodate(next, generation)) { 5215 if (!btrfs_buffer_uptodate(next, generation)) {
4721 btrfs_tree_unlock(next); 5216 btrfs_tree_unlock(next);
4722 free_extent_buffer(next); 5217 free_extent_buffer(next);
4723 next = NULL; 5218 next = NULL;
5219 *lookup_info = 1;
4724 } 5220 }
4725 5221
4726 if (!next) { 5222 if (!next) {
@@ -4743,21 +5239,22 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4743skip: 5239skip:
4744 wc->refs[level - 1] = 0; 5240 wc->refs[level - 1] = 0;
4745 wc->flags[level - 1] = 0; 5241 wc->flags[level - 1] = 0;
5242 if (wc->stage == DROP_REFERENCE) {
5243 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5244 parent = path->nodes[level]->start;
5245 } else {
5246 BUG_ON(root->root_key.objectid !=
5247 btrfs_header_owner(path->nodes[level]));
5248 parent = 0;
5249 }
4746 5250
4747 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 5251 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
4748 parent = path->nodes[level]->start; 5252 root->root_key.objectid, level - 1, 0);
4749 } else { 5253 BUG_ON(ret);
4750 BUG_ON(root->root_key.objectid !=
4751 btrfs_header_owner(path->nodes[level]));
4752 parent = 0;
4753 } 5254 }
4754
4755 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
4756 root->root_key.objectid, level - 1, 0);
4757 BUG_ON(ret);
4758
4759 btrfs_tree_unlock(next); 5255 btrfs_tree_unlock(next);
4760 free_extent_buffer(next); 5256 free_extent_buffer(next);
5257 *lookup_info = 1;
4761 return 1; 5258 return 1;
4762} 5259}
4763 5260
@@ -4871,6 +5368,7 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4871 struct walk_control *wc) 5368 struct walk_control *wc)
4872{ 5369{
4873 int level = wc->level; 5370 int level = wc->level;
5371 int lookup_info = 1;
4874 int ret; 5372 int ret;
4875 5373
4876 while (level >= 0) { 5374 while (level >= 0) {
@@ -4878,14 +5376,14 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4878 btrfs_header_nritems(path->nodes[level])) 5376 btrfs_header_nritems(path->nodes[level]))
4879 break; 5377 break;
4880 5378
4881 ret = walk_down_proc(trans, root, path, wc); 5379 ret = walk_down_proc(trans, root, path, wc, lookup_info);
4882 if (ret > 0) 5380 if (ret > 0)
4883 break; 5381 break;
4884 5382
4885 if (level == 0) 5383 if (level == 0)
4886 break; 5384 break;
4887 5385
4888 ret = do_walk_down(trans, root, path, wc); 5386 ret = do_walk_down(trans, root, path, wc, &lookup_info);
4889 if (ret > 0) { 5387 if (ret > 0) {
4890 path->slots[level]++; 5388 path->slots[level]++;
4891 continue; 5389 continue;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0cb88f8146ea..96577e8bf9fd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -280,6 +280,14 @@ static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
280 return NULL; 280 return NULL;
281} 281}
282 282
283static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
284 struct extent_state *other)
285{
286 if (tree->ops && tree->ops->merge_extent_hook)
287 tree->ops->merge_extent_hook(tree->mapping->host, new,
288 other);
289}
290
283/* 291/*
284 * utility function to look for merge candidates inside a given range. 292 * utility function to look for merge candidates inside a given range.
285 * Any extents with matching state are merged together into a single 293 * Any extents with matching state are merged together into a single
@@ -303,6 +311,7 @@ static int merge_state(struct extent_io_tree *tree,
303 other = rb_entry(other_node, struct extent_state, rb_node); 311 other = rb_entry(other_node, struct extent_state, rb_node);
304 if (other->end == state->start - 1 && 312 if (other->end == state->start - 1 &&
305 other->state == state->state) { 313 other->state == state->state) {
314 merge_cb(tree, state, other);
306 state->start = other->start; 315 state->start = other->start;
307 other->tree = NULL; 316 other->tree = NULL;
308 rb_erase(&other->rb_node, &tree->state); 317 rb_erase(&other->rb_node, &tree->state);
@@ -314,33 +323,37 @@ static int merge_state(struct extent_io_tree *tree,
314 other = rb_entry(other_node, struct extent_state, rb_node); 323 other = rb_entry(other_node, struct extent_state, rb_node);
315 if (other->start == state->end + 1 && 324 if (other->start == state->end + 1 &&
316 other->state == state->state) { 325 other->state == state->state) {
326 merge_cb(tree, state, other);
317 other->start = state->start; 327 other->start = state->start;
318 state->tree = NULL; 328 state->tree = NULL;
319 rb_erase(&state->rb_node, &tree->state); 329 rb_erase(&state->rb_node, &tree->state);
320 free_extent_state(state); 330 free_extent_state(state);
331 state = NULL;
321 } 332 }
322 } 333 }
334
323 return 0; 335 return 0;
324} 336}
325 337
326static void set_state_cb(struct extent_io_tree *tree, 338static int set_state_cb(struct extent_io_tree *tree,
327 struct extent_state *state, 339 struct extent_state *state,
328 unsigned long bits) 340 unsigned long bits)
329{ 341{
330 if (tree->ops && tree->ops->set_bit_hook) { 342 if (tree->ops && tree->ops->set_bit_hook) {
331 tree->ops->set_bit_hook(tree->mapping->host, state->start, 343 return tree->ops->set_bit_hook(tree->mapping->host,
332 state->end, state->state, bits); 344 state->start, state->end,
345 state->state, bits);
333 } 346 }
347
348 return 0;
334} 349}
335 350
336static void clear_state_cb(struct extent_io_tree *tree, 351static void clear_state_cb(struct extent_io_tree *tree,
337 struct extent_state *state, 352 struct extent_state *state,
338 unsigned long bits) 353 unsigned long bits)
339{ 354{
340 if (tree->ops && tree->ops->clear_bit_hook) { 355 if (tree->ops && tree->ops->clear_bit_hook)
341 tree->ops->clear_bit_hook(tree->mapping->host, state->start, 356 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
342 state->end, state->state, bits);
343 }
344} 357}
345 358
346/* 359/*
@@ -358,6 +371,7 @@ static int insert_state(struct extent_io_tree *tree,
358 int bits) 371 int bits)
359{ 372{
360 struct rb_node *node; 373 struct rb_node *node;
374 int ret;
361 375
362 if (end < start) { 376 if (end < start) {
363 printk(KERN_ERR "btrfs end < start %llu %llu\n", 377 printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -365,11 +379,14 @@ static int insert_state(struct extent_io_tree *tree,
365 (unsigned long long)start); 379 (unsigned long long)start);
366 WARN_ON(1); 380 WARN_ON(1);
367 } 381 }
368 if (bits & EXTENT_DIRTY)
369 tree->dirty_bytes += end - start + 1;
370 state->start = start; 382 state->start = start;
371 state->end = end; 383 state->end = end;
372 set_state_cb(tree, state, bits); 384 ret = set_state_cb(tree, state, bits);
385 if (ret)
386 return ret;
387
388 if (bits & EXTENT_DIRTY)
389 tree->dirty_bytes += end - start + 1;
373 state->state |= bits; 390 state->state |= bits;
374 node = tree_insert(&tree->state, end, &state->rb_node); 391 node = tree_insert(&tree->state, end, &state->rb_node);
375 if (node) { 392 if (node) {
@@ -387,6 +404,15 @@ static int insert_state(struct extent_io_tree *tree,
387 return 0; 404 return 0;
388} 405}
389 406
407static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
408 u64 split)
409{
410 if (tree->ops && tree->ops->split_extent_hook)
411 return tree->ops->split_extent_hook(tree->mapping->host,
412 orig, split);
413 return 0;
414}
415
390/* 416/*
391 * split a given extent state struct in two, inserting the preallocated 417 * split a given extent state struct in two, inserting the preallocated
392 * struct 'prealloc' as the newly created second half. 'split' indicates an 418 * struct 'prealloc' as the newly created second half. 'split' indicates an
@@ -405,6 +431,9 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
405 struct extent_state *prealloc, u64 split) 431 struct extent_state *prealloc, u64 split)
406{ 432{
407 struct rb_node *node; 433 struct rb_node *node;
434
435 split_cb(tree, orig, split);
436
408 prealloc->start = orig->start; 437 prealloc->start = orig->start;
409 prealloc->end = split - 1; 438 prealloc->end = split - 1;
410 prealloc->state = orig->state; 439 prealloc->state = orig->state;
@@ -431,7 +460,8 @@ static int clear_state_bit(struct extent_io_tree *tree,
431 struct extent_state *state, int bits, int wake, 460 struct extent_state *state, int bits, int wake,
432 int delete) 461 int delete)
433{ 462{
434 int ret = state->state & bits; 463 int bits_to_clear = bits & ~EXTENT_DO_ACCOUNTING;
464 int ret = state->state & bits_to_clear;
435 465
436 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { 466 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
437 u64 range = state->end - state->start + 1; 467 u64 range = state->end - state->start + 1;
@@ -439,7 +469,7 @@ static int clear_state_bit(struct extent_io_tree *tree,
439 tree->dirty_bytes -= range; 469 tree->dirty_bytes -= range;
440 } 470 }
441 clear_state_cb(tree, state, bits); 471 clear_state_cb(tree, state, bits);
442 state->state &= ~bits; 472 state->state &= ~bits_to_clear;
443 if (wake) 473 if (wake)
444 wake_up(&state->wq); 474 wake_up(&state->wq);
445 if (delete || state->state == 0) { 475 if (delete || state->state == 0) {
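
A toy model of the masking above, under the reading that EXTENT_DO_ACCOUNTING acts as a command bit for the clear path rather than a stored state bit; its value matches the extent_io.h hunk below, while EXTENT_DIRTY's value is assumed for the demo.

#include <assert.h>

#define EXTENT_DIRTY		(1 << 0)	/* assumed for the demo */
#define EXTENT_DO_ACCOUNTING	(1 << 11)	/* from the header hunk */

static unsigned long clear_state_bits(unsigned long state, int bits)
{
	int bits_to_clear = bits & ~EXTENT_DO_ACCOUNTING;

	return state & ~bits_to_clear;
}

int main(void)
{
	/* DO_ACCOUNTING rides along as a command, never lands in state */
	assert(clear_state_bits(EXTENT_DIRTY,
				EXTENT_DIRTY | EXTENT_DO_ACCOUNTING) == 0);
	return 0;
}
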
@@ -542,8 +572,8 @@ hit_next:
542 if (err) 572 if (err)
543 goto out; 573 goto out;
544 if (state->end <= end) { 574 if (state->end <= end) {
545 set |= clear_state_bit(tree, state, bits, 575 set |= clear_state_bit(tree, state, bits, wake,
546 wake, delete); 576 delete);
547 if (last_end == (u64)-1) 577 if (last_end == (u64)-1)
548 goto out; 578 goto out;
549 start = last_end + 1; 579 start = last_end + 1;
@@ -561,12 +591,11 @@ hit_next:
561 prealloc = alloc_extent_state(GFP_ATOMIC); 591 prealloc = alloc_extent_state(GFP_ATOMIC);
562 err = split_state(tree, state, prealloc, end + 1); 592 err = split_state(tree, state, prealloc, end + 1);
563 BUG_ON(err == -EEXIST); 593 BUG_ON(err == -EEXIST);
564
565 if (wake) 594 if (wake)
566 wake_up(&state->wq); 595 wake_up(&state->wq);
567 596
568 set |= clear_state_bit(tree, prealloc, bits, 597 set |= clear_state_bit(tree, prealloc, bits, wake, delete);
569 wake, delete); 598
570 prealloc = NULL; 599 prealloc = NULL;
571 goto out; 600 goto out;
572 } 601 }
@@ -667,16 +696,23 @@ out:
667 return 0; 696 return 0;
668} 697}
669 698
670static void set_state_bits(struct extent_io_tree *tree, 699static int set_state_bits(struct extent_io_tree *tree,
671 struct extent_state *state, 700 struct extent_state *state,
672 int bits) 701 int bits)
673{ 702{
703 int ret;
704
705 ret = set_state_cb(tree, state, bits);
706 if (ret)
707 return ret;
708
674 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 709 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
675 u64 range = state->end - state->start + 1; 710 u64 range = state->end - state->start + 1;
676 tree->dirty_bytes += range; 711 tree->dirty_bytes += range;
677 } 712 }
678 set_state_cb(tree, state, bits);
679 state->state |= bits; 713 state->state |= bits;
714
715 return 0;
680} 716}
681 717
682static void cache_state(struct extent_state *state, 718static void cache_state(struct extent_state *state,
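
The reason set_state_cb and set_state_bits now return int is that the filesystem hook can veto a transition (the btrfs hook in the inode.c hunks below does delalloc accounting, plausibly so reservation failures can propagate). A minimal sketch of that unwinding contract, with illustrative types:

#include <errno.h>
#include <stdio.h>

struct state { unsigned long state; };
struct tree  { int (*set_bit_hook)(struct state *st, int bits); };

static int veto_hook(struct state *st, int bits)
{
	(void)st; (void)bits;
	return -ENOSPC;			/* e.g. a failed reservation */
}

static int set_state_bits_model(struct tree *tree, struct state *st, int bits)
{
	int ret = tree->set_bit_hook ? tree->set_bit_hook(st, bits) : 0;

	if (ret)
		return ret;		/* unwind: state word untouched */
	st->state |= bits;
	return 0;
}

int main(void)
{
	struct tree tree = { .set_bit_hook = veto_hook };
	struct state st = { 0 };

	printf("ret=%d state=%lu\n",
	       set_state_bits_model(&tree, &st, 1), st.state);
	return 0;
}
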
@@ -758,7 +794,10 @@ hit_next:
758 goto out; 794 goto out;
759 } 795 }
760 796
761 set_state_bits(tree, state, bits); 797 err = set_state_bits(tree, state, bits);
798 if (err)
799 goto out;
800
762 cache_state(state, cached_state); 801 cache_state(state, cached_state);
763 merge_state(tree, state); 802 merge_state(tree, state);
764 if (last_end == (u64)-1) 803 if (last_end == (u64)-1)
@@ -805,7 +844,9 @@ hit_next:
805 if (err) 844 if (err)
806 goto out; 845 goto out;
807 if (state->end <= end) { 846 if (state->end <= end) {
808 set_state_bits(tree, state, bits); 847 err = set_state_bits(tree, state, bits);
848 if (err)
849 goto out;
809 cache_state(state, cached_state); 850 cache_state(state, cached_state);
810 merge_state(tree, state); 851 merge_state(tree, state);
811 if (last_end == (u64)-1) 852 if (last_end == (u64)-1)
@@ -829,11 +870,13 @@ hit_next:
829 this_end = last_start - 1; 870 this_end = last_start - 1;
830 err = insert_state(tree, prealloc, start, this_end, 871 err = insert_state(tree, prealloc, start, this_end,
831 bits); 872 bits);
832 cache_state(prealloc, cached_state);
833 prealloc = NULL;
834 BUG_ON(err == -EEXIST); 873 BUG_ON(err == -EEXIST);
835 if (err) 874 if (err) {
875 prealloc = NULL;
836 goto out; 876 goto out;
877 }
878 cache_state(prealloc, cached_state);
879 prealloc = NULL;
837 start = this_end + 1; 880 start = this_end + 1;
838 goto search_again; 881 goto search_again;
839 } 882 }
@@ -852,7 +895,11 @@ hit_next:
852 err = split_state(tree, state, prealloc, end + 1); 895 err = split_state(tree, state, prealloc, end + 1);
853 BUG_ON(err == -EEXIST); 896 BUG_ON(err == -EEXIST);
854 897
855 set_state_bits(tree, prealloc, bits); 898 err = set_state_bits(tree, prealloc, bits);
899 if (err) {
900 prealloc = NULL;
901 goto out;
902 }
856 cache_state(prealloc, cached_state); 903 cache_state(prealloc, cached_state);
857 merge_state(tree, prealloc); 904 merge_state(tree, prealloc);
858 prealloc = NULL; 905 prealloc = NULL;
@@ -910,7 +957,8 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
910 gfp_t mask) 957 gfp_t mask)
911{ 958{
912 return clear_extent_bit(tree, start, end, 959 return clear_extent_bit(tree, start, end,
913 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, 960 EXTENT_DIRTY | EXTENT_DELALLOC |
961 EXTENT_DO_ACCOUNTING, 0, 0,
914 NULL, mask); 962 NULL, mask);
915} 963}
916 964
@@ -1355,12 +1403,7 @@ out_failed:
1355int extent_clear_unlock_delalloc(struct inode *inode, 1403int extent_clear_unlock_delalloc(struct inode *inode,
1356 struct extent_io_tree *tree, 1404 struct extent_io_tree *tree,
1357 u64 start, u64 end, struct page *locked_page, 1405 u64 start, u64 end, struct page *locked_page,
1358 int unlock_pages, 1406 unsigned long op)
1359 int clear_unlock,
1360 int clear_delalloc, int clear_dirty,
1361 int set_writeback,
1362 int end_writeback,
1363 int set_private2)
1364{ 1407{
1365 int ret; 1408 int ret;
1366 struct page *pages[16]; 1409 struct page *pages[16];
@@ -1370,17 +1413,21 @@ int extent_clear_unlock_delalloc(struct inode *inode,
1370 int i; 1413 int i;
1371 int clear_bits = 0; 1414 int clear_bits = 0;
1372 1415
1373 if (clear_unlock) 1416 if (op & EXTENT_CLEAR_UNLOCK)
1374 clear_bits |= EXTENT_LOCKED; 1417 clear_bits |= EXTENT_LOCKED;
1375 if (clear_dirty) 1418 if (op & EXTENT_CLEAR_DIRTY)
1376 clear_bits |= EXTENT_DIRTY; 1419 clear_bits |= EXTENT_DIRTY;
1377 1420
1378 if (clear_delalloc) 1421 if (op & EXTENT_CLEAR_DELALLOC)
1379 clear_bits |= EXTENT_DELALLOC; 1422 clear_bits |= EXTENT_DELALLOC;
1380 1423
1424 if (op & EXTENT_CLEAR_ACCOUNTING)
1425 clear_bits |= EXTENT_DO_ACCOUNTING;
1426
1381 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); 1427 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1382 if (!(unlock_pages || clear_dirty || set_writeback || end_writeback || 1428 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1383 set_private2)) 1429 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1430 EXTENT_SET_PRIVATE2)))
1384 return 0; 1431 return 0;
1385 1432
1386 while (nr_pages > 0) { 1433 while (nr_pages > 0) {
@@ -1389,20 +1436,20 @@ int extent_clear_unlock_delalloc(struct inode *inode,
1389 nr_pages, ARRAY_SIZE(pages)), pages); 1436 nr_pages, ARRAY_SIZE(pages)), pages);
1390 for (i = 0; i < ret; i++) { 1437 for (i = 0; i < ret; i++) {
1391 1438
1392 if (set_private2) 1439 if (op & EXTENT_SET_PRIVATE2)
1393 SetPagePrivate2(pages[i]); 1440 SetPagePrivate2(pages[i]);
1394 1441
1395 if (pages[i] == locked_page) { 1442 if (pages[i] == locked_page) {
1396 page_cache_release(pages[i]); 1443 page_cache_release(pages[i]);
1397 continue; 1444 continue;
1398 } 1445 }
1399 if (clear_dirty) 1446 if (op & EXTENT_CLEAR_DIRTY)
1400 clear_page_dirty_for_io(pages[i]); 1447 clear_page_dirty_for_io(pages[i]);
1401 if (set_writeback) 1448 if (op & EXTENT_SET_WRITEBACK)
1402 set_page_writeback(pages[i]); 1449 set_page_writeback(pages[i]);
1403 if (end_writeback) 1450 if (op & EXTENT_END_WRITEBACK)
1404 end_page_writeback(pages[i]); 1451 end_page_writeback(pages[i]);
1405 if (unlock_pages) 1452 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1406 unlock_page(pages[i]); 1453 unlock_page(pages[i]);
1407 page_cache_release(pages[i]); 1454 page_cache_release(pages[i]);
1408 } 1455 }
@@ -2668,7 +2715,8 @@ int extent_invalidatepage(struct extent_io_tree *tree,
2668 lock_extent(tree, start, end, GFP_NOFS); 2715 lock_extent(tree, start, end, GFP_NOFS);
2669 wait_on_page_writeback(page); 2716 wait_on_page_writeback(page);
2670 clear_extent_bit(tree, start, end, 2717 clear_extent_bit(tree, start, end,
2671 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC, 2718 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2719 EXTENT_DO_ACCOUNTING,
2672 1, 1, NULL, GFP_NOFS); 2720 1, 1, NULL, GFP_NOFS);
2673 return 0; 2721 return 0;
2674} 2722}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 14ed16fd862d..36de250a7b2b 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -15,6 +15,7 @@
15#define EXTENT_BUFFER_FILLED (1 << 8) 15#define EXTENT_BUFFER_FILLED (1 << 8)
16#define EXTENT_BOUNDARY (1 << 9) 16#define EXTENT_BOUNDARY (1 << 9)
17#define EXTENT_NODATASUM (1 << 10) 17#define EXTENT_NODATASUM (1 << 10)
18#define EXTENT_DO_ACCOUNTING (1 << 11)
18#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) 19#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
19 20
20/* flags for bio submission */ 21/* flags for bio submission */
@@ -25,6 +26,16 @@
25#define EXTENT_BUFFER_BLOCKING 1 26#define EXTENT_BUFFER_BLOCKING 1
26#define EXTENT_BUFFER_DIRTY 2 27#define EXTENT_BUFFER_DIRTY 2
27 28
29/* these are flags for extent_clear_unlock_delalloc */
30#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
31#define EXTENT_CLEAR_UNLOCK 0x2
32#define EXTENT_CLEAR_DELALLOC 0x4
33#define EXTENT_CLEAR_DIRTY 0x8
34#define EXTENT_SET_WRITEBACK 0x10
35#define EXTENT_END_WRITEBACK 0x20
36#define EXTENT_SET_PRIVATE2 0x40
37#define EXTENT_CLEAR_ACCOUNTING 0x80
38
28/* 39/*
29 * page->private values. Every page that is controlled by the extent 40 * page->private values. Every page that is controlled by the extent
30 * map has page->private set to one. 41 * map has page->private set to one.
@@ -60,8 +71,13 @@ struct extent_io_ops {
60 struct extent_state *state, int uptodate); 71 struct extent_state *state, int uptodate);
61 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, 72 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
62 unsigned long old, unsigned long bits); 73 unsigned long old, unsigned long bits);
63 int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, 74 int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
64 unsigned long old, unsigned long bits); 75 unsigned long bits);
76 int (*merge_extent_hook)(struct inode *inode,
77 struct extent_state *new,
78 struct extent_state *other);
79 int (*split_extent_hook)(struct inode *inode,
80 struct extent_state *orig, u64 split);
65 int (*write_cache_pages_lock_hook)(struct page *page); 81 int (*write_cache_pages_lock_hook)(struct page *page);
66}; 82};
67 83
@@ -79,10 +95,14 @@ struct extent_state {
79 u64 start; 95 u64 start;
80 u64 end; /* inclusive */ 96 u64 end; /* inclusive */
81 struct rb_node rb_node; 97 struct rb_node rb_node;
98
99 /* ADD NEW ELEMENTS AFTER THIS */
82 struct extent_io_tree *tree; 100 struct extent_io_tree *tree;
83 wait_queue_head_t wq; 101 wait_queue_head_t wq;
84 atomic_t refs; 102 atomic_t refs;
85 unsigned long state; 103 unsigned long state;
104 u64 split_start;
105 u64 split_end;
86 106
87 /* for use by the FS */ 107 /* for use by the FS */
88 u64 private; 108 u64 private;
@@ -279,10 +299,5 @@ int extent_range_uptodate(struct extent_io_tree *tree,
279int extent_clear_unlock_delalloc(struct inode *inode, 299int extent_clear_unlock_delalloc(struct inode *inode,
280 struct extent_io_tree *tree, 300 struct extent_io_tree *tree,
281 u64 start, u64 end, struct page *locked_page, 301 u64 start, u64 end, struct page *locked_page,
282 int unlock_page, 302 unsigned long op);
283 int clear_unlock,
284 int clear_delalloc, int clear_dirty,
285 int set_writeback,
286 int end_writeback,
287 int set_private2);
288#endif 303#endif
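
The header change collapses seven int parameters into a single op bitmask, so call sites read as a checklist and new operations (like EXTENT_CLEAR_ACCOUNTING) no longer change the prototype. A self-contained demo of composing the flags, with values copied from the hunk above; the combination shown mirrors the "finish this range" call sites in the inode.c hunks below.

#include <stdio.h>

#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
#define EXTENT_CLEAR_UNLOCK      0x2
#define EXTENT_CLEAR_DELALLOC    0x4
#define EXTENT_CLEAR_DIRTY       0x8
#define EXTENT_SET_WRITEBACK     0x10
#define EXTENT_END_WRITEBACK     0x20
#define EXTENT_SET_PRIVATE2      0x40
#define EXTENT_CLEAR_ACCOUNTING  0x80

int main(void)
{
	unsigned long op = EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_UNLOCK |
			   EXTENT_CLEAR_DELALLOC | EXTENT_CLEAR_DIRTY |
			   EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK;

	printf("op=0x%lx, end_writeback=%s\n", op,
	       (op & EXTENT_END_WRITEBACK) ? "yes" : "no");
	return 0;
}
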
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2c726b7b9faa..ccbdcb54ec5d 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -208,7 +208,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
208 write_lock(&tree->lock); 208 write_lock(&tree->lock);
209 em = lookup_extent_mapping(tree, start, len); 209 em = lookup_extent_mapping(tree, start, len);
210 210
211 WARN_ON(em->start != start || !em); 211 WARN_ON(!em || em->start != start);
212 212
213 if (!em) 213 if (!em)
214 goto out; 214 goto out;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a3492a3ad96b..06550affbd27 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -123,7 +123,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
124 124
125 end_of_last_block = start_pos + num_bytes - 1; 125 end_of_last_block = start_pos + num_bytes - 1;
126 btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); 126 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
127 if (err)
128 return err;
129
127 for (i = 0; i < num_pages; i++) { 130 for (i = 0; i < num_pages; i++) {
128 struct page *p = pages[i]; 131 struct page *p = pages[i];
129 SetPageUptodate(p); 132 SetPageUptodate(p);
@@ -875,7 +878,8 @@ again:
875 btrfs_put_ordered_extent(ordered); 878 btrfs_put_ordered_extent(ordered);
876 879
877 clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos, 880 clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
878 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC, 881 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
882 EXTENT_DO_ACCOUNTING,
879 GFP_NOFS); 883 GFP_NOFS);
880 unlock_extent(&BTRFS_I(inode)->io_tree, 884 unlock_extent(&BTRFS_I(inode)->io_tree,
881 start_pos, last_pos - 1, GFP_NOFS); 885 start_pos, last_pos - 1, GFP_NOFS);
@@ -917,21 +921,35 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
917 start_pos = pos; 921 start_pos = pos;
918 922
919 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 923 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
924
925 /* do the reserve before the mutex lock in case we have to do some
926 * flushing. We wouldn't deadlock, but this is more polite.
927 */
928 err = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
929 if (err)
930 goto out_nolock;
931
932 mutex_lock(&inode->i_mutex);
933
920 current->backing_dev_info = inode->i_mapping->backing_dev_info; 934 current->backing_dev_info = inode->i_mapping->backing_dev_info;
921 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 935 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
922 if (err) 936 if (err)
923 goto out_nolock; 937 goto out;
938
924 if (count == 0) 939 if (count == 0)
925 goto out_nolock; 940 goto out;
926 941
927 err = file_remove_suid(file); 942 err = file_remove_suid(file);
928 if (err) 943 if (err)
929 goto out_nolock; 944 goto out;
945
930 file_update_time(file); 946 file_update_time(file);
931 947
932 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 948 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
933 949
934 mutex_lock(&inode->i_mutex); 950 /* generic_write_checks can change our pos */
951 start_pos = pos;
952
935 BTRFS_I(inode)->sequence++; 953 BTRFS_I(inode)->sequence++;
936 first_index = pos >> PAGE_CACHE_SHIFT; 954 first_index = pos >> PAGE_CACHE_SHIFT;
937 last_index = (pos + count) >> PAGE_CACHE_SHIFT; 955 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
@@ -1005,9 +1023,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1005 } 1023 }
1006 1024
1007 if (will_write) { 1025 if (will_write) {
1008 btrfs_fdatawrite_range(inode->i_mapping, pos, 1026 filemap_fdatawrite_range(inode->i_mapping, pos,
1009 pos + write_bytes - 1, 1027 pos + write_bytes - 1);
1010 WB_SYNC_ALL);
1011 } else { 1028 } else {
1012 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1029 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1013 num_pages); 1030 num_pages);
@@ -1028,6 +1045,7 @@ out:
1028 mutex_unlock(&inode->i_mutex); 1045 mutex_unlock(&inode->i_mutex);
1029 if (ret) 1046 if (ret)
1030 err = ret; 1047 err = ret;
1048 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1031 1049
1032out_nolock: 1050out_nolock:
1033 kfree(pages); 1051 kfree(pages);
@@ -1068,8 +1086,10 @@ out_nolock:
1068 btrfs_end_transaction(trans, root); 1086 btrfs_end_transaction(trans, root);
1069 else 1087 else
1070 btrfs_commit_transaction(trans, root); 1088 btrfs_commit_transaction(trans, root);
1071 } else { 1089 } else if (ret != BTRFS_NO_LOG_SYNC) {
1072 btrfs_commit_transaction(trans, root); 1090 btrfs_commit_transaction(trans, root);
1091 } else {
1092 btrfs_end_transaction(trans, root);
1073 } 1093 }
1074 } 1094 }
1075 if (file->f_flags & O_DIRECT) { 1095 if (file->f_flags & O_DIRECT) {
@@ -1119,6 +1139,13 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1119 int ret = 0; 1139 int ret = 0;
1120 struct btrfs_trans_handle *trans; 1140 struct btrfs_trans_handle *trans;
1121 1141
1142
1143 /* we wait first, since the writeback may change the inode */
1144 root->log_batch++;
1145 /* the VFS called filemap_fdatawrite for us */
1146 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1147 root->log_batch++;
1148
1122 /* 1149 /*
1123 * check the transaction that last modified this inode 1150 * check the transaction that last modified this inode
1124 * and see if it's already been committed 1151 * and see if it's already been committed
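
A minimal model of the reordered fsync fast path above, with stand-in types and helpers: wait for ordered writeback first, since it can advance last_trans, and only then skip the commit when the inode's last change is already committed.

#include <stdint.h>
#include <stdio.h>

struct ino { uint64_t last_trans; };
struct fs  { uint64_t last_trans_committed; };

/* stand-in: ordered writeback can itself dirty the inode again */
static void wait_ordered_range(struct ino *inode) { inode->last_trans += 1; }
static int commit_or_log_sync(struct ino *inode) { (void)inode; return 0; }

static int fsync_model(struct ino *inode, struct fs *fs)
{
	wait_ordered_range(inode);	/* wait first: may bump last_trans */
	if (inode->last_trans <= fs->last_trans_committed)
		return 0;		/* already durable, skip the commit */
	return commit_or_log_sync(inode);
}

int main(void)
{
	struct ino inode = { .last_trans = 5 };
	struct fs fs = { .last_trans_committed = 10 };

	printf("fsync -> %d (fast path)\n", fsync_model(&inode, &fs));
	return 0;
}
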
@@ -1126,6 +1153,11 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1126 if (!BTRFS_I(inode)->last_trans) 1153 if (!BTRFS_I(inode)->last_trans)
1127 goto out; 1154 goto out;
1128 1155
1156 /*
1157 * if the last transaction that changed this file was before
1158 * the current transaction, we can bail out now without any
1159 * syncing
1160 */
1129 mutex_lock(&root->fs_info->trans_mutex); 1161 mutex_lock(&root->fs_info->trans_mutex);
1130 if (BTRFS_I(inode)->last_trans <= 1162 if (BTRFS_I(inode)->last_trans <=
1131 root->fs_info->last_trans_committed) { 1163 root->fs_info->last_trans_committed) {
@@ -1135,13 +1167,6 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1135 } 1167 }
1136 mutex_unlock(&root->fs_info->trans_mutex); 1168 mutex_unlock(&root->fs_info->trans_mutex);
1137 1169
1138 root->log_batch++;
1139 filemap_fdatawrite(inode->i_mapping);
1140 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1141 root->log_batch++;
1142
1143 if (datasync && !(inode->i_state & I_DIRTY_PAGES))
1144 goto out;
1145 /* 1170 /*
1146 * ok we haven't committed the transaction yet, lets do a commit 1171 * ok we haven't committed the transaction yet, lets do a commit
1147 */ 1172 */
@@ -1170,14 +1195,18 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1170 */ 1195 */
1171 mutex_unlock(&dentry->d_inode->i_mutex); 1196 mutex_unlock(&dentry->d_inode->i_mutex);
1172 1197
1173 if (ret > 0) { 1198 if (ret != BTRFS_NO_LOG_SYNC) {
1174 ret = btrfs_commit_transaction(trans, root); 1199 if (ret > 0) {
1175 } else {
1176 ret = btrfs_sync_log(trans, root);
1177 if (ret == 0)
1178 ret = btrfs_end_transaction(trans, root);
1179 else
1180 ret = btrfs_commit_transaction(trans, root); 1200 ret = btrfs_commit_transaction(trans, root);
1201 } else {
1202 ret = btrfs_sync_log(trans, root);
1203 if (ret == 0)
1204 ret = btrfs_end_transaction(trans, root);
1205 else
1206 ret = btrfs_commit_transaction(trans, root);
1207 }
1208 } else {
1209 ret = btrfs_end_transaction(trans, root);
1181 } 1210 }
1182 mutex_lock(&dentry->d_inode->i_mutex); 1211 mutex_lock(&dentry->d_inode->i_mutex);
1183out: 1212out:
@@ -1196,7 +1225,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1196 return 0; 1225 return 0;
1197} 1226}
1198 1227
1199struct file_operations btrfs_file_operations = { 1228const struct file_operations btrfs_file_operations = {
1200 .llseek = generic_file_llseek, 1229 .llseek = generic_file_llseek,
1201 .read = do_sync_read, 1230 .read = do_sync_read,
1202 .aio_read = generic_file_aio_read, 1231 .aio_read = generic_file_aio_read,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5c2caad76212..cb2849f03251 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1296,7 +1296,7 @@ again:
1296 window_start = entry->offset; 1296 window_start = entry->offset;
1297 window_free = entry->bytes; 1297 window_free = entry->bytes;
1298 last = entry; 1298 last = entry;
1299 max_extent = 0; 1299 max_extent = entry->bytes;
1300 } else { 1300 } else {
1301 last = next; 1301 last = next;
1302 window_free += next->bytes; 1302 window_free += next->bytes;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e9b76bcd1c12..b3ad168a0bfc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -62,7 +62,7 @@ static const struct inode_operations btrfs_special_inode_operations;
62static const struct inode_operations btrfs_file_inode_operations; 62static const struct inode_operations btrfs_file_inode_operations;
63static const struct address_space_operations btrfs_aops; 63static const struct address_space_operations btrfs_aops;
64static const struct address_space_operations btrfs_symlink_aops; 64static const struct address_space_operations btrfs_symlink_aops;
65static struct file_operations btrfs_dir_file_operations; 65static const struct file_operations btrfs_dir_file_operations;
66static struct extent_io_ops btrfs_extent_io_ops; 66static struct extent_io_ops btrfs_extent_io_ops;
67 67
68static struct kmem_cache *btrfs_inode_cachep; 68static struct kmem_cache *btrfs_inode_cachep;
@@ -424,9 +424,12 @@ again:
424 * and free up our temp pages. 424 * and free up our temp pages.
425 */ 425 */
426 extent_clear_unlock_delalloc(inode, 426 extent_clear_unlock_delalloc(inode,
427 &BTRFS_I(inode)->io_tree, 427 &BTRFS_I(inode)->io_tree,
428 start, end, NULL, 1, 0, 428 start, end, NULL,
429 0, 1, 1, 1, 0); 429 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
430 EXTENT_CLEAR_DELALLOC |
431 EXTENT_CLEAR_ACCOUNTING |
432 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
430 ret = 0; 433 ret = 0;
431 goto free_pages_out; 434 goto free_pages_out;
432 } 435 }
@@ -535,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
535 struct btrfs_root *root = BTRFS_I(inode)->root; 538 struct btrfs_root *root = BTRFS_I(inode)->root;
536 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 539 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
537 struct extent_io_tree *io_tree; 540 struct extent_io_tree *io_tree;
538 int ret; 541 int ret = 0;
539 542
540 if (list_empty(&async_cow->extents)) 543 if (list_empty(&async_cow->extents))
541 return 0; 544 return 0;
@@ -549,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
549 552
550 io_tree = &BTRFS_I(inode)->io_tree; 553 io_tree = &BTRFS_I(inode)->io_tree;
551 554
555retry:
552 /* did the compression code fall back to uncompressed IO? */ 556 /* did the compression code fall back to uncompressed IO? */
553 if (!async_extent->pages) { 557 if (!async_extent->pages) {
554 int page_started = 0; 558 int page_started = 0;
@@ -559,11 +563,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
559 async_extent->ram_size - 1, GFP_NOFS); 563 async_extent->ram_size - 1, GFP_NOFS);
560 564
561 /* allocate blocks */ 565 /* allocate blocks */
562 cow_file_range(inode, async_cow->locked_page, 566 ret = cow_file_range(inode, async_cow->locked_page,
563 async_extent->start, 567 async_extent->start,
564 async_extent->start + 568 async_extent->start +
565 async_extent->ram_size - 1, 569 async_extent->ram_size - 1,
566 &page_started, &nr_written, 0); 570 &page_started, &nr_written, 0);
567 571
568 /* 572 /*
569 * if page_started, cow_file_range inserted an 573 * if page_started, cow_file_range inserted an
@@ -571,7 +575,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
571 * and IO for us. Otherwise, we need to submit 575 * and IO for us. Otherwise, we need to submit
572 * all those pages down to the drive. 576 * all those pages down to the drive.
573 */ 577 */
574 if (!page_started) 578 if (!page_started && !ret)
575 extent_write_locked_range(io_tree, 579 extent_write_locked_range(io_tree,
576 inode, async_extent->start, 580 inode, async_extent->start,
577 async_extent->start + 581 async_extent->start +
@@ -599,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
599 async_extent->compressed_size, 603 async_extent->compressed_size,
600 0, alloc_hint, 604 0, alloc_hint,
601 (u64)-1, &ins, 1); 605 (u64)-1, &ins, 1);
602 BUG_ON(ret); 606 if (ret) {
607 int i;
608 for (i = 0; i < async_extent->nr_pages; i++) {
609 WARN_ON(async_extent->pages[i]->mapping);
610 page_cache_release(async_extent->pages[i]);
611 }
612 kfree(async_extent->pages);
613 async_extent->nr_pages = 0;
614 async_extent->pages = NULL;
615 unlock_extent(io_tree, async_extent->start,
616 async_extent->start +
617 async_extent->ram_size - 1, GFP_NOFS);
618 goto retry;
619 }
620
603 em = alloc_extent_map(GFP_NOFS); 621 em = alloc_extent_map(GFP_NOFS);
604 em->start = async_extent->start; 622 em->start = async_extent->start;
605 em->len = async_extent->ram_size; 623 em->len = async_extent->ram_size;
@@ -637,11 +655,14 @@ static noinline int submit_compressed_extents(struct inode *inode,
637 * clear dirty, set writeback and unlock the pages. 655 * clear dirty, set writeback and unlock the pages.
638 */ 656 */
639 extent_clear_unlock_delalloc(inode, 657 extent_clear_unlock_delalloc(inode,
640 &BTRFS_I(inode)->io_tree, 658 &BTRFS_I(inode)->io_tree,
641 async_extent->start, 659 async_extent->start,
642 async_extent->start + 660 async_extent->start +
643 async_extent->ram_size - 1, 661 async_extent->ram_size - 1,
644 NULL, 1, 1, 0, 1, 1, 0, 0); 662 NULL, EXTENT_CLEAR_UNLOCK_PAGE |
663 EXTENT_CLEAR_UNLOCK |
664 EXTENT_CLEAR_DELALLOC |
665 EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
645 666
646 ret = btrfs_submit_compressed_write(inode, 667 ret = btrfs_submit_compressed_write(inode,
647 async_extent->start, 668 async_extent->start,
@@ -712,9 +733,15 @@ static noinline int cow_file_range(struct inode *inode,
712 start, end, 0, NULL); 733 start, end, 0, NULL);
713 if (ret == 0) { 734 if (ret == 0) {
714 extent_clear_unlock_delalloc(inode, 735 extent_clear_unlock_delalloc(inode,
715 &BTRFS_I(inode)->io_tree, 736 &BTRFS_I(inode)->io_tree,
716 start, end, NULL, 1, 1, 737 start, end, NULL,
717 1, 1, 1, 1, 0); 738 EXTENT_CLEAR_UNLOCK_PAGE |
739 EXTENT_CLEAR_UNLOCK |
740 EXTENT_CLEAR_DELALLOC |
741 EXTENT_CLEAR_ACCOUNTING |
742 EXTENT_CLEAR_DIRTY |
743 EXTENT_SET_WRITEBACK |
744 EXTENT_END_WRITEBACK);
718 *nr_written = *nr_written + 745 *nr_written = *nr_written +
719 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 746 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
720 *page_started = 1; 747 *page_started = 1;
@@ -731,13 +758,29 @@ static noinline int cow_file_range(struct inode *inode,
731 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree, 758 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
732 start, num_bytes); 759 start, num_bytes);
733 if (em) { 760 if (em) {
734 alloc_hint = em->block_start; 761 /*
735 free_extent_map(em); 762 * if block start isn't an actual block number then find the
763 * first block in this inode and use that as a hint. If that
764 * block is also bogus then just don't worry about it.
765 */
766 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
767 free_extent_map(em);
768 em = search_extent_mapping(em_tree, 0, 0);
769 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
770 alloc_hint = em->block_start;
771 if (em)
772 free_extent_map(em);
773 } else {
774 alloc_hint = em->block_start;
775 free_extent_map(em);
776 }
736 } 777 }
737 read_unlock(&BTRFS_I(inode)->extent_tree.lock); 778 read_unlock(&BTRFS_I(inode)->extent_tree.lock);
738 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 779 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
739 780
740 while (disk_num_bytes > 0) { 781 while (disk_num_bytes > 0) {
782 unsigned long op;
783
741 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); 784 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
742 ret = btrfs_reserve_extent(trans, root, cur_alloc_size, 785 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
743 root->sectorsize, 0, alloc_hint, 786 root->sectorsize, 0, alloc_hint,
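
The alloc_hint fallback above, sketched as a standalone helper; the exact EXTENT_MAP_LAST_BYTE value is an assumption here, and what matters is only that block_start values at or past it are sentinels (holes, inline extents), not real block numbers.

#include <stdint.h>
#include <stdio.h>

#define EXTENT_MAP_LAST_BYTE ((uint64_t)-4)	/* assumed sentinel value */

struct em { uint64_t block_start; };

/* returns 0 when no usable hint exists */
static uint64_t pick_alloc_hint(const struct em *at_offset,
				const struct em *first_in_inode)
{
	if (at_offset && at_offset->block_start < EXTENT_MAP_LAST_BYTE)
		return at_offset->block_start;
	if (first_in_inode &&
	    first_in_inode->block_start < EXTENT_MAP_LAST_BYTE)
		return first_in_inode->block_start;
	return 0;
}

int main(void)
{
	struct em hole  = { EXTENT_MAP_LAST_BYTE };	/* bogus start */
	struct em first = { 1 << 20 };			/* real block */

	printf("hint=%llu\n",
	       (unsigned long long)pick_alloc_hint(&hole, &first));
	return 0;
}
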
@@ -789,10 +832,13 @@ static noinline int cow_file_range(struct inode *inode,
789 * Do set the Private2 bit so we know this page was properly 832 * Do set the Private2 bit so we know this page was properly
790 * setup for writepage 833 * setup for writepage
791 */ 834 */
835 op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
836 op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
837 EXTENT_SET_PRIVATE2;
838
792 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 839 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
793 start, start + ram_size - 1, 840 start, start + ram_size - 1,
794 locked_page, unlock, 1, 841 locked_page, op);
795 1, 0, 0, 0, 1);
796 disk_num_bytes -= cur_alloc_size; 842 disk_num_bytes -= cur_alloc_size;
797 num_bytes -= cur_alloc_size; 843 num_bytes -= cur_alloc_size;
798 alloc_hint = ins.objectid + ins.offset; 844 alloc_hint = ins.objectid + ins.offset;
@@ -864,8 +910,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
864 u64 cur_end; 910 u64 cur_end;
865 int limit = 10 * 1024 * 1024; 911 int limit = 10 * 1024 * 1024;
866 912
867 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | 913 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
868 EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS); 914 1, 0, NULL, GFP_NOFS);
869 while (start < end) { 915 while (start < end) {
870 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 916 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
871 async_cow->inode = inode; 917 async_cow->inode = inode;
@@ -1006,6 +1052,7 @@ next_slot:
1006 1052
1007 if (found_key.offset > cur_offset) { 1053 if (found_key.offset > cur_offset) {
1008 extent_end = found_key.offset; 1054 extent_end = found_key.offset;
1055 extent_type = 0;
1009 goto out_check; 1056 goto out_check;
1010 } 1057 }
1011 1058
@@ -1112,8 +1159,10 @@ out_check:
1112 BUG_ON(ret); 1159 BUG_ON(ret);
1113 1160
1114 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 1161 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1115 cur_offset, cur_offset + num_bytes - 1, 1162 cur_offset, cur_offset + num_bytes - 1,
1116 locked_page, 1, 1, 1, 0, 0, 0, 1); 1163 locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1164 EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1165 EXTENT_SET_PRIVATE2);
1117 cur_offset = extent_end; 1166 cur_offset = extent_end;
1118 if (cur_offset > end) 1167 if (cur_offset > end)
1119 break; 1168 break;
@@ -1159,6 +1208,89 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1159 return ret; 1208 return ret;
1160} 1209}
1161 1210
1211static int btrfs_split_extent_hook(struct inode *inode,
1212 struct extent_state *orig, u64 split)
1213{
1214 struct btrfs_root *root = BTRFS_I(inode)->root;
1215 u64 size;
1216
1217 if (!(orig->state & EXTENT_DELALLOC))
1218 return 0;
1219
1220 size = orig->end - orig->start + 1;
1221 if (size > root->fs_info->max_extent) {
1222 u64 num_extents;
1223 u64 new_size;
1224
1225 new_size = orig->end - split + 1;
1226 num_extents = div64_u64(size + root->fs_info->max_extent - 1,
1227 root->fs_info->max_extent);
1228
1229 /*
1231 * if we break a large extent up then leave outstanding_extents
1231 * be, since we've already accounted for the large extent.
1232 */
1233 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1234 root->fs_info->max_extent) < num_extents)
1235 return 0;
1236 }
1237
1238 spin_lock(&BTRFS_I(inode)->accounting_lock);
1239 BTRFS_I(inode)->outstanding_extents++;
1240 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1241
1242 return 0;
1243}
1244
1245/*
1246 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1247 * extents so we can keep track of new extents that are just merged onto old
1248 * extents, such as when we are doing sequential writes, so we can properly
1249 * account for the metadata space we'll need.
1250 */
1251static int btrfs_merge_extent_hook(struct inode *inode,
1252 struct extent_state *new,
1253 struct extent_state *other)
1254{
1255 struct btrfs_root *root = BTRFS_I(inode)->root;
1256 u64 new_size, old_size;
1257 u64 num_extents;
1258
1259 /* not delalloc, ignore it */
1260 if (!(other->state & EXTENT_DELALLOC))
1261 return 0;
1262
1263 old_size = other->end - other->start + 1;
1264 if (new->start < other->start)
1265 new_size = other->end - new->start + 1;
1266 else
1267 new_size = new->end - other->start + 1;
1268
1269 /* we're not bigger than the max, unreserve the space and go */
1270 if (new_size <= root->fs_info->max_extent) {
1271 spin_lock(&BTRFS_I(inode)->accounting_lock);
1272 BTRFS_I(inode)->outstanding_extents--;
1273 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1274 return 0;
1275 }
1276
1277 /*
1278 * If we grew by another max_extent, just return, we want to keep that
1279 * reserved amount.
1280 */
1281 num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
1282 root->fs_info->max_extent);
1283 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1284 root->fs_info->max_extent) > num_extents)
1285 return 0;
1286
1287 spin_lock(&BTRFS_I(inode)->accounting_lock);
1288 BTRFS_I(inode)->outstanding_extents--;
1289 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1290
1291 return 0;
1292}
1293
1162/* 1294/*
1163 * extent_io.c set_bit_hook, used to track delayed allocation 1295 * extent_io.c set_bit_hook, used to track delayed allocation
1164 * bytes in this file, and to maintain the list of inodes that 1296 * bytes in this file, and to maintain the list of inodes that
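
A quick userspace check of the split accounting rule from this hunk: a piece only needs its own reservation when the split changes how many max_extent-sized units are covered (the merge hook applies the same comparison in reverse). The 128M max_extent is an example value; div_ceil mirrors the div64_u64 rounding.

#include <stdint.h>
#include <stdio.h>

static uint64_t div_ceil(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t max_extent = 128 * 1024 * 1024;
	uint64_t size = 300 * 1024 * 1024;	/* already accounted */
	uint64_t new_size = 100 * 1024 * 1024;	/* piece left by a split */

	uint64_t before = div_ceil(size, max_extent);		/* 3 */
	uint64_t after = div_ceil(new_size, max_extent);	/* 1 */

	/* mirrors btrfs_split_extent_hook: fewer pieces than already
	 * accounted means the existing reservation covers the split */
	printf("%s\n", after < before ?
	       "covered by existing reservation" :
	       "reserve one more outstanding extent");
	return 0;
}
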
@@ -1167,6 +1299,7 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1167static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, 1299static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1168 unsigned long old, unsigned long bits) 1300 unsigned long old, unsigned long bits)
1169{ 1301{
1302
1170 /* 1303 /*
1171 * set_bit and clear bit hooks normally require _irqsave/restore 1304 * set_bit and clear bit hooks normally require _irqsave/restore
1172 * but in this case, we are only testing for the DELALLOC 1305 * but in this case, we are only testing for the DELALLOC
@@ -1174,6 +1307,10 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1174 */ 1307 */
1175 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1308 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1176 struct btrfs_root *root = BTRFS_I(inode)->root; 1309 struct btrfs_root *root = BTRFS_I(inode)->root;
1310
1311 spin_lock(&BTRFS_I(inode)->accounting_lock);
1312 BTRFS_I(inode)->outstanding_extents++;
1313 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1177 btrfs_delalloc_reserve_space(root, inode, end - start + 1); 1314 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1178 spin_lock(&root->fs_info->delalloc_lock); 1315 spin_lock(&root->fs_info->delalloc_lock);
1179 BTRFS_I(inode)->delalloc_bytes += end - start + 1; 1316 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
@@ -1190,22 +1327,31 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1190/* 1327/*
1191 * extent_io.c clear_bit_hook, see set_bit_hook for why 1328 * extent_io.c clear_bit_hook, see set_bit_hook for why
1192 */ 1329 */
1193static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, 1330static int btrfs_clear_bit_hook(struct inode *inode,
1194 unsigned long old, unsigned long bits) 1331 struct extent_state *state, unsigned long bits)
1195{ 1332{
1196 /* 1333 /*
1197 * set_bit and clear_bit hooks normally require _irqsave/restore 1334
1198 * but in this case, we are only testing for the DELALLOC 1335
1199 * bit, which is only set or cleared with irqs on 1336 * bit, which is only set or cleared with irqs on
1200 */ 1337 */
1201 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1338 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1202 struct btrfs_root *root = BTRFS_I(inode)->root; 1339 struct btrfs_root *root = BTRFS_I(inode)->root;
1203 1340
1341 if (bits & EXTENT_DO_ACCOUNTING) {
1342 spin_lock(&BTRFS_I(inode)->accounting_lock);
1343 BTRFS_I(inode)->outstanding_extents--;
1344 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1345 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1346 }
1347
1204 spin_lock(&root->fs_info->delalloc_lock); 1348 spin_lock(&root->fs_info->delalloc_lock);
1205 if (end - start + 1 > root->fs_info->delalloc_bytes) { 1349 if (state->end - state->start + 1 >
1350 root->fs_info->delalloc_bytes) {
1206 printk(KERN_INFO "btrfs warning: delalloc account " 1351 printk(KERN_INFO "btrfs warning: delalloc account "
1207 "%llu %llu\n", 1352 "%llu %llu\n",
1208 (unsigned long long)end - start + 1, 1353 (unsigned long long)
1354 state->end - state->start + 1,
1209 (unsigned long long) 1355 (unsigned long long)
1210 root->fs_info->delalloc_bytes); 1356 root->fs_info->delalloc_bytes);
1211 btrfs_delalloc_free_space(root, inode, (u64)-1); 1357 btrfs_delalloc_free_space(root, inode, (u64)-1);
@@ -1213,9 +1359,12 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1213 BTRFS_I(inode)->delalloc_bytes = 0; 1359 BTRFS_I(inode)->delalloc_bytes = 0;
1214 } else { 1360 } else {
1215 btrfs_delalloc_free_space(root, inode, 1361 btrfs_delalloc_free_space(root, inode,
1216 end - start + 1); 1362 state->end -
1217 root->fs_info->delalloc_bytes -= end - start + 1; 1363 state->start + 1);
1218 BTRFS_I(inode)->delalloc_bytes -= end - start + 1; 1364 root->fs_info->delalloc_bytes -= state->end -
1365 state->start + 1;
1366 BTRFS_I(inode)->delalloc_bytes -= state->end -
1367 state->start + 1;
1219 } 1368 }
1220 if (BTRFS_I(inode)->delalloc_bytes == 0 && 1369 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1221 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1370 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
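
Taken together with the ordered-data.c hunk further down, the hooks above give the new per-inode counter a closed lifecycle. The following comment block summarizes where it moves (drawn from the hunks in this series, not itself part of the patch):

/* BTRFS_I(inode)->outstanding_extents, as wired up here:
 *
 *   btrfs_set_bit_hook()          ++  on a fresh EXTENT_DELALLOC range
 *   btrfs_merge_extent_hook()     --  when a merge stays within the
 *                                     same number of max_extent units
 *   btrfs_clear_bit_hook()        --  only when EXTENT_DO_ACCOUNTING
 *                                     is in the clear mask
 *   btrfs_remove_ordered_extent() --  once the ordered IO completes
 *
 * btrfs_split_extent_hook, registered in the extent_io_ops table at
 * the end of inode.c, is the increment counterpart of the merge hook.
 */
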
@@ -2354,7 +2503,19 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2354 2503
2355 root = BTRFS_I(dir)->root; 2504 root = BTRFS_I(dir)->root;
2356 2505
2506 /*
2507 * 5 items to unlink the inode
2508 * 1 for the orphan item
2509 */
2510 ret = btrfs_reserve_metadata_space(root, 6);
2511 if (ret)
2512 return ret;
2513
2357 trans = btrfs_start_transaction(root, 1); 2514 trans = btrfs_start_transaction(root, 1);
2515 if (IS_ERR(trans)) {
2516 btrfs_unreserve_metadata_space(root, 6);
2517 return PTR_ERR(trans);
2518 }
2358 2519
2359 btrfs_set_trans_block_group(trans, dir); 2520 btrfs_set_trans_block_group(trans, dir);
2360 2521
@@ -2369,6 +2530,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2369 nr = trans->blocks_used; 2530 nr = trans->blocks_used;
2370 2531
2371 btrfs_end_transaction_throttle(trans, root); 2532 btrfs_end_transaction_throttle(trans, root);
2533 btrfs_unreserve_metadata_space(root, 6);
2372 btrfs_btree_balance_dirty(root, nr); 2534 btrfs_btree_balance_dirty(root, nr);
2373 return ret; 2535 return ret;
2374} 2536}
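
The reserve-then-start pattern above repeats in rmdir, mknod, create, mkdir and symlink below with different item counts. Factored out, the discipline looks roughly like this; a sketch only, with an invented helper name, since the patch open-codes it at each call site:

/* Illustrative: reserve space for a known number of tree items, run
 * the modification inside a transaction, then release the reservation
 * on every exit path. */
static int do_tree_mod(struct btrfs_root *root, int num_items)
{
	struct btrfs_trans_handle *trans;
	int ret;

	ret = btrfs_reserve_metadata_space(root, num_items);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		btrfs_unreserve_metadata_space(root, num_items);
		return PTR_ERR(trans);
	}

	/* ... modify up to num_items tree items ... */

	btrfs_end_transaction_throttle(trans, root);
	btrfs_unreserve_metadata_space(root, num_items);
	return 0;
}
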
@@ -2449,7 +2611,16 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2449 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 2611 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2450 return -ENOTEMPTY; 2612 return -ENOTEMPTY;
2451 2613
2614 ret = btrfs_reserve_metadata_space(root, 5);
2615 if (ret)
2616 return ret;
2617
2452 trans = btrfs_start_transaction(root, 1); 2618 trans = btrfs_start_transaction(root, 1);
2619 if (IS_ERR(trans)) {
2620 btrfs_unreserve_metadata_space(root, 5);
2621 return PTR_ERR(trans);
2622 }
2623
2453 btrfs_set_trans_block_group(trans, dir); 2624 btrfs_set_trans_block_group(trans, dir);
2454 2625
2455 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 2626 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -2472,6 +2643,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2472out: 2643out:
2473 nr = trans->blocks_used; 2644 nr = trans->blocks_used;
2474 ret = btrfs_end_transaction_throttle(trans, root); 2645 ret = btrfs_end_transaction_throttle(trans, root);
2646 btrfs_unreserve_metadata_space(root, 5);
2475 btrfs_btree_balance_dirty(root, nr); 2647 btrfs_btree_balance_dirty(root, nr);
2476 2648
2477 if (ret && !err) 2649 if (ret && !err)
@@ -2912,12 +3084,22 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2912 3084
2913 if ((offset & (blocksize - 1)) == 0) 3085 if ((offset & (blocksize - 1)) == 0)
2914 goto out; 3086 goto out;
3087 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3088 if (ret)
3089 goto out;
3090
3091 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3092 if (ret)
3093 goto out;
2915 3094
2916 ret = -ENOMEM; 3095 ret = -ENOMEM;
2917again: 3096again:
2918 page = grab_cache_page(mapping, index); 3097 page = grab_cache_page(mapping, index);
2919 if (!page) 3098 if (!page) {
3099 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3100 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
2920 goto out; 3101 goto out;
3102 }
2921 3103
2922 page_start = page_offset(page); 3104 page_start = page_offset(page);
2923 page_end = page_start + PAGE_CACHE_SIZE - 1; 3105 page_end = page_start + PAGE_CACHE_SIZE - 1;
@@ -2950,7 +3132,16 @@ again:
2950 goto again; 3132 goto again;
2951 } 3133 }
2952 3134
2953 btrfs_set_extent_delalloc(inode, page_start, page_end); 3135 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
3136 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3137 GFP_NOFS);
3138
3139 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3140 if (ret) {
3141 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3142 goto out_unlock;
3143 }
3144
2954 ret = 0; 3145 ret = 0;
2955 if (offset != PAGE_CACHE_SIZE) { 3146 if (offset != PAGE_CACHE_SIZE) {
2956 kaddr = kmap(page); 3147 kaddr = kmap(page);
@@ -2963,6 +3154,9 @@ again:
2963 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3154 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2964 3155
2965out_unlock: 3156out_unlock:
3157 if (ret)
3158 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3159 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
2966 unlock_page(page); 3160 unlock_page(page);
2967 page_cache_release(page); 3161 page_cache_release(page);
2968out: 3162out:
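
btrfs_truncate_page now takes both reservations before grabbing the page and drops them on every failure path. Condensed into one helper for clarity (illustrative; reserve_for_page is an invented name, and the release on metadata failure mirrors the later error paths rather than the early goto above):

static int reserve_for_page(struct btrfs_root *root, struct inode *inode)
{
	int ret;

	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
	if (ret)
		return ret;

	ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
	if (ret)	/* undo the data reservation on failure */
		btrfs_free_reserved_data_space(root, inode,
					       PAGE_CACHE_SIZE);
	return ret;
}
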
@@ -2981,17 +3175,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
2981 u64 last_byte; 3175 u64 last_byte;
2982 u64 cur_offset; 3176 u64 cur_offset;
2983 u64 hole_size; 3177 u64 hole_size;
2984 int err; 3178 int err = 0;
2985 3179
2986 if (size <= hole_start) 3180 if (size <= hole_start)
2987 return 0; 3181 return 0;
2988 3182
2989 err = btrfs_check_metadata_free_space(root); 3183 err = btrfs_truncate_page(inode->i_mapping, inode->i_size);
2990 if (err) 3184 if (err)
2991 return err; 3185 return err;
2992 3186
2993 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2994
2995 while (1) { 3187 while (1) {
2996 struct btrfs_ordered_extent *ordered; 3188 struct btrfs_ordered_extent *ordered;
2997 btrfs_wait_ordered_range(inode, hole_start, 3189 btrfs_wait_ordered_range(inode, hole_start,
@@ -3024,12 +3216,18 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3024 cur_offset, &hint_byte, 1); 3216 cur_offset, &hint_byte, 1);
3025 if (err) 3217 if (err)
3026 break; 3218 break;
3219
3220 err = btrfs_reserve_metadata_space(root, 1);
3221 if (err)
3222 break;
3223
3027 err = btrfs_insert_file_extent(trans, root, 3224 err = btrfs_insert_file_extent(trans, root,
3028 inode->i_ino, cur_offset, 0, 3225 inode->i_ino, cur_offset, 0,
3029 0, hole_size, 0, hole_size, 3226 0, hole_size, 0, hole_size,
3030 0, 0, 0); 3227 0, 0, 0);
3031 btrfs_drop_extent_cache(inode, hole_start, 3228 btrfs_drop_extent_cache(inode, hole_start,
3032 last_byte - 1, 0); 3229 last_byte - 1, 0);
3230 btrfs_unreserve_metadata_space(root, 1);
3033 } 3231 }
3034 free_extent_map(em); 3232 free_extent_map(em);
3035 cur_offset = last_byte; 3233 cur_offset = last_byte;
@@ -3353,6 +3551,7 @@ static noinline void init_btrfs_i(struct inode *inode)
3353 bi->generation = 0; 3551 bi->generation = 0;
3354 bi->sequence = 0; 3552 bi->sequence = 0;
3355 bi->last_trans = 0; 3553 bi->last_trans = 0;
3554 bi->last_sub_trans = 0;
3356 bi->logged_trans = 0; 3555 bi->logged_trans = 0;
3357 bi->delalloc_bytes = 0; 3556 bi->delalloc_bytes = 0;
3358 bi->reserved_bytes = 0; 3557 bi->reserved_bytes = 0;
@@ -3503,12 +3702,14 @@ static int btrfs_dentry_delete(struct dentry *dentry)
3503{ 3702{
3504 struct btrfs_root *root; 3703 struct btrfs_root *root;
3505 3704
3506 if (!dentry->d_inode) 3705 if (!dentry->d_inode && !IS_ROOT(dentry))
3507 return 0; 3706 dentry = dentry->d_parent;
3508 3707
3509 root = BTRFS_I(dentry->d_inode)->root; 3708 if (dentry->d_inode) {
3510 if (btrfs_root_refs(&root->root_item) == 0) 3709 root = BTRFS_I(dentry->d_inode)->root;
3511 return 1; 3710 if (btrfs_root_refs(&root->root_item) == 0)
3711 return 1;
3712 }
3512 return 0; 3713 return 0;
3513} 3714}
3514 3715
@@ -3990,11 +4191,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3990 if (!new_valid_dev(rdev)) 4191 if (!new_valid_dev(rdev))
3991 return -EINVAL; 4192 return -EINVAL;
3992 4193
3993 err = btrfs_check_metadata_free_space(root); 4194 /*
4195 * 2 for inode item and ref
4196 * 2 for dir items
4197 * 1 for xattr if selinux is on
4198 */
4199 err = btrfs_reserve_metadata_space(root, 5);
3994 if (err) 4200 if (err)
3995 goto fail; 4201 return err;
3996 4202
3997 trans = btrfs_start_transaction(root, 1); 4203 trans = btrfs_start_transaction(root, 1);
4204 if (!trans)
4205 goto fail;
3998 btrfs_set_trans_block_group(trans, dir); 4206 btrfs_set_trans_block_group(trans, dir);
3999 4207
4000 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4208 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4032,6 +4240,7 @@ out_unlock:
4032 nr = trans->blocks_used; 4240 nr = trans->blocks_used;
4033 btrfs_end_transaction_throttle(trans, root); 4241 btrfs_end_transaction_throttle(trans, root);
4034fail: 4242fail:
4243 btrfs_unreserve_metadata_space(root, 5);
4035 if (drop_inode) { 4244 if (drop_inode) {
4036 inode_dec_link_count(inode); 4245 inode_dec_link_count(inode);
4037 iput(inode); 4246 iput(inode);
@@ -4052,10 +4261,18 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4052 u64 objectid; 4261 u64 objectid;
4053 u64 index = 0; 4262 u64 index = 0;
4054 4263
4055 err = btrfs_check_metadata_free_space(root); 4264 /*
4265 * 2 for inode item and ref
4266 * 2 for dir items
4267 * 1 for xattr if selinux is on
4268 */
4269 err = btrfs_reserve_metadata_space(root, 5);
4056 if (err) 4270 if (err)
4057 goto fail; 4271 return err;
4272
4058 trans = btrfs_start_transaction(root, 1); 4273 trans = btrfs_start_transaction(root, 1);
4274 if (!trans)
4275 goto fail;
4059 btrfs_set_trans_block_group(trans, dir); 4276 btrfs_set_trans_block_group(trans, dir);
4060 4277
4061 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4278 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4096,6 +4313,7 @@ out_unlock:
4096 nr = trans->blocks_used; 4313 nr = trans->blocks_used;
4097 btrfs_end_transaction_throttle(trans, root); 4314 btrfs_end_transaction_throttle(trans, root);
4098fail: 4315fail:
4316 btrfs_unreserve_metadata_space(root, 5);
4099 if (drop_inode) { 4317 if (drop_inode) {
4100 inode_dec_link_count(inode); 4318 inode_dec_link_count(inode);
4101 iput(inode); 4319 iput(inode);
@@ -4118,10 +4336,16 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4118 if (inode->i_nlink == 0) 4336 if (inode->i_nlink == 0)
4119 return -ENOENT; 4337 return -ENOENT;
4120 4338
4121 btrfs_inc_nlink(inode); 4339 /*
4122 err = btrfs_check_metadata_free_space(root); 4340 * 1 item for inode ref
4341 * 2 items for dir items
4342 */
4343 err = btrfs_reserve_metadata_space(root, 3);
4123 if (err) 4344 if (err)
4124 goto fail; 4345 return err;
4346
4347 btrfs_inc_nlink(inode);
4348
4125 err = btrfs_set_inode_index(dir, &index); 4349 err = btrfs_set_inode_index(dir, &index);
4126 if (err) 4350 if (err)
4127 goto fail; 4351 goto fail;
@@ -4145,6 +4369,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4145 nr = trans->blocks_used; 4369 nr = trans->blocks_used;
4146 btrfs_end_transaction_throttle(trans, root); 4370 btrfs_end_transaction_throttle(trans, root);
4147fail: 4371fail:
4372 btrfs_unreserve_metadata_space(root, 3);
4148 if (drop_inode) { 4373 if (drop_inode) {
4149 inode_dec_link_count(inode); 4374 inode_dec_link_count(inode);
4150 iput(inode); 4375 iput(inode);
@@ -4164,17 +4389,21 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4164 u64 index = 0; 4389 u64 index = 0;
4165 unsigned long nr = 1; 4390 unsigned long nr = 1;
4166 4391
4167 err = btrfs_check_metadata_free_space(root); 4392 /*
4393 * 2 items for inode and ref
4394 * 2 items for dir items
4395 * 1 for xattr if selinux is on
4396 */
4397 err = btrfs_reserve_metadata_space(root, 5);
4168 if (err) 4398 if (err)
4169 goto out_unlock; 4399 return err;
4170 4400
4171 trans = btrfs_start_transaction(root, 1); 4401 trans = btrfs_start_transaction(root, 1);
4172 btrfs_set_trans_block_group(trans, dir); 4402 if (!trans) {
4173 4403 err = -ENOMEM;
4174 if (IS_ERR(trans)) {
4175 err = PTR_ERR(trans);
4176 goto out_unlock; 4404 goto out_unlock;
4177 } 4405 }
4406 btrfs_set_trans_block_group(trans, dir);
4178 4407
4179 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4408 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4180 if (err) { 4409 if (err) {
@@ -4223,6 +4452,7 @@ out_fail:
4223 btrfs_end_transaction_throttle(trans, root); 4452 btrfs_end_transaction_throttle(trans, root);
4224 4453
4225out_unlock: 4454out_unlock:
4455 btrfs_unreserve_metadata_space(root, 5);
4226 if (drop_on_err) 4456 if (drop_on_err)
4227 iput(inode); 4457 iput(inode);
4228 btrfs_btree_balance_dirty(root, nr); 4458 btrfs_btree_balance_dirty(root, nr);
@@ -4684,7 +4914,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4684 */ 4914 */
4685 clear_extent_bit(tree, page_start, page_end, 4915 clear_extent_bit(tree, page_start, page_end,
4686 EXTENT_DIRTY | EXTENT_DELALLOC | 4916 EXTENT_DIRTY | EXTENT_DELALLOC |
4687 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS); 4917 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
4918 NULL, GFP_NOFS);
4688 /* 4919 /*
4689 * whoever cleared the private bit is responsible 4920 * whoever cleared the private bit is responsible
4690 * for the finish_ordered_io 4921 * for the finish_ordered_io
@@ -4697,8 +4928,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4697 lock_extent(tree, page_start, page_end, GFP_NOFS); 4928 lock_extent(tree, page_start, page_end, GFP_NOFS);
4698 } 4929 }
4699 clear_extent_bit(tree, page_start, page_end, 4930 clear_extent_bit(tree, page_start, page_end,
4700 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC, 4931 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4701 1, 1, NULL, GFP_NOFS); 4932 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
4702 __btrfs_releasepage(page, GFP_NOFS); 4933 __btrfs_releasepage(page, GFP_NOFS);
4703 4934
4704 ClearPageChecked(page); 4935 ClearPageChecked(page);
@@ -4747,6 +4978,13 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4747 goto out; 4978 goto out;
4748 } 4979 }
4749 4980
4981 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
4982 if (ret) {
4983 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4984 ret = VM_FAULT_SIGBUS;
4985 goto out;
4986 }
4987
4750 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 4988 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4751again: 4989again:
4752 lock_page(page); 4990 lock_page(page);
@@ -4778,7 +5016,24 @@ again:
4778 goto again; 5016 goto again;
4779 } 5017 }
4780 5018
4781 btrfs_set_extent_delalloc(inode, page_start, page_end); 5019 /*
5020 * XXX - page_mkwrite gets called every time the page is dirtied, even
5021 * if it was already dirty, so for space accounting reasons we need to
5022 * clear any delalloc bits for the range we are about to save. There
5023 * is probably a better way to do this, but for now stay consistent with
5024 * prepare_pages in the normal write path.
5025 */
5026 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
5027 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5028 GFP_NOFS);
5029
5030 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
5031 if (ret) {
5032 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5033 ret = VM_FAULT_SIGBUS;
5034 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5035 goto out_unlock;
5036 }
4782 ret = 0; 5037 ret = 0;
4783 5038
4784 /* page is wholly or partially inside EOF */ 5039 /* page is wholly or partially inside EOF */
@@ -4797,10 +5052,13 @@ again:
4797 set_page_dirty(page); 5052 set_page_dirty(page);
4798 SetPageUptodate(page); 5053 SetPageUptodate(page);
4799 5054
4800 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; 5055 BTRFS_I(inode)->last_trans = root->fs_info->generation;
5056 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5057
4801 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5058 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4802 5059
4803out_unlock: 5060out_unlock:
5061 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
4804 if (!ret) 5062 if (!ret)
4805 return VM_FAULT_LOCKED; 5063 return VM_FAULT_LOCKED;
4806 unlock_page(page); 5064 unlock_page(page);
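
btrfs_truncate_page and btrfs_page_mkwrite now share the same clear-then-set idiom: drop any delalloc accounting already attached to the range so the reservation taken for this operation is not double counted, then re-mark the range. Factored out it would look roughly like this (a sketch; btrfs_redirty_range is an invented name):

static int btrfs_redirty_range(struct inode *inode, u64 start, u64 end)
{
	/* release the accounting from any previous delalloc here */
	clear_extent_bits(&BTRFS_I(inode)->io_tree, start, end,
			  EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING, GFP_NOFS);

	/* re-mark the range against the freshly taken reservation */
	return btrfs_set_extent_delalloc(inode, start, end);
}
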
@@ -4821,7 +5079,9 @@ static void btrfs_truncate(struct inode *inode)
4821 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 5079 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4822 return; 5080 return;
4823 5081
4824 btrfs_truncate_page(inode->i_mapping, inode->i_size); 5082 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5083 if (ret)
5084 return;
4825 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 5085 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4826 5086
4827 trans = btrfs_start_transaction(root, 1); 5087 trans = btrfs_start_transaction(root, 1);
@@ -4916,7 +5176,12 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
4916 if (!ei) 5176 if (!ei)
4917 return NULL; 5177 return NULL;
4918 ei->last_trans = 0; 5178 ei->last_trans = 0;
5179 ei->last_sub_trans = 0;
4919 ei->logged_trans = 0; 5180 ei->logged_trans = 0;
5181 ei->outstanding_extents = 0;
5182 ei->reserved_extents = 0;
5183 ei->root = NULL;
5184 spin_lock_init(&ei->accounting_lock);
4920 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 5185 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4921 INIT_LIST_HEAD(&ei->i_orphan); 5186 INIT_LIST_HEAD(&ei->i_orphan);
4922 INIT_LIST_HEAD(&ei->ordered_operations); 5187 INIT_LIST_HEAD(&ei->ordered_operations);
@@ -4932,6 +5197,14 @@ void btrfs_destroy_inode(struct inode *inode)
4932 WARN_ON(inode->i_data.nrpages); 5197 WARN_ON(inode->i_data.nrpages);
4933 5198
4934 /* 5199 /*
5200 * This can happen when we create an inode, but somebody else also
5201 * created the same inode and we need to destroy the one we already
5202 * created.
5203 */
5204 if (!root)
5205 goto free;
5206
5207 /*
4935 * Make sure we're properly removed from the ordered operation 5208 * Make sure we're properly removed from the ordered operation
4936 * lists. 5209 * lists.
4937 */ 5210 */
@@ -4966,6 +5239,7 @@ void btrfs_destroy_inode(struct inode *inode)
4966 } 5239 }
4967 inode_tree_del(inode); 5240 inode_tree_del(inode);
4968 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 5241 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5242free:
4969 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 5243 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4970} 5244}
4971 5245
@@ -5070,7 +5344,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5070 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 5344 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5071 return -ENOTEMPTY; 5345 return -ENOTEMPTY;
5072 5346
5073 ret = btrfs_check_metadata_free_space(root); 5347 /*
5348 * We want to reserve the absolute worst-case number of items. So if
5349 * both inodes are subvols and we need to unlink them then that would
5350 * require 4 item modifications, but if they are both normal inodes it
5351 * would require 5 item modifications, so we'll assume they're normal
5352 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5353 * should cover the worst case number of items we'll modify.
5354 */
5355 ret = btrfs_reserve_metadata_space(root, 11);
5074 if (ret) 5356 if (ret)
5075 return ret; 5357 return ret;
5076 5358
@@ -5185,6 +5467,8 @@ out_fail:
5185 5467
5186 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 5468 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5187 up_read(&root->fs_info->subvol_sem); 5469 up_read(&root->fs_info->subvol_sem);
5470
5471 btrfs_unreserve_metadata_space(root, 11);
5188 return ret; 5472 return ret;
5189} 5473}
5190 5474
@@ -5256,11 +5540,18 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5256 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 5540 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5257 return -ENAMETOOLONG; 5541 return -ENAMETOOLONG;
5258 5542
5259 err = btrfs_check_metadata_free_space(root); 5543 /*
5544 * 2 items for inode item and ref
5545 * 2 items for dir items
5546 * 1 item for xattr if selinux is on
5547 */
5548 err = btrfs_reserve_metadata_space(root, 5);
5260 if (err) 5549 if (err)
5261 goto out_fail; 5550 return err;
5262 5551
5263 trans = btrfs_start_transaction(root, 1); 5552 trans = btrfs_start_transaction(root, 1);
5553 if (!trans)
5554 goto out_fail;
5264 btrfs_set_trans_block_group(trans, dir); 5555 btrfs_set_trans_block_group(trans, dir);
5265 5556
5266 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 5557 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -5341,6 +5632,7 @@ out_unlock:
5341 nr = trans->blocks_used; 5632 nr = trans->blocks_used;
5342 btrfs_end_transaction_throttle(trans, root); 5633 btrfs_end_transaction_throttle(trans, root);
5343out_fail: 5634out_fail:
5635 btrfs_unreserve_metadata_space(root, 5);
5344 if (drop_inode) { 5636 if (drop_inode) {
5345 inode_dec_link_count(inode); 5637 inode_dec_link_count(inode);
5346 iput(inode); 5638 iput(inode);
@@ -5362,6 +5654,11 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
5362 5654
5363 while (num_bytes > 0) { 5655 while (num_bytes > 0) {
5364 alloc_size = min(num_bytes, root->fs_info->max_extent); 5656 alloc_size = min(num_bytes, root->fs_info->max_extent);
5657
5658 ret = btrfs_reserve_metadata_space(root, 1);
5659 if (ret)
5660 goto out;
5661
5365 ret = btrfs_reserve_extent(trans, root, alloc_size, 5662 ret = btrfs_reserve_extent(trans, root, alloc_size,
5366 root->sectorsize, 0, alloc_hint, 5663 root->sectorsize, 0, alloc_hint,
5367 (u64)-1, &ins, 1); 5664 (u64)-1, &ins, 1);
@@ -5381,6 +5678,7 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
5381 num_bytes -= ins.offset; 5678 num_bytes -= ins.offset;
5382 cur_offset += ins.offset; 5679 cur_offset += ins.offset;
5383 alloc_hint = ins.objectid + ins.offset; 5680 alloc_hint = ins.objectid + ins.offset;
5681 btrfs_unreserve_metadata_space(root, 1);
5384 } 5682 }
5385out: 5683out:
5386 if (cur_offset > start) { 5684 if (cur_offset > start) {
@@ -5544,7 +5842,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
5544 .permission = btrfs_permission, 5842 .permission = btrfs_permission,
5545}; 5843};
5546 5844
5547static struct file_operations btrfs_dir_file_operations = { 5845static const struct file_operations btrfs_dir_file_operations = {
5548 .llseek = generic_file_llseek, 5846 .llseek = generic_file_llseek,
5549 .read = generic_read_dir, 5847 .read = generic_read_dir,
5550 .readdir = btrfs_real_readdir, 5848 .readdir = btrfs_real_readdir,
@@ -5566,6 +5864,8 @@ static struct extent_io_ops btrfs_extent_io_ops = {
5566 .readpage_io_failed_hook = btrfs_io_failed_hook, 5864 .readpage_io_failed_hook = btrfs_io_failed_hook,
5567 .set_bit_hook = btrfs_set_bit_hook, 5865 .set_bit_hook = btrfs_set_bit_hook,
5568 .clear_bit_hook = btrfs_clear_bit_hook, 5866 .clear_bit_hook = btrfs_clear_bit_hook,
5867 .merge_extent_hook = btrfs_merge_extent_hook,
5868 .split_extent_hook = btrfs_split_extent_hook,
5569}; 5869};
5570 5870
5571/* 5871/*
@@ -5632,6 +5932,6 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
5632 .removexattr = btrfs_removexattr, 5932 .removexattr = btrfs_removexattr,
5633}; 5933};
5634 5934
5635struct dentry_operations btrfs_dentry_operations = { 5935const struct dentry_operations btrfs_dentry_operations = {
5636 .d_delete = btrfs_dentry_delete, 5936 .d_delete = btrfs_dentry_delete,
5637}; 5937};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a8577a7f26ab..cdbb054102b9 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -239,7 +239,13 @@ static noinline int create_subvol(struct btrfs_root *root,
239 u64 index = 0; 239 u64 index = 0;
240 unsigned long nr = 1; 240 unsigned long nr = 1;
241 241
242 ret = btrfs_check_metadata_free_space(root); 242 /*
243 * 1 - inode item
244 * 2 - refs
245 * 1 - root item
246 * 2 - dir items
247 */
248 ret = btrfs_reserve_metadata_space(root, 6);
243 if (ret) 249 if (ret)
244 return ret; 250 return ret;
245 251
@@ -340,6 +346,9 @@ fail:
340 err = btrfs_commit_transaction(trans, root); 346 err = btrfs_commit_transaction(trans, root);
341 if (err && !ret) 347 if (err && !ret)
342 ret = err; 348 ret = err;
349
350 btrfs_unreserve_metadata_space(root, 6);
351 btrfs_btree_balance_dirty(root, nr);
343 return ret; 352 return ret;
344} 353}
345 354
@@ -355,19 +364,27 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
355 if (!root->ref_cows) 364 if (!root->ref_cows)
356 return -EINVAL; 365 return -EINVAL;
357 366
358 ret = btrfs_check_metadata_free_space(root); 367 /*
368 * 1 - inode item
369 * 2 - refs
370 * 1 - root item
371 * 2 - dir items
372 */
373 ret = btrfs_reserve_metadata_space(root, 6);
359 if (ret) 374 if (ret)
360 goto fail_unlock; 375 goto fail_unlock;
361 376
362 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); 377 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
363 if (!pending_snapshot) { 378 if (!pending_snapshot) {
364 ret = -ENOMEM; 379 ret = -ENOMEM;
380 btrfs_unreserve_metadata_space(root, 6);
365 goto fail_unlock; 381 goto fail_unlock;
366 } 382 }
367 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS); 383 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
368 if (!pending_snapshot->name) { 384 if (!pending_snapshot->name) {
369 ret = -ENOMEM; 385 ret = -ENOMEM;
370 kfree(pending_snapshot); 386 kfree(pending_snapshot);
387 btrfs_unreserve_metadata_space(root, 6);
371 goto fail_unlock; 388 goto fail_unlock;
372 } 389 }
373 memcpy(pending_snapshot->name, name, namelen); 390 memcpy(pending_snapshot->name, name, namelen);
@@ -813,6 +830,7 @@ out_up_write:
813out_unlock: 830out_unlock:
814 mutex_unlock(&inode->i_mutex); 831 mutex_unlock(&inode->i_mutex);
815 if (!err) { 832 if (!err) {
833 shrink_dcache_sb(root->fs_info->sb);
816 btrfs_invalidate_inodes(dest); 834 btrfs_invalidate_inodes(dest);
817 d_delete(dentry); 835 d_delete(dentry);
818 } 836 }
@@ -1105,8 +1123,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1105 datao += off - key.offset; 1123 datao += off - key.offset;
1106 datal -= off - key.offset; 1124 datal -= off - key.offset;
1107 } 1125 }
1108 if (key.offset + datao + datal > off + len) 1126
1109 datal = off + len - key.offset - datao; 1127 if (key.offset + datal > off + len)
1128 datal = off + len - key.offset;
1129
1110 /* disko == 0 means it's a hole */ 1130 /* disko == 0 means it's a hole */
1111 if (!disko) 1131 if (!disko)
1112 datao = 0; 1132 datao = 0;
@@ -1215,15 +1235,15 @@ static long btrfs_ioctl_trans_start(struct file *file)
1215 struct inode *inode = fdentry(file)->d_inode; 1235 struct inode *inode = fdentry(file)->d_inode;
1216 struct btrfs_root *root = BTRFS_I(inode)->root; 1236 struct btrfs_root *root = BTRFS_I(inode)->root;
1217 struct btrfs_trans_handle *trans; 1237 struct btrfs_trans_handle *trans;
1218 int ret = 0; 1238 int ret;
1219 1239
1240 ret = -EPERM;
1220 if (!capable(CAP_SYS_ADMIN)) 1241 if (!capable(CAP_SYS_ADMIN))
1221 return -EPERM; 1242 goto out;
1222 1243
1223 if (file->private_data) { 1244 ret = -EINPROGRESS;
1224 ret = -EINPROGRESS; 1245 if (file->private_data)
1225 goto out; 1246 goto out;
1226 }
1227 1247
1228 ret = mnt_want_write(file->f_path.mnt); 1248 ret = mnt_want_write(file->f_path.mnt);
1229 if (ret) 1249 if (ret)
@@ -1233,12 +1253,19 @@ static long btrfs_ioctl_trans_start(struct file *file)
1233 root->fs_info->open_ioctl_trans++; 1253 root->fs_info->open_ioctl_trans++;
1234 mutex_unlock(&root->fs_info->trans_mutex); 1254 mutex_unlock(&root->fs_info->trans_mutex);
1235 1255
1256 ret = -ENOMEM;
1236 trans = btrfs_start_ioctl_transaction(root, 0); 1257 trans = btrfs_start_ioctl_transaction(root, 0);
1237 if (trans) 1258 if (!trans)
1238 file->private_data = trans; 1259 goto out_drop;
1239 else 1260
1240 ret = -ENOMEM; 1261 file->private_data = trans;
1241 /*printk(KERN_INFO "btrfs_ioctl_trans_start on %p\n", file);*/ 1262 return 0;
1263
1264out_drop:
1265 mutex_lock(&root->fs_info->trans_mutex);
1266 root->fs_info->open_ioctl_trans--;
1267 mutex_unlock(&root->fs_info->trans_mutex);
1268 mnt_drop_write(file->f_path.mnt);
1242out: 1269out:
1243 return ret; 1270 return ret;
1244} 1271}
@@ -1254,24 +1281,20 @@ long btrfs_ioctl_trans_end(struct file *file)
1254 struct inode *inode = fdentry(file)->d_inode; 1281 struct inode *inode = fdentry(file)->d_inode;
1255 struct btrfs_root *root = BTRFS_I(inode)->root; 1282 struct btrfs_root *root = BTRFS_I(inode)->root;
1256 struct btrfs_trans_handle *trans; 1283 struct btrfs_trans_handle *trans;
1257 int ret = 0;
1258 1284
1259 trans = file->private_data; 1285 trans = file->private_data;
1260 if (!trans) { 1286 if (!trans)
1261 ret = -EINVAL; 1287 return -EINVAL;
1262 goto out;
1263 }
1264 btrfs_end_transaction(trans, root);
1265 file->private_data = NULL; 1288 file->private_data = NULL;
1266 1289
1290 btrfs_end_transaction(trans, root);
1291
1267 mutex_lock(&root->fs_info->trans_mutex); 1292 mutex_lock(&root->fs_info->trans_mutex);
1268 root->fs_info->open_ioctl_trans--; 1293 root->fs_info->open_ioctl_trans--;
1269 mutex_unlock(&root->fs_info->trans_mutex); 1294 mutex_unlock(&root->fs_info->trans_mutex);
1270 1295
1271 mnt_drop_write(file->f_path.mnt); 1296 mnt_drop_write(file->f_path.mnt);
1272 1297 return 0;
1273out:
1274 return ret;
1275} 1298}
1276 1299
1277long btrfs_ioctl(struct file *file, unsigned int 1300long btrfs_ioctl(struct file *file, unsigned int
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b5d6d24726b0..5799bc46a309 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -306,6 +306,12 @@ int btrfs_remove_ordered_extent(struct inode *inode,
306 tree->last = NULL; 306 tree->last = NULL;
307 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); 307 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
308 308
309 spin_lock(&BTRFS_I(inode)->accounting_lock);
310 BTRFS_I(inode)->outstanding_extents--;
311 spin_unlock(&BTRFS_I(inode)->accounting_lock);
312 btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root,
313 inode, 1);
314
309 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 315 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
310 list_del_init(&entry->root_extent_list); 316 list_del_init(&entry->root_extent_list);
311 317
@@ -458,7 +464,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
458 * start IO on any dirty ones so the wait doesn't stall waiting 464 * start IO on any dirty ones so the wait doesn't stall waiting
459 * for pdflush to find them 465 * for pdflush to find them
460 */ 466 */
461 btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL); 467 filemap_fdatawrite_range(inode->i_mapping, start, end);
462 if (wait) { 468 if (wait) {
463 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, 469 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
464 &entry->flags)); 470 &entry->flags));
@@ -488,17 +494,15 @@ again:
488 /* start IO across the range first to instantiate any delalloc 494 /* start IO across the range first to instantiate any delalloc
489 * extents 495 * extents
490 */ 496 */
491 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); 497 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
492 498
493 /* The compression code will leave pages locked but return from 499 /* The compression code will leave pages locked but return from
494 * writepage without setting the page writeback. Starting again 500 * writepage without setting the page writeback. Starting again
495 * with WB_SYNC_ALL will end up waiting for the IO to actually start. 501 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
496 */ 502 */
497 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); 503 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
498 504
499 btrfs_wait_on_page_writeback_range(inode->i_mapping, 505 filemap_fdatawait_range(inode->i_mapping, start, orig_end);
500 start >> PAGE_CACHE_SHIFT,
501 orig_end >> PAGE_CACHE_SHIFT);
502 506
503 end = orig_end; 507 end = orig_end;
504 found = 0; 508 found = 0;
@@ -716,89 +720,6 @@ out:
716} 720}
717 721
718 722
719/**
720 * taken from mm/filemap.c because it isn't exported
721 *
722 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
723 * @mapping: address space structure to write
724 * @start: offset in bytes where the range starts
725 * @end: offset in bytes where the range ends (inclusive)
726 * @sync_mode: enable synchronous operation
727 *
728 * Start writeback against all of a mapping's dirty pages that lie
729 * within the byte offsets <start, end> inclusive.
730 *
731 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
732 * opposed to a regular memory cleansing writeback. The difference between
733 * these two operations is that if a dirty page/buffer is encountered, it must
734 * be waited upon, and not just skipped over.
735 */
736int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
737 loff_t end, int sync_mode)
738{
739 struct writeback_control wbc = {
740 .sync_mode = sync_mode,
741 .nr_to_write = mapping->nrpages * 2,
742 .range_start = start,
743 .range_end = end,
744 };
745 return btrfs_writepages(mapping, &wbc);
746}
747
748/**
749 * taken from mm/filemap.c because it isn't exported
750 *
751 * wait_on_page_writeback_range - wait for writeback to complete
752 * @mapping: target address_space
753 * @start: beginning page index
754 * @end: ending page index
755 *
756 * Wait for writeback to complete against pages indexed by start->end
757 * inclusive
758 */
759int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
760 pgoff_t start, pgoff_t end)
761{
762 struct pagevec pvec;
763 int nr_pages;
764 int ret = 0;
765 pgoff_t index;
766
767 if (end < start)
768 return 0;
769
770 pagevec_init(&pvec, 0);
771 index = start;
772 while ((index <= end) &&
773 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
774 PAGECACHE_TAG_WRITEBACK,
775 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
776 unsigned i;
777
778 for (i = 0; i < nr_pages; i++) {
779 struct page *page = pvec.pages[i];
780
781 /* until radix tree lookup accepts end_index */
782 if (page->index > end)
783 continue;
784
785 wait_on_page_writeback(page);
786 if (PageError(page))
787 ret = -EIO;
788 }
789 pagevec_release(&pvec);
790 cond_resched();
791 }
792
793 /* Check for outstanding write errors */
794 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
795 ret = -ENOSPC;
796 if (test_and_clear_bit(AS_EIO, &mapping->flags))
797 ret = -EIO;
798
799 return ret;
800}
801
802/* 723/*
803 * add a given inode to the list of inodes that must be fully on 724 * add a given inode to the list of inodes that must be fully on
804 * disk before a transaction commit finishes. 725 * disk before a transaction commit finishes.
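
The two open-coded copies of mm/filemap.c helpers are gone; the calls above now use the generic routines directly. For reference, the signatures being relied on (quoted from memory of this kernel generation, so treat as an assumption to verify against include/linux/fs.h):

/* Generic page-cache routines replacing the removed helpers:
 *
 *   int filemap_fdatawrite_range(struct address_space *mapping,
 *                                loff_t start, loff_t end);
 *   int filemap_fdatawait_range(struct address_space *mapping,
 *                               loff_t start_byte, loff_t end_byte);
 *
 * Both take inclusive byte offsets, matching the start/orig_end
 * values btrfs passes in the hunks above.
 */
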
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 993a7ea45c70..f82e87488ca8 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -153,10 +153,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
153int btrfs_ordered_update_i_size(struct inode *inode, 153int btrfs_ordered_update_i_size(struct inode *inode,
154 struct btrfs_ordered_extent *ordered); 154 struct btrfs_ordered_extent *ordered);
155int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); 155int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
156int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
157 pgoff_t start, pgoff_t end);
158int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
159 loff_t end, int sync_mode);
160int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only); 156int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
161int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); 157int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
162int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 158int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 361ad323faac..cfcc93c93a7b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3518,7 +3518,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
3518 BUG_ON(!rc->block_group); 3518 BUG_ON(!rc->block_group);
3519 3519
3520 btrfs_init_workers(&rc->workers, "relocate", 3520 btrfs_init_workers(&rc->workers, "relocate",
3521 fs_info->thread_pool_size); 3521 fs_info->thread_pool_size, NULL);
3522 3522
3523 rc->extent_root = extent_root; 3523 rc->extent_root = extent_root;
3524 btrfs_prepare_block_group_relocation(extent_root, rc->block_group); 3524 btrfs_prepare_block_group_relocation(extent_root, rc->block_group);
@@ -3701,7 +3701,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
3701 mapping_tree_init(&rc->reloc_root_tree); 3701 mapping_tree_init(&rc->reloc_root_tree);
3702 INIT_LIST_HEAD(&rc->reloc_roots); 3702 INIT_LIST_HEAD(&rc->reloc_roots);
3703 btrfs_init_workers(&rc->workers, "relocate", 3703 btrfs_init_workers(&rc->workers, "relocate",
3704 root->fs_info->thread_pool_size); 3704 root->fs_info->thread_pool_size, NULL);
3705 rc->extent_root = root->fs_info->extent_root; 3705 rc->extent_root = root->fs_info->extent_root;
3706 3706
3707 set_reloc_control(rc); 3707 set_reloc_control(rc);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 9351428f30e2..67fa2d29d663 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -159,7 +159,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
159 write_extent_buffer(l, item, ptr, sizeof(*item)); 159 write_extent_buffer(l, item, ptr, sizeof(*item));
160 btrfs_mark_buffer_dirty(path->nodes[0]); 160 btrfs_mark_buffer_dirty(path->nodes[0]);
161out: 161out:
162 btrfs_release_path(root, path);
163 btrfs_free_path(path); 162 btrfs_free_path(path);
164 return ret; 163 return ret;
165} 164}
@@ -332,7 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
332 BUG_ON(refs != 0); 331 BUG_ON(refs != 0);
333 ret = btrfs_del_item(trans, root, path); 332 ret = btrfs_del_item(trans, root, path);
334out: 333out:
335 btrfs_release_path(root, path);
336 btrfs_free_path(path); 334 btrfs_free_path(path);
337 return ret; 335 return ret;
338} 336}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 67035385444c..752a5463bf53 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,7 +66,8 @@ enum {
66 Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, 66 Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
67 Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, 67 Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
68 Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, 68 Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
69 Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_err, 69 Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
70 Opt_discard, Opt_err,
70}; 71};
71 72
72static match_table_t tokens = { 73static match_table_t tokens = {
@@ -88,6 +89,7 @@ static match_table_t tokens = {
88 {Opt_notreelog, "notreelog"}, 89 {Opt_notreelog, "notreelog"},
89 {Opt_flushoncommit, "flushoncommit"}, 90 {Opt_flushoncommit, "flushoncommit"},
90 {Opt_ratio, "metadata_ratio=%d"}, 91 {Opt_ratio, "metadata_ratio=%d"},
92 {Opt_discard, "discard"},
91 {Opt_err, NULL}, 93 {Opt_err, NULL},
92}; 94};
93 95
@@ -257,6 +259,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
257 info->metadata_ratio); 259 info->metadata_ratio);
258 } 260 }
259 break; 261 break;
262 case Opt_discard:
263 btrfs_set_opt(info->mount_opt, DISCARD);
264 break;
260 default: 265 default:
261 break; 266 break;
262 } 267 }
@@ -344,7 +349,9 @@ static int btrfs_fill_super(struct super_block *sb,
344 sb->s_export_op = &btrfs_export_ops; 349 sb->s_export_op = &btrfs_export_ops;
345 sb->s_xattr = btrfs_xattr_handlers; 350 sb->s_xattr = btrfs_xattr_handlers;
346 sb->s_time_gran = 1; 351 sb->s_time_gran = 1;
352#ifdef CONFIG_BTRFS_FS_POSIX_ACL
347 sb->s_flags |= MS_POSIXACL; 353 sb->s_flags |= MS_POSIXACL;
354#endif
348 355
349 tree_root = open_ctree(sb, fs_devices, (char *)data); 356 tree_root = open_ctree(sb, fs_devices, (char *)data);
350 357
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 88f866f85e7a..c207e8c32c9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -163,8 +163,14 @@ static void wait_current_trans(struct btrfs_root *root)
163 } 163 }
164} 164}
165 165
166enum btrfs_trans_type {
167 TRANS_START,
168 TRANS_JOIN,
169 TRANS_USERSPACE,
170};
171
166static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, 172static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
167 int num_blocks, int wait) 173 int num_blocks, int type)
168{ 174{
169 struct btrfs_trans_handle *h = 175 struct btrfs_trans_handle *h =
170 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 176 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
@@ -172,7 +178,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
172 178
173 mutex_lock(&root->fs_info->trans_mutex); 179 mutex_lock(&root->fs_info->trans_mutex);
174 if (!root->fs_info->log_root_recovering && 180 if (!root->fs_info->log_root_recovering &&
175 ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2)) 181 ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
182 type == TRANS_USERSPACE))
176 wait_current_trans(root); 183 wait_current_trans(root);
177 ret = join_transaction(root); 184 ret = join_transaction(root);
178 BUG_ON(ret); 185 BUG_ON(ret);
@@ -186,6 +193,9 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
186 h->alloc_exclude_start = 0; 193 h->alloc_exclude_start = 0;
187 h->delayed_ref_updates = 0; 194 h->delayed_ref_updates = 0;
188 195
196 if (!current->journal_info && type != TRANS_USERSPACE)
197 current->journal_info = h;
198
189 root->fs_info->running_transaction->use_count++; 199 root->fs_info->running_transaction->use_count++;
190 record_root_in_trans(h, root); 200 record_root_in_trans(h, root);
191 mutex_unlock(&root->fs_info->trans_mutex); 201 mutex_unlock(&root->fs_info->trans_mutex);
@@ -195,18 +205,18 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
195struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 205struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
196 int num_blocks) 206 int num_blocks)
197{ 207{
198 return start_transaction(root, num_blocks, 1); 208 return start_transaction(root, num_blocks, TRANS_START);
199} 209}
200struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 210struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
201 int num_blocks) 211 int num_blocks)
202{ 212{
203 return start_transaction(root, num_blocks, 0); 213 return start_transaction(root, num_blocks, TRANS_JOIN);
204} 214}
205 215
206struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, 216struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
207 int num_blocks) 217 int num_blocks)
208{ 218{
209 return start_transaction(r, num_blocks, 2); 219 return start_transaction(r, num_blocks, TRANS_USERSPACE);
210} 220}
211 221
212/* wait for a transaction commit to be fully complete */ 222/* wait for a transaction commit to be fully complete */
@@ -317,6 +327,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
317 wake_up(&cur_trans->writer_wait); 327 wake_up(&cur_trans->writer_wait);
318 put_transaction(cur_trans); 328 put_transaction(cur_trans);
319 mutex_unlock(&info->trans_mutex); 329 mutex_unlock(&info->trans_mutex);
330
331 if (current->journal_info == trans)
332 current->journal_info = NULL;
320 memset(trans, 0, sizeof(*trans)); 333 memset(trans, 0, sizeof(*trans));
321 kmem_cache_free(btrfs_trans_handle_cachep, trans); 334 kmem_cache_free(btrfs_trans_handle_cachep, trans);
322 335
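
With the handle parked in current->journal_info, code deeper in the call chain can tell whether it is already running inside a transaction. The consumers live outside this hunk, so the following is only an assumed usage sketch with an invented helper name:

/* Assumed consumer pattern: join the enclosing handle if the task
 * already has one, otherwise start our own. */
static struct btrfs_trans_handle *get_or_join(struct btrfs_root *root)
{
	if (current->journal_info)
		return current->journal_info;
	return btrfs_join_transaction(root, 1);
}
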
@@ -338,10 +351,10 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
338/* 351/*
339 * when btree blocks are allocated, they have some corresponding bits set for 352 * when btree blocks are allocated, they have some corresponding bits set for
340 * them in one of two extent_io trees. This is used to make sure all of 353 * them in one of two extent_io trees. This is used to make sure all of
341 * those extents are on disk for transaction or log commit 354 * those extents are sent to disk but does not wait on them
342 */ 355 */
343int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 356int btrfs_write_marked_extents(struct btrfs_root *root,
344 struct extent_io_tree *dirty_pages) 357 struct extent_io_tree *dirty_pages)
345{ 358{
346 int ret; 359 int ret;
347 int err = 0; 360 int err = 0;
@@ -388,6 +401,29 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
388 page_cache_release(page); 401 page_cache_release(page);
389 } 402 }
390 } 403 }
404 if (err)
405 werr = err;
406 return werr;
407}
408
409/*
410 * when btree blocks are allocated, they have some corresponding bits set for
411 * them in one of two extent_io trees. This is used to make sure all of
412 * those extents are on disk for transaction or log commit. We wait
413 * on all the pages and clear them from the dirty pages state tree
414 */
415int btrfs_wait_marked_extents(struct btrfs_root *root,
416 struct extent_io_tree *dirty_pages)
417{
418 int ret;
419 int err = 0;
420 int werr = 0;
421 struct page *page;
422 struct inode *btree_inode = root->fs_info->btree_inode;
423 u64 start = 0;
424 u64 end;
425 unsigned long index;
426
391 while (1) { 427 while (1) {
392 ret = find_first_extent_bit(dirty_pages, 0, &start, &end, 428 ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
393 EXTENT_DIRTY); 429 EXTENT_DIRTY);
@@ -418,6 +454,22 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
418 return werr; 454 return werr;
419} 455}
420 456
457/*
458 * when btree blocks are allocated, they have some corresponding bits set for
459 * them in one of two extent_io trees. This is used to make sure all of
460 * those extents are on disk for transaction or log commit
461 */
462int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
463 struct extent_io_tree *dirty_pages)
464{
465 int ret;
466 int ret2;
467
468 ret = btrfs_write_marked_extents(root, dirty_pages);
469 ret2 = btrfs_wait_marked_extents(root, dirty_pages);
470 return ret || ret2;
471}
472
421int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 473int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
422 struct btrfs_root *root) 474 struct btrfs_root *root)
423{ 475{
@@ -743,6 +795,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
743 memcpy(&pending->root_key, &key, sizeof(key)); 795 memcpy(&pending->root_key, &key, sizeof(key));
744fail: 796fail:
745 kfree(new_root_item); 797 kfree(new_root_item);
798 btrfs_unreserve_metadata_space(root, 6);
746 return ret; 799 return ret;
747} 800}
748 801
@@ -1059,6 +1112,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1059 1112
1060 mutex_unlock(&root->fs_info->trans_mutex); 1113 mutex_unlock(&root->fs_info->trans_mutex);
1061 1114
1115 if (current->journal_info == trans)
1116 current->journal_info = NULL;
1117
1062 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1118 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1063 return ret; 1119 return ret;
1064} 1120}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 663c67404918..d4e3e7a6938c 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -79,6 +79,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
79 struct inode *inode) 79 struct inode *inode)
80{ 80{
81 BTRFS_I(inode)->last_trans = trans->transaction->transid; 81 BTRFS_I(inode)->last_trans = trans->transaction->transid;
82 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
82} 83}
83 84
84int btrfs_end_transaction(struct btrfs_trans_handle *trans, 85int btrfs_end_transaction(struct btrfs_trans_handle *trans,
@@ -107,5 +108,9 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root); 108 struct btrfs_root *root);
108int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, 109int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
109 struct extent_io_tree *dirty_pages); 110 struct extent_io_tree *dirty_pages);
111int btrfs_write_marked_extents(struct btrfs_root *root,
112 struct extent_io_tree *dirty_pages);
113int btrfs_wait_marked_extents(struct btrfs_root *root,
114 struct extent_io_tree *dirty_pages);
110int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 115int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
111#endif 116#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 7827841b55cb..741666a7676a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -137,11 +137,20 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
137 137
138 mutex_lock(&root->log_mutex); 138 mutex_lock(&root->log_mutex);
139 if (root->log_root) { 139 if (root->log_root) {
140 if (!root->log_start_pid) {
141 root->log_start_pid = current->pid;
142 root->log_multiple_pids = false;
143 } else if (root->log_start_pid != current->pid) {
144 root->log_multiple_pids = true;
145 }
146
140 root->log_batch++; 147 root->log_batch++;
141 atomic_inc(&root->log_writers); 148 atomic_inc(&root->log_writers);
142 mutex_unlock(&root->log_mutex); 149 mutex_unlock(&root->log_mutex);
143 return 0; 150 return 0;
144 } 151 }
152 root->log_multiple_pids = false;
153 root->log_start_pid = current->pid;
145 mutex_lock(&root->fs_info->tree_log_mutex); 154 mutex_lock(&root->fs_info->tree_log_mutex);
146 if (!root->fs_info->log_root_tree) { 155 if (!root->fs_info->log_root_tree) {
147 ret = btrfs_init_log_root_tree(trans, root->fs_info); 156 ret = btrfs_init_log_root_tree(trans, root->fs_info);
@@ -1971,6 +1980,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1971 int ret; 1980 int ret;
1972 struct btrfs_root *log = root->log_root; 1981 struct btrfs_root *log = root->log_root;
1973 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; 1982 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
1983 u64 log_transid = 0;
1974 1984
1975 mutex_lock(&root->log_mutex); 1985 mutex_lock(&root->log_mutex);
1976 index1 = root->log_transid % 2; 1986 index1 = root->log_transid % 2;
@@ -1987,10 +1997,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1987 1997
1988 while (1) { 1998 while (1) {
1989 unsigned long batch = root->log_batch; 1999 unsigned long batch = root->log_batch;
1990 mutex_unlock(&root->log_mutex); 2000 if (root->log_multiple_pids) {
1991 schedule_timeout_uninterruptible(1); 2001 mutex_unlock(&root->log_mutex);
1992 mutex_lock(&root->log_mutex); 2002 schedule_timeout_uninterruptible(1);
1993 2003 mutex_lock(&root->log_mutex);
2004 }
1994 wait_for_writer(trans, root); 2005 wait_for_writer(trans, root);
1995 if (batch == root->log_batch) 2006 if (batch == root->log_batch)
1996 break; 2007 break;
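
Condensed, the rewritten loop only pays the one-jiffy batching sleep when log_multiple_pids says several processes are logging; a lone fsync no longer idles a jiffy per iteration:

while (1) {
	unsigned long batch = root->log_batch;
	if (root->log_multiple_pids) {
		/* several writers: drop the mutex and give them a
		 * jiffy to add to the batch */
		mutex_unlock(&root->log_mutex);
		schedule_timeout_uninterruptible(1);
		mutex_lock(&root->log_mutex);
	}
	wait_for_writer(trans, root);
	if (batch == root->log_batch)
		break;	/* batch is stable, go commit it */
}
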
@@ -2003,14 +2014,19 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2003 goto out; 2014 goto out;
2004 } 2015 }
2005 2016
2006 ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages); 2017 /* we start IO on all the marked extents here, but we don't actually
2018 * wait for them until later.
2019 */
2020 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages);
2007 BUG_ON(ret); 2021 BUG_ON(ret);
2008 2022
2009 btrfs_set_root_node(&log->root_item, log->node); 2023 btrfs_set_root_node(&log->root_item, log->node);
2010 2024
2011 root->log_batch = 0; 2025 root->log_batch = 0;
2026 log_transid = root->log_transid;
2012 root->log_transid++; 2027 root->log_transid++;
2013 log->log_transid = root->log_transid; 2028 log->log_transid = root->log_transid;
2029 root->log_start_pid = 0;
2014 smp_mb(); 2030 smp_mb();
2015 /* 2031 /*
2016 * log tree has been flushed to disk, new modifications of 2032 * log tree has been flushed to disk, new modifications of
@@ -2036,6 +2052,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2036 2052
2037 index2 = log_root_tree->log_transid % 2; 2053 index2 = log_root_tree->log_transid % 2;
2038 if (atomic_read(&log_root_tree->log_commit[index2])) { 2054 if (atomic_read(&log_root_tree->log_commit[index2])) {
2055 btrfs_wait_marked_extents(log, &log->dirty_log_pages);
2039 wait_log_commit(trans, log_root_tree, 2056 wait_log_commit(trans, log_root_tree,
2040 log_root_tree->log_transid); 2057 log_root_tree->log_transid);
2041 mutex_unlock(&log_root_tree->log_mutex); 2058 mutex_unlock(&log_root_tree->log_mutex);
@@ -2055,6 +2072,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2055 * check the full commit flag again 2072 * check the full commit flag again
2056 */ 2073 */
2057 if (root->fs_info->last_trans_log_full_commit == trans->transid) { 2074 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2075 btrfs_wait_marked_extents(log, &log->dirty_log_pages);
2058 mutex_unlock(&log_root_tree->log_mutex); 2076 mutex_unlock(&log_root_tree->log_mutex);
2059 ret = -EAGAIN; 2077 ret = -EAGAIN;
2060 goto out_wake_log_root; 2078 goto out_wake_log_root;
@@ -2063,6 +2081,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2063 ret = btrfs_write_and_wait_marked_extents(log_root_tree, 2081 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
2064 &log_root_tree->dirty_log_pages); 2082 &log_root_tree->dirty_log_pages);
2065 BUG_ON(ret); 2083 BUG_ON(ret);
2084 btrfs_wait_marked_extents(log, &log->dirty_log_pages);
2066 2085
2067 btrfs_set_super_log_root(&root->fs_info->super_for_commit, 2086 btrfs_set_super_log_root(&root->fs_info->super_for_commit,
2068 log_root_tree->node->start); 2087 log_root_tree->node->start);
@@ -2082,9 +2101,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2082 * the running transaction open, so a full commit can't hop 2101 * the running transaction open, so a full commit can't hop
2083 * in and cause problems either. 2102 * in and cause problems either.
2084 */ 2103 */
2085 write_ctree_super(trans, root->fs_info->tree_root, 2); 2104 write_ctree_super(trans, root->fs_info->tree_root, 1);
2086 ret = 0; 2105 ret = 0;
2087 2106
2107 mutex_lock(&root->log_mutex);
2108 if (root->last_log_commit < log_transid)
2109 root->last_log_commit = log_transid;
2110 mutex_unlock(&root->log_mutex);
2111
2088out_wake_log_root: 2112out_wake_log_root:
2089 atomic_set(&log_root_tree->log_commit[index2], 0); 2113 atomic_set(&log_root_tree->log_commit[index2], 0);
2090 smp_mb(); 2114 smp_mb();
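
The write/wait split pays off here; the resulting ordering in btrfs_sync_log, summarized from the hunks above:

/*
 *   btrfs_write_marked_extents(log, ...)            start log-tree IO
 *   ...update the log root item, bump log_transid...
 *   btrfs_write_and_wait_marked_extents(log_root_tree, ...)
 *   btrfs_wait_marked_extents(log, ...)             wait for step one
 *   write_ctree_super(trans, tree_root, 1)          one super write
 *
 * Each early-exit path (a racing log commit, the full-commit
 * fallback) also picks up a btrfs_wait_marked_extents() call so the
 * IO started in step one is never left unwaited.
 */
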
@@ -2852,6 +2876,21 @@ out:
2852 return ret; 2876 return ret;
2853} 2877}
2854 2878
2879static int inode_in_log(struct btrfs_trans_handle *trans,
2880 struct inode *inode)
2881{
2882 struct btrfs_root *root = BTRFS_I(inode)->root;
2883 int ret = 0;
2884
2885 mutex_lock(&root->log_mutex);
2886 if (BTRFS_I(inode)->logged_trans == trans->transid &&
2887 BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
2888 ret = 1;
2889 mutex_unlock(&root->log_mutex);
2890 return ret;
2891}
2892
2893
2855/* 2894/*
2856 * helper function around btrfs_log_inode to make sure newly created 2895 * helper function around btrfs_log_inode to make sure newly created
2857 * parent directories also end up in the log. A minimal inode and backref 2896 * parent directories also end up in the log. A minimal inode and backref
@@ -2891,6 +2930,11 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
2891 if (ret) 2930 if (ret)
2892 goto end_no_trans; 2931 goto end_no_trans;
2893 2932
2933 if (inode_in_log(trans, inode)) {
2934 ret = BTRFS_NO_LOG_SYNC;
2935 goto end_no_trans;
2936 }
2937
2894 start_log_trans(trans, root); 2938 start_log_trans(trans, root);
2895 2939
2896 ret = btrfs_log_inode(trans, root, inode, inode_only); 2940 ret = btrfs_log_inode(trans, root, inode, inode_only);
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index d09c7609e16b..0776eacb5083 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -19,6 +19,9 @@
19#ifndef __TREE_LOG_ 19#ifndef __TREE_LOG_
20#define __TREE_LOG_ 20#define __TREE_LOG_
21 21
22/* return value for btrfs_log_dentry_safe that means we don't need to log it at all */
23#define BTRFS_NO_LOG_SYNC 256
24
22int btrfs_sync_log(struct btrfs_trans_handle *trans, 25int btrfs_sync_log(struct btrfs_trans_handle *trans,
23 struct btrfs_root *root); 26 struct btrfs_root *root);
24int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root); 27int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 23e7d36ff325..7eda483d7b5a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -446,8 +446,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
446 goto error; 446 goto error;
447 447
448 device->name = kstrdup(orig_dev->name, GFP_NOFS); 448 device->name = kstrdup(orig_dev->name, GFP_NOFS);
449 if (!device->name) 449 if (!device->name) {
450 kfree(device);
450 goto error; 451 goto error;
452 }
451 453
452 device->devid = orig_dev->devid; 454 device->devid = orig_dev->devid;
453 device->work.func = pending_bios_fn; 455 device->work.func = pending_bios_fn;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a9d3bf4d2689..b6dd5967c48a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -260,7 +260,7 @@ err:
260 * attributes are handled directly. 260 * attributes are handled directly.
261 */ 261 */
262struct xattr_handler *btrfs_xattr_handlers[] = { 262struct xattr_handler *btrfs_xattr_handlers[] = {
263#ifdef CONFIG_FS_POSIX_ACL 263#ifdef CONFIG_BTRFS_FS_POSIX_ACL
264 &btrfs_xattr_acl_access_handler, 264 &btrfs_xattr_acl_access_handler,
265 &btrfs_xattr_acl_default_handler, 265 &btrfs_xattr_acl_default_handler,
266#endif 266#endif