Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/Kconfig          |  13
-rw-r--r--  fs/btrfs/async-thread.c   |  61
-rw-r--r--  fs/btrfs/compression.c    |   1
-rw-r--r--  fs/btrfs/ctree.c          | 276
-rw-r--r--  fs/btrfs/ctree.h          |  28
-rw-r--r--  fs/btrfs/disk-io.c        | 120
-rw-r--r--  fs/btrfs/disk-io.h        |   2
-rw-r--r--  fs/btrfs/extent-tree.c    | 438
-rw-r--r--  fs/btrfs/extent_io.c      | 132
-rw-r--r--  fs/btrfs/extent_io.h      |  18
-rw-r--r--  fs/btrfs/extent_map.c     |   1
-rw-r--r--  fs/btrfs/file.c           |   5
-rw-r--r--  fs/btrfs/inode.c          |  84
-rw-r--r--  fs/btrfs/ioctl.c          |   1
-rw-r--r--  fs/btrfs/locking.c        | 208
-rw-r--r--  fs/btrfs/locking.h        |   6
-rw-r--r--  fs/btrfs/ordered-data.c   |   4
-rw-r--r--  fs/btrfs/ref-cache.c      |   1
-rw-r--r--  fs/btrfs/ref-cache.h      |   1
-rw-r--r--  fs/btrfs/super.c          |   6
-rw-r--r--  fs/btrfs/transaction.c    |   4
-rw-r--r--  fs/btrfs/tree-defrag.c    |   1
-rw-r--r--  fs/btrfs/tree-log.c       | 354
-rw-r--r--  fs/btrfs/volumes.c        |  49
-rw-r--r--  fs/btrfs/xattr.c          |  48
-rw-r--r--  fs/btrfs/xattr.h          |   2
26 files changed, 1413 insertions(+), 451 deletions(-)
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index f8fcf999ea1b..7bb3c020e570 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -16,3 +16,16 @@ config BTRFS_FS
 	  module will be called btrfs.
 
 	  If unsure, say N.
+
+config BTRFS_FS_POSIX_ACL
+	bool "Btrfs POSIX Access Control Lists"
+	depends on BTRFS_FS
+	select FS_POSIX_ACL
+	help
+	  POSIX Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the POSIX ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N
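
For reference, the new option simply hangs off the existing BTRFS_FS entry; a kernel .config fragment that enables it (an illustration only, not part of the patch) would look like:

	CONFIG_BTRFS_FS=m
	CONFIG_BTRFS_FS_POSIX_ACL=y
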
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8e2fec05dbe0..c84ca1f5259a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -16,11 +16,11 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/version.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
+#include <linux/ftrace.h>
 #include "async-thread.h"
 
 #define WORK_QUEUED_BIT 0
@@ -143,6 +143,7 @@ static int worker_loop(void *arg)
 	struct btrfs_work *work;
 	do {
 		spin_lock_irq(&worker->lock);
+again_locked:
 		while (!list_empty(&worker->pending)) {
 			cur = worker->pending.next;
 			work = list_entry(cur, struct btrfs_work, list);
@@ -165,14 +166,50 @@ static int worker_loop(void *arg)
 			check_idle_worker(worker);
 
 		}
-		worker->working = 0;
 		if (freezing(current)) {
+			worker->working = 0;
+			spin_unlock_irq(&worker->lock);
 			refrigerator();
 		} else {
-			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&worker->lock);
-			if (!kthread_should_stop())
+			if (!kthread_should_stop()) {
+				cpu_relax();
+				/*
+				 * we've dropped the lock, did someone else
+				 * jump_in?
+				 */
+				smp_mb();
+				if (!list_empty(&worker->pending))
+					continue;
+
+				/*
+				 * this short schedule allows more work to
+				 * come in without the queue functions
+				 * needing to go through wake_up_process()
+				 *
+				 * worker->working is still 1, so nobody
+				 * is going to try and wake us up
+				 */
+				schedule_timeout(1);
+				smp_mb();
+				if (!list_empty(&worker->pending))
+					continue;
+
+				/* still no more work?, sleep for real */
+				spin_lock_irq(&worker->lock);
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (!list_empty(&worker->pending))
+					goto again_locked;
+
+				/*
+				 * this makes sure we get a wakeup when someone
+				 * adds something new to the queue
+				 */
+				worker->working = 0;
+				spin_unlock_irq(&worker->lock);
+
 				schedule();
+			}
 			__set_current_state(TASK_RUNNING);
 		}
 	} while (!kthread_should_stop());
@@ -350,13 +387,14 @@ int btrfs_requeue_work(struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker = work->worker;
 	unsigned long flags;
+	int wake = 0;
 
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
 		goto out;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	atomic_inc(&worker->num_pending);
 	list_add_tail(&work->list, &worker->pending);
+	atomic_inc(&worker->num_pending);
 
 	/* by definition we're busy, take ourselves off the idle
 	 * list
@@ -368,10 +406,16 @@ int btrfs_requeue_work(struct btrfs_work *work)
 			       &worker->workers->worker_list);
 		spin_unlock_irqrestore(&worker->workers->lock, flags);
 	}
+	if (!worker->working) {
+		wake = 1;
+		worker->working = 1;
+	}
 
 	spin_unlock_irqrestore(&worker->lock, flags);
-
+	if (wake)
+		wake_up_process(worker->task);
 out:
+
 	return 0;
 }
 
@@ -398,9 +442,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	}
 
 	spin_lock_irqsave(&worker->lock, flags);
+
+	list_add_tail(&work->list, &worker->pending);
 	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
-	list_add_tail(&work->list, &worker->pending);
 
 	/*
 	 * avoid calling into wake_up_process if this thread has already
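
The worker_loop() change above replaces the old unconditional sleep with a two-stage idle path: poll briefly with worker->working still set so that queuers can skip wake_up_process(), and only then publish working = 0 and sleep for real. Below is a compilable userspace analogue of that idea using pthreads; it is a sketch for illustration only, and names such as demo_worker and demo_worker_idle are hypothetical, not part of btrfs.

/*
 * Userspace sketch (not from the patch) of the two-stage idle strategy:
 * busy-poll briefly so freshly queued work is picked up without a wakeup,
 * then advertise "not working" and sleep until signalled.
 */
#include <pthread.h>
#include <unistd.h>

struct demo_worker {
	pthread_mutex_t lock;
	pthread_cond_t  more_work;
	int pending;	/* number of queued work items */
	int working;	/* analogous to worker->working */
};

static void demo_worker_idle(struct demo_worker *w)
{
	pthread_mutex_lock(&w->lock);
	while (w->pending == 0) {
		pthread_mutex_unlock(&w->lock);

		/* stage 1: short nap; queuers do not need to wake us yet */
		usleep(1000);

		pthread_mutex_lock(&w->lock);
		if (w->pending)
			break;

		/*
		 * stage 2: still nothing, so publish that we are idle and
		 * sleep until a queuer signals more_work (this mirrors
		 * worker->working = 0 followed by schedule())
		 */
		w->working = 0;
		pthread_cond_wait(&w->more_work, &w->lock);
	}
	w->working = 1;
	pthread_mutex_unlock(&w->lock);
}

The pthread_cond_wait() plays the role of set_current_state(TASK_INTERRUPTIBLE) plus schedule(): the final re-check of the queue happens under the lock, so a wakeup that arrives between the check and the sleep cannot be lost.
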
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ee848d8585d9..ab07627084f1 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -32,7 +32,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
-#include <linux/version.h>
 #include <linux/pagevec.h>
 #include "compat.h"
 #include "ctree.h"
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9e46c0776816..551177c0011a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -54,6 +54,31 @@ struct btrfs_path *btrfs_alloc_path(void)
 	return path;
 }
 
+/*
+ * set all locked nodes in the path to blocking locks.  This should
+ * be done before scheduling
+ */
+noinline void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+	int i;
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+		if (p->nodes[i] && p->locks[i])
+			btrfs_set_lock_blocking(p->nodes[i]);
+	}
+}
+
+/*
+ * reset all the locked nodes in the path to spinning locks.
+ */
+noinline void btrfs_clear_path_blocking(struct btrfs_path *p)
+{
+	int i;
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+		if (p->nodes[i] && p->locks[i])
+			btrfs_clear_lock_blocking(p->nodes[i]);
+	}
+}
+
 /* this also releases the path */
 void btrfs_free_path(struct btrfs_path *p)
 {
@@ -272,6 +297,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
272 if (IS_ERR(cow)) 297 if (IS_ERR(cow))
273 return PTR_ERR(cow); 298 return PTR_ERR(cow);
274 299
300 /* cow is set to blocking by btrfs_init_new_buffer */
301
275 copy_extent_buffer(cow, buf, 0, 0, cow->len); 302 copy_extent_buffer(cow, buf, 0, 0, cow->len);
276 btrfs_set_header_bytenr(cow, cow->start); 303 btrfs_set_header_bytenr(cow, cow->start);
277 btrfs_set_header_generation(cow, trans->transid); 304 btrfs_set_header_generation(cow, trans->transid);
@@ -388,17 +415,20 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
388 WARN_ON(1); 415 WARN_ON(1);
389 } 416 }
390 417
391 spin_lock(&root->fs_info->hash_lock);
392 if (btrfs_header_generation(buf) == trans->transid && 418 if (btrfs_header_generation(buf) == trans->transid &&
393 btrfs_header_owner(buf) == root->root_key.objectid && 419 btrfs_header_owner(buf) == root->root_key.objectid &&
394 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 420 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
395 *cow_ret = buf; 421 *cow_ret = buf;
396 spin_unlock(&root->fs_info->hash_lock);
397 WARN_ON(prealloc_dest); 422 WARN_ON(prealloc_dest);
398 return 0; 423 return 0;
399 } 424 }
400 spin_unlock(&root->fs_info->hash_lock); 425
401 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); 426 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
427
428 if (parent)
429 btrfs_set_lock_blocking(parent);
430 btrfs_set_lock_blocking(buf);
431
402 ret = __btrfs_cow_block(trans, root, buf, parent, 432 ret = __btrfs_cow_block(trans, root, buf, parent,
403 parent_slot, cow_ret, search_start, 0, 433 parent_slot, cow_ret, search_start, 0,
404 prealloc_dest); 434 prealloc_dest);
@@ -504,6 +534,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
504 if (parent_nritems == 1) 534 if (parent_nritems == 1)
505 return 0; 535 return 0;
506 536
537 btrfs_set_lock_blocking(parent);
538
507 for (i = start_slot; i < end_slot; i++) { 539 for (i = start_slot; i < end_slot; i++) {
508 int close = 1; 540 int close = 1;
509 541
@@ -564,6 +596,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
564 search_start = last_block; 596 search_start = last_block;
565 597
566 btrfs_tree_lock(cur); 598 btrfs_tree_lock(cur);
599 btrfs_set_lock_blocking(cur);
567 err = __btrfs_cow_block(trans, root, cur, parent, i, 600 err = __btrfs_cow_block(trans, root, cur, parent, i,
568 &cur, search_start, 601 &cur, search_start,
569 min(16 * blocksize, 602 min(16 * blocksize,
@@ -862,6 +895,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
862 return 0; 895 return 0;
863 896
864 mid = path->nodes[level]; 897 mid = path->nodes[level];
898
865 WARN_ON(!path->locks[level]); 899 WARN_ON(!path->locks[level]);
866 WARN_ON(btrfs_header_generation(mid) != trans->transid); 900 WARN_ON(btrfs_header_generation(mid) != trans->transid);
867 901
@@ -884,6 +918,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
884 /* promote the child to a root */ 918 /* promote the child to a root */
885 child = read_node_slot(root, mid, 0); 919 child = read_node_slot(root, mid, 0);
886 btrfs_tree_lock(child); 920 btrfs_tree_lock(child);
921 btrfs_set_lock_blocking(child);
887 BUG_ON(!child); 922 BUG_ON(!child);
888 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0); 923 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
889 BUG_ON(ret); 924 BUG_ON(ret);
@@ -900,6 +935,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
900 935
901 add_root_to_dirty_list(root); 936 add_root_to_dirty_list(root);
902 btrfs_tree_unlock(child); 937 btrfs_tree_unlock(child);
938
903 path->locks[level] = 0; 939 path->locks[level] = 0;
904 path->nodes[level] = NULL; 940 path->nodes[level] = NULL;
905 clean_tree_block(trans, root, mid); 941 clean_tree_block(trans, root, mid);
@@ -924,6 +960,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
924 left = read_node_slot(root, parent, pslot - 1); 960 left = read_node_slot(root, parent, pslot - 1);
925 if (left) { 961 if (left) {
926 btrfs_tree_lock(left); 962 btrfs_tree_lock(left);
963 btrfs_set_lock_blocking(left);
927 wret = btrfs_cow_block(trans, root, left, 964 wret = btrfs_cow_block(trans, root, left,
928 parent, pslot - 1, &left, 0); 965 parent, pslot - 1, &left, 0);
929 if (wret) { 966 if (wret) {
@@ -934,6 +971,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
934 right = read_node_slot(root, parent, pslot + 1); 971 right = read_node_slot(root, parent, pslot + 1);
935 if (right) { 972 if (right) {
936 btrfs_tree_lock(right); 973 btrfs_tree_lock(right);
974 btrfs_set_lock_blocking(right);
937 wret = btrfs_cow_block(trans, root, right, 975 wret = btrfs_cow_block(trans, root, right,
938 parent, pslot + 1, &right, 0); 976 parent, pslot + 1, &right, 0);
939 if (wret) { 977 if (wret) {
@@ -1109,6 +1147,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1109 u32 left_nr; 1147 u32 left_nr;
1110 1148
1111 btrfs_tree_lock(left); 1149 btrfs_tree_lock(left);
1150 btrfs_set_lock_blocking(left);
1151
1112 left_nr = btrfs_header_nritems(left); 1152 left_nr = btrfs_header_nritems(left);
1113 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 1153 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1114 wret = 1; 1154 wret = 1;
@@ -1155,7 +1195,10 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1155 */ 1195 */
1156 if (right) { 1196 if (right) {
1157 u32 right_nr; 1197 u32 right_nr;
1198
1158 btrfs_tree_lock(right); 1199 btrfs_tree_lock(right);
1200 btrfs_set_lock_blocking(right);
1201
1159 right_nr = btrfs_header_nritems(right); 1202 right_nr = btrfs_header_nritems(right);
1160 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 1203 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1161 wret = 1; 1204 wret = 1;
@@ -1210,8 +1253,7 @@ static noinline void reada_for_search(struct btrfs_root *root,
1210 struct btrfs_disk_key disk_key; 1253 struct btrfs_disk_key disk_key;
1211 u32 nritems; 1254 u32 nritems;
1212 u64 search; 1255 u64 search;
1213 u64 lowest_read; 1256 u64 target;
1214 u64 highest_read;
1215 u64 nread = 0; 1257 u64 nread = 0;
1216 int direction = path->reada; 1258 int direction = path->reada;
1217 struct extent_buffer *eb; 1259 struct extent_buffer *eb;
@@ -1235,8 +1277,7 @@ static noinline void reada_for_search(struct btrfs_root *root,
1235 return; 1277 return;
1236 } 1278 }
1237 1279
1238 highest_read = search; 1280 target = search;
1239 lowest_read = search;
1240 1281
1241 nritems = btrfs_header_nritems(node); 1282 nritems = btrfs_header_nritems(node);
1242 nr = slot; 1283 nr = slot;
@@ -1256,27 +1297,80 @@ static noinline void reada_for_search(struct btrfs_root *root,
1256 break; 1297 break;
1257 } 1298 }
1258 search = btrfs_node_blockptr(node, nr); 1299 search = btrfs_node_blockptr(node, nr);
1259 if ((search >= lowest_read && search <= highest_read) || 1300 if ((search <= target && target - search <= 65536) ||
1260 (search < lowest_read && lowest_read - search <= 16384) || 1301 (search > target && search - target <= 65536)) {
1261 (search > highest_read && search - highest_read <= 16384)) {
1262 readahead_tree_block(root, search, blocksize, 1302 readahead_tree_block(root, search, blocksize,
1263 btrfs_node_ptr_generation(node, nr)); 1303 btrfs_node_ptr_generation(node, nr));
1264 nread += blocksize; 1304 nread += blocksize;
1265 } 1305 }
1266 nscan++; 1306 nscan++;
1267 if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32)) 1307 if ((nread > 65536 || nscan > 32))
1268 break; 1308 break;
1309 }
1310}
1269 1311
1270 if (nread > (256 * 1024) || nscan > 128) 1312/*
1271 break; 1313 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1314 * cache
1315 */
1316static noinline int reada_for_balance(struct btrfs_root *root,
1317 struct btrfs_path *path, int level)
1318{
1319 int slot;
1320 int nritems;
1321 struct extent_buffer *parent;
1322 struct extent_buffer *eb;
1323 u64 gen;
1324 u64 block1 = 0;
1325 u64 block2 = 0;
1326 int ret = 0;
1327 int blocksize;
1272 1328
1273 if (search < lowest_read) 1329 parent = path->nodes[level - 1];
1274 lowest_read = search; 1330 if (!parent)
1275 if (search > highest_read) 1331 return 0;
1276 highest_read = search; 1332
1333 nritems = btrfs_header_nritems(parent);
1334 slot = path->slots[level];
1335 blocksize = btrfs_level_size(root, level);
1336
1337 if (slot > 0) {
1338 block1 = btrfs_node_blockptr(parent, slot - 1);
1339 gen = btrfs_node_ptr_generation(parent, slot - 1);
1340 eb = btrfs_find_tree_block(root, block1, blocksize);
1341 if (eb && btrfs_buffer_uptodate(eb, gen))
1342 block1 = 0;
1343 free_extent_buffer(eb);
1344 }
1345 if (slot < nritems) {
1346 block2 = btrfs_node_blockptr(parent, slot + 1);
1347 gen = btrfs_node_ptr_generation(parent, slot + 1);
1348 eb = btrfs_find_tree_block(root, block2, blocksize);
1349 if (eb && btrfs_buffer_uptodate(eb, gen))
1350 block2 = 0;
1351 free_extent_buffer(eb);
1352 }
1353 if (block1 || block2) {
1354 ret = -EAGAIN;
1355 btrfs_release_path(root, path);
1356 if (block1)
1357 readahead_tree_block(root, block1, blocksize, 0);
1358 if (block2)
1359 readahead_tree_block(root, block2, blocksize, 0);
1360
1361 if (block1) {
1362 eb = read_tree_block(root, block1, blocksize, 0);
1363 free_extent_buffer(eb);
1364 }
1365 if (block2) {
1366 eb = read_tree_block(root, block2, blocksize, 0);
1367 free_extent_buffer(eb);
1368 }
1277 } 1369 }
1370 return ret;
1278} 1371}
1279 1372
1373
1280/* 1374/*
1281 * when we walk down the tree, it is usually safe to unlock the higher layers 1375 * when we walk down the tree, it is usually safe to unlock the higher layers
1282 * in the tree. The exceptions are when our path goes through slot 0, because 1376 * in the tree. The exceptions are when our path goes through slot 0, because
@@ -1328,6 +1422,32 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
1328} 1422}
1329 1423
1330/* 1424/*
1425 * This releases any locks held in the path starting at level and
1426 * going all the way up to the root.
1427 *
1428 * btrfs_search_slot will keep the lock held on higher nodes in a few
1429 * corner cases, such as COW of the block at slot zero in the node. This
1430 * ignores those rules, and it should only be called when there are no
1431 * more updates to be done higher up in the tree.
1432 */
1433noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1434{
1435 int i;
1436
1437 if (path->keep_locks || path->lowest_level)
1438 return;
1439
1440 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1441 if (!path->nodes[i])
1442 continue;
1443 if (!path->locks[i])
1444 continue;
1445 btrfs_tree_unlock(path->nodes[i]);
1446 path->locks[i] = 0;
1447 }
1448}
1449
1450/*
1331 * look for key in the tree. path is filled in with nodes along the way 1451 * look for key in the tree. path is filled in with nodes along the way
1332 * if key is found, we return zero and you can find the item in the leaf 1452 * if key is found, we return zero and you can find the item in the leaf
1333 * level of the path (level 0) 1453 * level of the path (level 0)
@@ -1387,31 +1507,30 @@ again:
1387 int wret; 1507 int wret;
1388 1508
1389 /* is a cow on this block not required */ 1509 /* is a cow on this block not required */
1390 spin_lock(&root->fs_info->hash_lock);
1391 if (btrfs_header_generation(b) == trans->transid && 1510 if (btrfs_header_generation(b) == trans->transid &&
1392 btrfs_header_owner(b) == root->root_key.objectid && 1511 btrfs_header_owner(b) == root->root_key.objectid &&
1393 !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) { 1512 !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
1394 spin_unlock(&root->fs_info->hash_lock);
1395 goto cow_done; 1513 goto cow_done;
1396 } 1514 }
1397 spin_unlock(&root->fs_info->hash_lock);
1398 1515
1399 /* ok, we have to cow, is our old prealloc the right 1516 /* ok, we have to cow, is our old prealloc the right
1400 * size? 1517 * size?
1401 */ 1518 */
1402 if (prealloc_block.objectid && 1519 if (prealloc_block.objectid &&
1403 prealloc_block.offset != b->len) { 1520 prealloc_block.offset != b->len) {
1521 btrfs_release_path(root, p);
1404 btrfs_free_reserved_extent(root, 1522 btrfs_free_reserved_extent(root,
1405 prealloc_block.objectid, 1523 prealloc_block.objectid,
1406 prealloc_block.offset); 1524 prealloc_block.offset);
1407 prealloc_block.objectid = 0; 1525 prealloc_block.objectid = 0;
1526 goto again;
1408 } 1527 }
1409 1528
1410 /* 1529 /*
1411 * for higher level blocks, try not to allocate blocks 1530 * for higher level blocks, try not to allocate blocks
1412 * with the block and the parent locks held. 1531 * with the block and the parent locks held.
1413 */ 1532 */
1414 if (level > 1 && !prealloc_block.objectid && 1533 if (level > 0 && !prealloc_block.objectid &&
1415 btrfs_path_lock_waiting(p, level)) { 1534 btrfs_path_lock_waiting(p, level)) {
1416 u32 size = b->len; 1535 u32 size = b->len;
1417 u64 hint = b->start; 1536 u64 hint = b->start;
@@ -1425,6 +1544,8 @@ again:
1425 goto again; 1544 goto again;
1426 } 1545 }
1427 1546
1547 btrfs_set_path_blocking(p);
1548
1428 wret = btrfs_cow_block(trans, root, b, 1549 wret = btrfs_cow_block(trans, root, b,
1429 p->nodes[level + 1], 1550 p->nodes[level + 1],
1430 p->slots[level + 1], 1551 p->slots[level + 1],
@@ -1446,6 +1567,22 @@ cow_done:
1446 if (!p->skip_locking) 1567 if (!p->skip_locking)
1447 p->locks[level] = 1; 1568 p->locks[level] = 1;
1448 1569
1570 btrfs_clear_path_blocking(p);
1571
1572 /*
1573 * we have a lock on b and as long as we aren't changing
1574 * the tree, there is no way for the items in b to change.
1575 * It is safe to drop the lock on our parent before we
1576 * go through the expensive btree search on b.
1577 *
1578 * If cow is true, then we might be changing slot zero,
1579 * which may require changing the parent. So, we can't
1580 * drop the lock until after we know which slot we're
1581 * operating on.
1582 */
1583 if (!cow)
1584 btrfs_unlock_up_safe(p, level + 1);
1585
1449 ret = check_block(root, p, level); 1586 ret = check_block(root, p, level);
1450 if (ret) { 1587 if (ret) {
1451 ret = -1; 1588 ret = -1;
@@ -1453,6 +1590,7 @@ cow_done:
1453 } 1590 }
1454 1591
1455 ret = bin_search(b, key, level, &slot); 1592 ret = bin_search(b, key, level, &slot);
1593
1456 if (level != 0) { 1594 if (level != 0) {
1457 if (ret && slot > 0) 1595 if (ret && slot > 0)
1458 slot -= 1; 1596 slot -= 1;
@@ -1460,7 +1598,16 @@ cow_done:
1460 if ((p->search_for_split || ins_len > 0) && 1598 if ((p->search_for_split || ins_len > 0) &&
1461 btrfs_header_nritems(b) >= 1599 btrfs_header_nritems(b) >=
1462 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) { 1600 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1463 int sret = split_node(trans, root, p, level); 1601 int sret;
1602
1603 sret = reada_for_balance(root, p, level);
1604 if (sret)
1605 goto again;
1606
1607 btrfs_set_path_blocking(p);
1608 sret = split_node(trans, root, p, level);
1609 btrfs_clear_path_blocking(p);
1610
1464 BUG_ON(sret > 0); 1611 BUG_ON(sret > 0);
1465 if (sret) { 1612 if (sret) {
1466 ret = sret; 1613 ret = sret;
@@ -1468,9 +1615,19 @@ cow_done:
1468 } 1615 }
1469 b = p->nodes[level]; 1616 b = p->nodes[level];
1470 slot = p->slots[level]; 1617 slot = p->slots[level];
1471 } else if (ins_len < 0) { 1618 } else if (ins_len < 0 &&
1472 int sret = balance_level(trans, root, p, 1619 btrfs_header_nritems(b) <
1473 level); 1620 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) {
1621 int sret;
1622
1623 sret = reada_for_balance(root, p, level);
1624 if (sret)
1625 goto again;
1626
1627 btrfs_set_path_blocking(p);
1628 sret = balance_level(trans, root, p, level);
1629 btrfs_clear_path_blocking(p);
1630
1474 if (sret) { 1631 if (sret) {
1475 ret = sret; 1632 ret = sret;
1476 goto done; 1633 goto done;
@@ -1504,7 +1661,7 @@ cow_done:
1504 * of the btree by dropping locks before 1661 * of the btree by dropping locks before
1505 * we read. 1662 * we read.
1506 */ 1663 */
1507 if (level > 1) { 1664 if (level > 0) {
1508 btrfs_release_path(NULL, p); 1665 btrfs_release_path(NULL, p);
1509 if (tmp) 1666 if (tmp)
1510 free_extent_buffer(tmp); 1667 free_extent_buffer(tmp);
@@ -1519,6 +1676,7 @@ cow_done:
1519 free_extent_buffer(tmp); 1676 free_extent_buffer(tmp);
1520 goto again; 1677 goto again;
1521 } else { 1678 } else {
1679 btrfs_set_path_blocking(p);
1522 if (tmp) 1680 if (tmp)
1523 free_extent_buffer(tmp); 1681 free_extent_buffer(tmp);
1524 if (should_reada) 1682 if (should_reada)
@@ -1528,14 +1686,29 @@ cow_done:
1528 b = read_node_slot(root, b, slot); 1686 b = read_node_slot(root, b, slot);
1529 } 1687 }
1530 } 1688 }
1531 if (!p->skip_locking) 1689 if (!p->skip_locking) {
1532 btrfs_tree_lock(b); 1690 int lret;
1691
1692 btrfs_clear_path_blocking(p);
1693 lret = btrfs_try_spin_lock(b);
1694
1695 if (!lret) {
1696 btrfs_set_path_blocking(p);
1697 btrfs_tree_lock(b);
1698 btrfs_clear_path_blocking(p);
1699 }
1700 }
1533 } else { 1701 } else {
1534 p->slots[level] = slot; 1702 p->slots[level] = slot;
1535 if (ins_len > 0 && 1703 if (ins_len > 0 &&
1536 btrfs_leaf_free_space(root, b) < ins_len) { 1704 btrfs_leaf_free_space(root, b) < ins_len) {
1537 int sret = split_leaf(trans, root, key, 1705 int sret;
1706
1707 btrfs_set_path_blocking(p);
1708 sret = split_leaf(trans, root, key,
1538 p, ins_len, ret == 0); 1709 p, ins_len, ret == 0);
1710 btrfs_clear_path_blocking(p);
1711
1539 BUG_ON(sret > 0); 1712 BUG_ON(sret > 0);
1540 if (sret) { 1713 if (sret) {
1541 ret = sret; 1714 ret = sret;
@@ -1549,12 +1722,16 @@ cow_done:
1549 } 1722 }
1550 ret = 1; 1723 ret = 1;
1551done: 1724done:
1725 /*
1726 * we don't really know what they plan on doing with the path
1727 * from here on, so for now just mark it as blocking
1728 */
1729 btrfs_set_path_blocking(p);
1552 if (prealloc_block.objectid) { 1730 if (prealloc_block.objectid) {
1553 btrfs_free_reserved_extent(root, 1731 btrfs_free_reserved_extent(root,
1554 prealloc_block.objectid, 1732 prealloc_block.objectid,
1555 prealloc_block.offset); 1733 prealloc_block.offset);
1556 } 1734 }
1557
1558 return ret; 1735 return ret;
1559} 1736}
1560 1737
@@ -1578,6 +1755,8 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
1578 ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, 0); 1755 ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, 0);
1579 BUG_ON(ret); 1756 BUG_ON(ret);
1580 1757
1758 btrfs_set_lock_blocking(eb);
1759
1581 parent = eb; 1760 parent = eb;
1582 while (1) { 1761 while (1) {
1583 level = btrfs_header_level(parent); 1762 level = btrfs_header_level(parent);
@@ -1602,6 +1781,7 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
1602 eb = read_tree_block(root, bytenr, blocksize, 1781 eb = read_tree_block(root, bytenr, blocksize,
1603 generation); 1782 generation);
1604 btrfs_tree_lock(eb); 1783 btrfs_tree_lock(eb);
1784 btrfs_set_lock_blocking(eb);
1605 } 1785 }
1606 1786
1607 /* 1787 /*
@@ -1626,6 +1806,7 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
1626 eb = read_tree_block(root, bytenr, blocksize, 1806 eb = read_tree_block(root, bytenr, blocksize,
1627 generation); 1807 generation);
1628 btrfs_tree_lock(eb); 1808 btrfs_tree_lock(eb);
1809 btrfs_set_lock_blocking(eb);
1629 } 1810 }
1630 1811
1631 ret = btrfs_cow_block(trans, root, eb, parent, slot, 1812 ret = btrfs_cow_block(trans, root, eb, parent, slot,
@@ -2172,6 +2353,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2172 2353
2173 right = read_node_slot(root, upper, slot + 1); 2354 right = read_node_slot(root, upper, slot + 1);
2174 btrfs_tree_lock(right); 2355 btrfs_tree_lock(right);
2356 btrfs_set_lock_blocking(right);
2357
2175 free_space = btrfs_leaf_free_space(root, right); 2358 free_space = btrfs_leaf_free_space(root, right);
2176 if (free_space < data_size) 2359 if (free_space < data_size)
2177 goto out_unlock; 2360 goto out_unlock;
@@ -2367,6 +2550,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2367 2550
2368 left = read_node_slot(root, path->nodes[1], slot - 1); 2551 left = read_node_slot(root, path->nodes[1], slot - 1);
2369 btrfs_tree_lock(left); 2552 btrfs_tree_lock(left);
2553 btrfs_set_lock_blocking(left);
2554
2370 free_space = btrfs_leaf_free_space(root, left); 2555 free_space = btrfs_leaf_free_space(root, left);
2371 if (free_space < data_size) { 2556 if (free_space < data_size) {
2372 ret = 1; 2557 ret = 1;
@@ -2825,6 +3010,12 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
2825 path->keep_locks = 0; 3010 path->keep_locks = 0;
2826 BUG_ON(ret); 3011 BUG_ON(ret);
2827 3012
3013 /*
3014 * make sure any changes to the path from split_leaf leave it
3015 * in a blocking state
3016 */
3017 btrfs_set_path_blocking(path);
3018
2828 leaf = path->nodes[0]; 3019 leaf = path->nodes[0];
2829 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); 3020 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
2830 3021
@@ -3354,6 +3545,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3354 BUG(); 3545 BUG();
3355 } 3546 }
3356out: 3547out:
3548 btrfs_unlock_up_safe(path, 1);
3357 return ret; 3549 return ret;
3358} 3550}
3359 3551
@@ -3441,15 +3633,22 @@ noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3441{ 3633{
3442 int ret; 3634 int ret;
3443 u64 root_gen = btrfs_header_generation(path->nodes[1]); 3635 u64 root_gen = btrfs_header_generation(path->nodes[1]);
3636 u64 parent_start = path->nodes[1]->start;
3637 u64 parent_owner = btrfs_header_owner(path->nodes[1]);
3444 3638
3445 ret = del_ptr(trans, root, path, 1, path->slots[1]); 3639 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3446 if (ret) 3640 if (ret)
3447 return ret; 3641 return ret;
3448 3642
3643 /*
3644 * btrfs_free_extent is expensive, we want to make sure we
3645 * aren't holding any locks when we call it
3646 */
3647 btrfs_unlock_up_safe(path, 0);
3648
3449 ret = btrfs_free_extent(trans, root, bytenr, 3649 ret = btrfs_free_extent(trans, root, bytenr,
3450 btrfs_level_size(root, 0), 3650 btrfs_level_size(root, 0),
3451 path->nodes[1]->start, 3651 parent_start, parent_owner,
3452 btrfs_header_owner(path->nodes[1]),
3453 root_gen, 0, 1); 3652 root_gen, 0, 1);
3454 return ret; 3653 return ret;
3455} 3654}
@@ -3721,12 +3920,14 @@ find_next_key:
3721 */ 3920 */
3722 if (slot >= nritems) { 3921 if (slot >= nritems) {
3723 path->slots[level] = slot; 3922 path->slots[level] = slot;
3923 btrfs_set_path_blocking(path);
3724 sret = btrfs_find_next_key(root, path, min_key, level, 3924 sret = btrfs_find_next_key(root, path, min_key, level,
3725 cache_only, min_trans); 3925 cache_only, min_trans);
3726 if (sret == 0) { 3926 if (sret == 0) {
3727 btrfs_release_path(root, path); 3927 btrfs_release_path(root, path);
3728 goto again; 3928 goto again;
3729 } else { 3929 } else {
3930 btrfs_clear_path_blocking(path);
3730 goto out; 3931 goto out;
3731 } 3932 }
3732 } 3933 }
@@ -3738,16 +3939,20 @@ find_next_key:
3738 unlock_up(path, level, 1); 3939 unlock_up(path, level, 1);
3739 goto out; 3940 goto out;
3740 } 3941 }
3942 btrfs_set_path_blocking(path);
3741 cur = read_node_slot(root, cur, slot); 3943 cur = read_node_slot(root, cur, slot);
3742 3944
3743 btrfs_tree_lock(cur); 3945 btrfs_tree_lock(cur);
3946
3744 path->locks[level - 1] = 1; 3947 path->locks[level - 1] = 1;
3745 path->nodes[level - 1] = cur; 3948 path->nodes[level - 1] = cur;
3746 unlock_up(path, level, 1); 3949 unlock_up(path, level, 1);
3950 btrfs_clear_path_blocking(path);
3747 } 3951 }
3748out: 3952out:
3749 if (ret == 0) 3953 if (ret == 0)
3750 memcpy(min_key, &found_key, sizeof(found_key)); 3954 memcpy(min_key, &found_key, sizeof(found_key));
3955 btrfs_set_path_blocking(path);
3751 return ret; 3956 return ret;
3752} 3957}
3753 3958
@@ -3843,6 +4048,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3843 if (ret < 0) 4048 if (ret < 0)
3844 return ret; 4049 return ret;
3845 4050
4051 btrfs_set_path_blocking(path);
3846 nritems = btrfs_header_nritems(path->nodes[0]); 4052 nritems = btrfs_header_nritems(path->nodes[0]);
3847 /* 4053 /*
3848 * by releasing the path above we dropped all our locks. A balance 4054 * by releasing the path above we dropped all our locks. A balance
@@ -3873,6 +4079,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3873 free_extent_buffer(next); 4079 free_extent_buffer(next);
3874 } 4080 }
3875 4081
4082 /* the path was set to blocking above */
3876 if (level == 1 && (path->locks[1] || path->skip_locking) && 4083 if (level == 1 && (path->locks[1] || path->skip_locking) &&
3877 path->reada) 4084 path->reada)
3878 reada_for_search(root, path, level, slot, 0); 4085 reada_for_search(root, path, level, slot, 0);
@@ -3881,6 +4088,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3881 if (!path->skip_locking) { 4088 if (!path->skip_locking) {
3882 WARN_ON(!btrfs_tree_locked(c)); 4089 WARN_ON(!btrfs_tree_locked(c));
3883 btrfs_tree_lock(next); 4090 btrfs_tree_lock(next);
4091 btrfs_set_lock_blocking(next);
3884 } 4092 }
3885 break; 4093 break;
3886 } 4094 }
@@ -3897,12 +4105,15 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3897 path->locks[level] = 1; 4105 path->locks[level] = 1;
3898 if (!level) 4106 if (!level)
3899 break; 4107 break;
4108
4109 btrfs_set_path_blocking(path);
3900 if (level == 1 && path->locks[1] && path->reada) 4110 if (level == 1 && path->locks[1] && path->reada)
3901 reada_for_search(root, path, level, slot, 0); 4111 reada_for_search(root, path, level, slot, 0);
3902 next = read_node_slot(root, next, 0); 4112 next = read_node_slot(root, next, 0);
3903 if (!path->skip_locking) { 4113 if (!path->skip_locking) {
3904 WARN_ON(!btrfs_tree_locked(path->nodes[level])); 4114 WARN_ON(!btrfs_tree_locked(path->nodes[level]));
3905 btrfs_tree_lock(next); 4115 btrfs_tree_lock(next);
4116 btrfs_set_lock_blocking(next);
3906 } 4117 }
3907 } 4118 }
3908done: 4119done:
@@ -3927,6 +4138,7 @@ int btrfs_previous_item(struct btrfs_root *root,
3927 4138
3928 while (1) { 4139 while (1) {
3929 if (path->slots[0] == 0) { 4140 if (path->slots[0] == 0) {
4141 btrfs_set_path_blocking(path);
3930 ret = btrfs_prev_leaf(root, path); 4142 ret = btrfs_prev_leaf(root, path);
3931 if (ret != 0) 4143 if (ret != 0)
3932 return ret; 4144 return ret;
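
One small behavioral change buried in the ctree.c hunks above: reada_for_search() no longer grows a lowest_read/highest_read window as it scans, it simply reads ahead any block that lands within 64K of the original search target, in either direction. A standalone sketch of that window test follows; the helper name within_reada_window is hypothetical and only restates the new condition for clarity.

#include <stdbool.h>
#include <stdint.h>

/*
 * true if 'search' falls within 64K of 'target' on either side,
 * mirroring the new readahead test in reada_for_search()
 */
static bool within_reada_window(uint64_t target, uint64_t search)
{
	if (search <= target)
		return target - search <= 65536;
	return search - target <= 65536;
}

Anchoring the window to the original target bounds how far readahead can wander from the block the caller actually asked for.
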
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index eee060f88113..531db112c8bd 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -454,17 +454,11 @@ struct btrfs_timespec {
 	__le32 nsec;
 } __attribute__ ((__packed__));
 
-typedef enum {
+enum btrfs_compression_type {
 	BTRFS_COMPRESS_NONE = 0,
 	BTRFS_COMPRESS_ZLIB = 1,
 	BTRFS_COMPRESS_LAST = 2,
-} btrfs_compression_type;
-
-/* we don't understand any encryption methods right now */
-typedef enum {
-	BTRFS_ENCRYPTION_NONE = 0,
-	BTRFS_ENCRYPTION_LAST = 1,
-} btrfs_encryption_type;
+};
 
 struct btrfs_inode_item {
 	/* nfs style generation number */
@@ -701,9 +695,7 @@ struct btrfs_fs_info {
701 struct btrfs_transaction *running_transaction; 695 struct btrfs_transaction *running_transaction;
702 wait_queue_head_t transaction_throttle; 696 wait_queue_head_t transaction_throttle;
703 wait_queue_head_t transaction_wait; 697 wait_queue_head_t transaction_wait;
704
705 wait_queue_head_t async_submit_wait; 698 wait_queue_head_t async_submit_wait;
706 wait_queue_head_t tree_log_wait;
707 699
708 struct btrfs_super_block super_copy; 700 struct btrfs_super_block super_copy;
709 struct btrfs_super_block super_for_commit; 701 struct btrfs_super_block super_for_commit;
@@ -711,7 +703,6 @@ struct btrfs_fs_info {
711 struct super_block *sb; 703 struct super_block *sb;
712 struct inode *btree_inode; 704 struct inode *btree_inode;
713 struct backing_dev_info bdi; 705 struct backing_dev_info bdi;
714 spinlock_t hash_lock;
715 struct mutex trans_mutex; 706 struct mutex trans_mutex;
716 struct mutex tree_log_mutex; 707 struct mutex tree_log_mutex;
717 struct mutex transaction_kthread_mutex; 708 struct mutex transaction_kthread_mutex;
@@ -730,10 +721,6 @@ struct btrfs_fs_info {
730 atomic_t async_submit_draining; 721 atomic_t async_submit_draining;
731 atomic_t nr_async_bios; 722 atomic_t nr_async_bios;
732 atomic_t async_delalloc_pages; 723 atomic_t async_delalloc_pages;
733 atomic_t tree_log_writers;
734 atomic_t tree_log_commit;
735 unsigned long tree_log_batch;
736 u64 tree_log_transid;
737 724
738 /* 725 /*
739 * this is used by the balancing code to wait for all the pending 726 * this is used by the balancing code to wait for all the pending
@@ -833,7 +820,14 @@ struct btrfs_root {
833 struct kobject root_kobj; 820 struct kobject root_kobj;
834 struct completion kobj_unregister; 821 struct completion kobj_unregister;
835 struct mutex objectid_mutex; 822 struct mutex objectid_mutex;
823
836 struct mutex log_mutex; 824 struct mutex log_mutex;
825 wait_queue_head_t log_writer_wait;
826 wait_queue_head_t log_commit_wait[2];
827 atomic_t log_writers;
828 atomic_t log_commit[2];
829 unsigned long log_transid;
830 unsigned long log_batch;
837 831
838 u64 objectid; 832 u64 objectid;
839 u64 last_trans; 833 u64 last_trans;
@@ -1841,6 +1835,10 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
1841struct btrfs_path *btrfs_alloc_path(void); 1835struct btrfs_path *btrfs_alloc_path(void);
1842void btrfs_free_path(struct btrfs_path *p); 1836void btrfs_free_path(struct btrfs_path *p);
1843void btrfs_init_path(struct btrfs_path *p); 1837void btrfs_init_path(struct btrfs_path *p);
1838void btrfs_set_path_blocking(struct btrfs_path *p);
1839void btrfs_clear_path_blocking(struct btrfs_path *p);
1840void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
1841
1844int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1842int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1845 struct btrfs_path *path, int slot, int nr); 1843 struct btrfs_path *path, int slot, int nr);
1846int btrfs_del_leaf(struct btrfs_trans_handle *trans, 1844int btrfs_del_leaf(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 81a313874ae5..5aebddd71193 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -16,7 +16,6 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/version.h>
20#include <linux/fs.h> 19#include <linux/fs.h>
21#include <linux/blkdev.h> 20#include <linux/blkdev.h>
22#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
@@ -800,7 +799,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
800 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 799 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
801 800
802 if (ret == 0) 801 if (ret == 0)
803 buf->flags |= EXTENT_UPTODATE; 802 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
804 else 803 else
805 WARN_ON(1); 804 WARN_ON(1);
806 return buf; 805 return buf;
@@ -814,6 +813,10 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
814 if (btrfs_header_generation(buf) == 813 if (btrfs_header_generation(buf) ==
815 root->fs_info->running_transaction->transid) { 814 root->fs_info->running_transaction->transid) {
816 WARN_ON(!btrfs_tree_locked(buf)); 815 WARN_ON(!btrfs_tree_locked(buf));
816
817 /* ugh, clear_extent_buffer_dirty can be expensive */
818 btrfs_set_lock_blocking(buf);
819
817 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, 820 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
818 buf); 821 buf);
819 } 822 }
@@ -850,6 +853,14 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
850 spin_lock_init(&root->list_lock); 853 spin_lock_init(&root->list_lock);
851 mutex_init(&root->objectid_mutex); 854 mutex_init(&root->objectid_mutex);
852 mutex_init(&root->log_mutex); 855 mutex_init(&root->log_mutex);
856 init_waitqueue_head(&root->log_writer_wait);
857 init_waitqueue_head(&root->log_commit_wait[0]);
858 init_waitqueue_head(&root->log_commit_wait[1]);
859 atomic_set(&root->log_commit[0], 0);
860 atomic_set(&root->log_commit[1], 0);
861 atomic_set(&root->log_writers, 0);
862 root->log_batch = 0;
863 root->log_transid = 0;
853 extent_io_tree_init(&root->dirty_log_pages, 864 extent_io_tree_init(&root->dirty_log_pages,
854 fs_info->btree_inode->i_mapping, GFP_NOFS); 865 fs_info->btree_inode->i_mapping, GFP_NOFS);
855 866
@@ -934,15 +945,16 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
934 return 0; 945 return 0;
935} 946}
936 947
937int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, 948static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
938 struct btrfs_fs_info *fs_info) 949 struct btrfs_fs_info *fs_info)
939{ 950{
940 struct btrfs_root *root; 951 struct btrfs_root *root;
941 struct btrfs_root *tree_root = fs_info->tree_root; 952 struct btrfs_root *tree_root = fs_info->tree_root;
953 struct extent_buffer *leaf;
942 954
943 root = kzalloc(sizeof(*root), GFP_NOFS); 955 root = kzalloc(sizeof(*root), GFP_NOFS);
944 if (!root) 956 if (!root)
945 return -ENOMEM; 957 return ERR_PTR(-ENOMEM);
946 958
947 __setup_root(tree_root->nodesize, tree_root->leafsize, 959 __setup_root(tree_root->nodesize, tree_root->leafsize,
948 tree_root->sectorsize, tree_root->stripesize, 960 tree_root->sectorsize, tree_root->stripesize,
@@ -951,12 +963,23 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
951 root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; 963 root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
952 root->root_key.type = BTRFS_ROOT_ITEM_KEY; 964 root->root_key.type = BTRFS_ROOT_ITEM_KEY;
953 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; 965 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
966 /*
967 * log trees do not get reference counted because they go away
968 * before a real commit is actually done. They do store pointers
969 * to file data extents, and those reference counts still get
970 * updated (along with back refs to the log tree).
971 */
954 root->ref_cows = 0; 972 root->ref_cows = 0;
955 973
956 root->node = btrfs_alloc_free_block(trans, root, root->leafsize, 974 leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
957 0, BTRFS_TREE_LOG_OBJECTID, 975 0, BTRFS_TREE_LOG_OBJECTID,
958 trans->transid, 0, 0, 0); 976 trans->transid, 0, 0, 0);
977 if (IS_ERR(leaf)) {
978 kfree(root);
979 return ERR_CAST(leaf);
980 }
959 981
982 root->node = leaf;
960 btrfs_set_header_nritems(root->node, 0); 983 btrfs_set_header_nritems(root->node, 0);
961 btrfs_set_header_level(root->node, 0); 984 btrfs_set_header_level(root->node, 0);
962 btrfs_set_header_bytenr(root->node, root->node->start); 985 btrfs_set_header_bytenr(root->node, root->node->start);
@@ -968,7 +991,48 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
968 BTRFS_FSID_SIZE); 991 BTRFS_FSID_SIZE);
969 btrfs_mark_buffer_dirty(root->node); 992 btrfs_mark_buffer_dirty(root->node);
970 btrfs_tree_unlock(root->node); 993 btrfs_tree_unlock(root->node);
971 fs_info->log_root_tree = root; 994 return root;
995}
996
997int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
998 struct btrfs_fs_info *fs_info)
999{
1000 struct btrfs_root *log_root;
1001
1002 log_root = alloc_log_tree(trans, fs_info);
1003 if (IS_ERR(log_root))
1004 return PTR_ERR(log_root);
1005 WARN_ON(fs_info->log_root_tree);
1006 fs_info->log_root_tree = log_root;
1007 return 0;
1008}
1009
1010int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1011 struct btrfs_root *root)
1012{
1013 struct btrfs_root *log_root;
1014 struct btrfs_inode_item *inode_item;
1015
1016 log_root = alloc_log_tree(trans, root->fs_info);
1017 if (IS_ERR(log_root))
1018 return PTR_ERR(log_root);
1019
1020 log_root->last_trans = trans->transid;
1021 log_root->root_key.offset = root->root_key.objectid;
1022
1023 inode_item = &log_root->root_item.inode;
1024 inode_item->generation = cpu_to_le64(1);
1025 inode_item->size = cpu_to_le64(3);
1026 inode_item->nlink = cpu_to_le32(1);
1027 inode_item->nbytes = cpu_to_le64(root->leafsize);
1028 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1029
1030 btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start);
1031 btrfs_set_root_generation(&log_root->root_item, trans->transid);
1032
1033 WARN_ON(root->log_root);
1034 root->log_root = log_root;
1035 root->log_transid = 0;
972 return 0; 1036 return 0;
973} 1037}
974 1038
@@ -1136,7 +1200,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1136{ 1200{
1137 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; 1201 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1138 int ret = 0; 1202 int ret = 0;
1139 struct list_head *cur;
1140 struct btrfs_device *device; 1203 struct btrfs_device *device;
1141 struct backing_dev_info *bdi; 1204 struct backing_dev_info *bdi;
1142#if 0 1205#if 0
@@ -1144,8 +1207,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1144 btrfs_congested_async(info, 0)) 1207 btrfs_congested_async(info, 0))
1145 return 1; 1208 return 1;
1146#endif 1209#endif
1147 list_for_each(cur, &info->fs_devices->devices) { 1210 list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1148 device = list_entry(cur, struct btrfs_device, dev_list);
1149 if (!device->bdev) 1211 if (!device->bdev)
1150 continue; 1212 continue;
1151 bdi = blk_get_backing_dev_info(device->bdev); 1213 bdi = blk_get_backing_dev_info(device->bdev);
@@ -1163,13 +1225,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1163 */ 1225 */
1164static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 1226static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1165{ 1227{
1166 struct list_head *cur;
1167 struct btrfs_device *device; 1228 struct btrfs_device *device;
1168 struct btrfs_fs_info *info; 1229 struct btrfs_fs_info *info;
1169 1230
1170 info = (struct btrfs_fs_info *)bdi->unplug_io_data; 1231 info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1171 list_for_each(cur, &info->fs_devices->devices) { 1232 list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1172 device = list_entry(cur, struct btrfs_device, dev_list);
1173 if (!device->bdev) 1233 if (!device->bdev)
1174 continue; 1234 continue;
1175 1235
@@ -1447,7 +1507,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1447 INIT_LIST_HEAD(&fs_info->dead_roots); 1507 INIT_LIST_HEAD(&fs_info->dead_roots);
1448 INIT_LIST_HEAD(&fs_info->hashers); 1508 INIT_LIST_HEAD(&fs_info->hashers);
1449 INIT_LIST_HEAD(&fs_info->delalloc_inodes); 1509 INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1450 spin_lock_init(&fs_info->hash_lock);
1451 spin_lock_init(&fs_info->delalloc_lock); 1510 spin_lock_init(&fs_info->delalloc_lock);
1452 spin_lock_init(&fs_info->new_trans_lock); 1511 spin_lock_init(&fs_info->new_trans_lock);
1453 spin_lock_init(&fs_info->ref_cache_lock); 1512 spin_lock_init(&fs_info->ref_cache_lock);
@@ -1535,10 +1594,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1535 init_waitqueue_head(&fs_info->transaction_throttle); 1594 init_waitqueue_head(&fs_info->transaction_throttle);
1536 init_waitqueue_head(&fs_info->transaction_wait); 1595 init_waitqueue_head(&fs_info->transaction_wait);
1537 init_waitqueue_head(&fs_info->async_submit_wait); 1596 init_waitqueue_head(&fs_info->async_submit_wait);
1538 init_waitqueue_head(&fs_info->tree_log_wait);
1539 atomic_set(&fs_info->tree_log_commit, 0);
1540 atomic_set(&fs_info->tree_log_writers, 0);
1541 fs_info->tree_log_transid = 0;
1542 1597
1543 __setup_root(4096, 4096, 4096, 4096, tree_root, 1598 __setup_root(4096, 4096, 4096, 4096, tree_root,
1544 fs_info, BTRFS_ROOT_TREE_OBJECTID); 1599 fs_info, BTRFS_ROOT_TREE_OBJECTID);
@@ -1627,6 +1682,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1627 * low idle thresh 1682 * low idle thresh
1628 */ 1683 */
1629 fs_info->endio_workers.idle_thresh = 4; 1684 fs_info->endio_workers.idle_thresh = 4;
1685 fs_info->endio_meta_workers.idle_thresh = 4;
1686
1630 fs_info->endio_write_workers.idle_thresh = 64; 1687 fs_info->endio_write_workers.idle_thresh = 64;
1631 fs_info->endio_meta_write_workers.idle_thresh = 64; 1688 fs_info->endio_meta_write_workers.idle_thresh = 64;
1632 1689
@@ -1740,13 +1797,13 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1740 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; 1797 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1741 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 1798 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1742 "btrfs-cleaner"); 1799 "btrfs-cleaner");
1743 if (!fs_info->cleaner_kthread) 1800 if (IS_ERR(fs_info->cleaner_kthread))
1744 goto fail_csum_root; 1801 goto fail_csum_root;
1745 1802
1746 fs_info->transaction_kthread = kthread_run(transaction_kthread, 1803 fs_info->transaction_kthread = kthread_run(transaction_kthread,
1747 tree_root, 1804 tree_root,
1748 "btrfs-transaction"); 1805 "btrfs-transaction");
1749 if (!fs_info->transaction_kthread) 1806 if (IS_ERR(fs_info->transaction_kthread))
1750 goto fail_cleaner; 1807 goto fail_cleaner;
1751 1808
1752 if (btrfs_super_log_root(disk_super) != 0) { 1809 if (btrfs_super_log_root(disk_super) != 0) {
@@ -1828,13 +1885,14 @@ fail_sb_buffer:
1828fail_iput: 1885fail_iput:
1829 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 1886 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1830 iput(fs_info->btree_inode); 1887 iput(fs_info->btree_inode);
1831fail: 1888
1832 btrfs_close_devices(fs_info->fs_devices); 1889 btrfs_close_devices(fs_info->fs_devices);
1833 btrfs_mapping_tree_free(&fs_info->mapping_tree); 1890 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1891 bdi_destroy(&fs_info->bdi);
1834 1892
1893fail:
1835 kfree(extent_root); 1894 kfree(extent_root);
1836 kfree(tree_root); 1895 kfree(tree_root);
1837 bdi_destroy(&fs_info->bdi);
1838 kfree(fs_info); 1896 kfree(fs_info);
1839 kfree(chunk_root); 1897 kfree(chunk_root);
1840 kfree(dev_root); 1898 kfree(dev_root);
@@ -1995,7 +2053,6 @@ static int write_dev_supers(struct btrfs_device *device,
1995 2053
1996int write_all_supers(struct btrfs_root *root, int max_mirrors) 2054int write_all_supers(struct btrfs_root *root, int max_mirrors)
1997{ 2055{
1998 struct list_head *cur;
1999 struct list_head *head = &root->fs_info->fs_devices->devices; 2056 struct list_head *head = &root->fs_info->fs_devices->devices;
2000 struct btrfs_device *dev; 2057 struct btrfs_device *dev;
2001 struct btrfs_super_block *sb; 2058 struct btrfs_super_block *sb;
@@ -2011,8 +2068,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2011 2068
2012 sb = &root->fs_info->super_for_commit; 2069 sb = &root->fs_info->super_for_commit;
2013 dev_item = &sb->dev_item; 2070 dev_item = &sb->dev_item;
2014 list_for_each(cur, head) { 2071 list_for_each_entry(dev, head, dev_list) {
2015 dev = list_entry(cur, struct btrfs_device, dev_list);
2016 if (!dev->bdev) { 2072 if (!dev->bdev) {
2017 total_errors++; 2073 total_errors++;
2018 continue; 2074 continue;
@@ -2045,8 +2101,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2045 } 2101 }
2046 2102
2047 total_errors = 0; 2103 total_errors = 0;
2048 list_for_each(cur, head) { 2104 list_for_each_entry(dev, head, dev_list) {
2049 dev = list_entry(cur, struct btrfs_device, dev_list);
2050 if (!dev->bdev) 2105 if (!dev->bdev)
2051 continue; 2106 continue;
2052 if (!dev->in_fs_metadata || !dev->writeable) 2107 if (!dev->in_fs_metadata || !dev->writeable)
@@ -2260,6 +2315,8 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2260 u64 transid = btrfs_header_generation(buf); 2315 u64 transid = btrfs_header_generation(buf);
2261 struct inode *btree_inode = root->fs_info->btree_inode; 2316 struct inode *btree_inode = root->fs_info->btree_inode;
2262 2317
2318 btrfs_set_lock_blocking(buf);
2319
2263 WARN_ON(!btrfs_tree_locked(buf)); 2320 WARN_ON(!btrfs_tree_locked(buf));
2264 if (transid != root->fs_info->generation) { 2321 if (transid != root->fs_info->generation) {
2265 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, " 2322 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
@@ -2302,14 +2359,13 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2302 int ret; 2359 int ret;
2303 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 2360 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2304 if (ret == 0) 2361 if (ret == 0)
2305 buf->flags |= EXTENT_UPTODATE; 2362 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2306 return ret; 2363 return ret;
2307} 2364}
2308 2365
2309int btree_lock_page_hook(struct page *page) 2366int btree_lock_page_hook(struct page *page)
2310{ 2367{
2311 struct inode *inode = page->mapping->host; 2368 struct inode *inode = page->mapping->host;
2312 struct btrfs_root *root = BTRFS_I(inode)->root;
2313 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2369 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2314 struct extent_buffer *eb; 2370 struct extent_buffer *eb;
2315 unsigned long len; 2371 unsigned long len;
@@ -2324,9 +2380,7 @@ int btree_lock_page_hook(struct page *page)
2324 goto out; 2380 goto out;
2325 2381
2326 btrfs_tree_lock(eb); 2382 btrfs_tree_lock(eb);
2327 spin_lock(&root->fs_info->hash_lock);
2328 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); 2383 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2329 spin_unlock(&root->fs_info->hash_lock);
2330 btrfs_tree_unlock(eb); 2384 btrfs_tree_unlock(eb);
2331 free_extent_buffer(eb); 2385 free_extent_buffer(eb);
2332out: 2386out:
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index c0ff404c31b7..494a56eb2986 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -98,5 +98,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info);
 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info);
+int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root);
 int btree_lock_page_hook(struct page *page);
 #endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 293da650873f..7527523c2d2d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -19,7 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/version.h>
+#include <linux/sort.h>
 #include "compat.h"
 #include "hash.h"
 #include "crc32c.h"
@@ -30,7 +30,6 @@
 #include "volumes.h"
 #include "locking.h"
 #include "ref-cache.h"
-#include "compat.h"
 
 #define PENDING_EXTENT_INSERT 0
 #define PENDING_EXTENT_DELETE 1
@@ -326,10 +325,8 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
326 u64 flags) 325 u64 flags)
327{ 326{
328 struct list_head *head = &info->space_info; 327 struct list_head *head = &info->space_info;
329 struct list_head *cur;
330 struct btrfs_space_info *found; 328 struct btrfs_space_info *found;
331 list_for_each(cur, head) { 329 list_for_each_entry(found, head, list) {
332 found = list_entry(cur, struct btrfs_space_info, list);
333 if (found->flags == flags) 330 if (found->flags == flags)
334 return found; 331 return found;
335 } 332 }
@@ -1525,15 +1522,55 @@ out:
1525 return ret; 1522 return ret;
1526} 1523}
1527 1524
1528int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1525/* when a block goes through cow, we update the reference counts of
1529 struct extent_buffer *orig_buf, struct extent_buffer *buf, 1526 * everything that block points to. The internal pointers of the block
1530 u32 *nr_extents) 1527 * can be in just about any order, and it is likely to have clusters of
1528 * things that are close together and clusters of things that are not.
1529 *
1530 * To help reduce the seeks that come with updating all of these reference
1531 * counts, sort them by byte number before actual updates are done.
1532 *
1533 * struct refsort is used to match byte number to slot in the btree block.
1534 * we sort based on the byte number and then use the slot to actually
1535 * find the item.
1536 *
 1537 * struct refsort is smaller than struct btrfs_item and smaller than
1538 * struct btrfs_key_ptr. Since we're currently limited to the page size
1539 * for a btree block, there's no way for a kmalloc of refsorts for a
1540 * single node to be bigger than a page.
1541 */
1542struct refsort {
1543 u64 bytenr;
1544 u32 slot;
1545};
1546
1547/*
1548 * for passing into sort()
1549 */
1550static int refsort_cmp(const void *a_void, const void *b_void)
1551{
1552 const struct refsort *a = a_void;
1553 const struct refsort *b = b_void;
1554
1555 if (a->bytenr < b->bytenr)
1556 return -1;
1557 if (a->bytenr > b->bytenr)
1558 return 1;
1559 return 0;
1560}
1561
1562
1563noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1564 struct btrfs_root *root,
1565 struct extent_buffer *orig_buf,
1566 struct extent_buffer *buf, u32 *nr_extents)
1531{ 1567{
1532 u64 bytenr; 1568 u64 bytenr;
1533 u64 ref_root; 1569 u64 ref_root;
1534 u64 orig_root; 1570 u64 orig_root;
1535 u64 ref_generation; 1571 u64 ref_generation;
1536 u64 orig_generation; 1572 u64 orig_generation;
1573 struct refsort *sorted;
1537 u32 nritems; 1574 u32 nritems;
1538 u32 nr_file_extents = 0; 1575 u32 nr_file_extents = 0;
1539 struct btrfs_key key; 1576 struct btrfs_key key;
@@ -1542,6 +1579,8 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1542 int level; 1579 int level;
1543 int ret = 0; 1580 int ret = 0;
1544 int faili = 0; 1581 int faili = 0;
1582 int refi = 0;
1583 int slot;
1545 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *, 1584 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1546 u64, u64, u64, u64, u64, u64, u64, u64); 1585 u64, u64, u64, u64, u64, u64, u64, u64);
1547 1586
@@ -1553,6 +1592,9 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1553 nritems = btrfs_header_nritems(buf); 1592 nritems = btrfs_header_nritems(buf);
1554 level = btrfs_header_level(buf); 1593 level = btrfs_header_level(buf);
1555 1594
1595 sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
1596 BUG_ON(!sorted);
1597
1556 if (root->ref_cows) { 1598 if (root->ref_cows) {
1557 process_func = __btrfs_inc_extent_ref; 1599 process_func = __btrfs_inc_extent_ref;
1558 } else { 1600 } else {
@@ -1565,6 +1607,11 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1565 process_func = __btrfs_update_extent_ref; 1607 process_func = __btrfs_update_extent_ref;
1566 } 1608 }
1567 1609
1610 /*
1611 * we make two passes through the items. In the first pass we
1612 * only record the byte number and slot. Then we sort based on
1613 * byte number and do the actual work based on the sorted results
1614 */
1568 for (i = 0; i < nritems; i++) { 1615 for (i = 0; i < nritems; i++) {
1569 cond_resched(); 1616 cond_resched();
1570 if (level == 0) { 1617 if (level == 0) {
@@ -1581,6 +1628,32 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1581 continue; 1628 continue;
1582 1629
1583 nr_file_extents++; 1630 nr_file_extents++;
1631 sorted[refi].bytenr = bytenr;
1632 sorted[refi].slot = i;
1633 refi++;
1634 } else {
1635 bytenr = btrfs_node_blockptr(buf, i);
1636 sorted[refi].bytenr = bytenr;
1637 sorted[refi].slot = i;
1638 refi++;
1639 }
1640 }
1641 /*
1642 * if refi == 0, we didn't actually put anything into the sorted
1643 * array and we're done
1644 */
1645 if (refi == 0)
1646 goto out;
1647
1648 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
1649
1650 for (i = 0; i < refi; i++) {
1651 cond_resched();
1652 slot = sorted[i].slot;
1653 bytenr = sorted[i].bytenr;
1654
1655 if (level == 0) {
1656 btrfs_item_key_to_cpu(buf, &key, slot);
1584 1657
1585 ret = process_func(trans, root, bytenr, 1658 ret = process_func(trans, root, bytenr,
1586 orig_buf->start, buf->start, 1659 orig_buf->start, buf->start,
@@ -1589,25 +1662,25 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1589 key.objectid); 1662 key.objectid);
1590 1663
1591 if (ret) { 1664 if (ret) {
1592 faili = i; 1665 faili = slot;
1593 WARN_ON(1); 1666 WARN_ON(1);
1594 goto fail; 1667 goto fail;
1595 } 1668 }
1596 } else { 1669 } else {
1597 bytenr = btrfs_node_blockptr(buf, i);
1598 ret = process_func(trans, root, bytenr, 1670 ret = process_func(trans, root, bytenr,
1599 orig_buf->start, buf->start, 1671 orig_buf->start, buf->start,
1600 orig_root, ref_root, 1672 orig_root, ref_root,
1601 orig_generation, ref_generation, 1673 orig_generation, ref_generation,
1602 level - 1); 1674 level - 1);
1603 if (ret) { 1675 if (ret) {
1604 faili = i; 1676 faili = slot;
1605 WARN_ON(1); 1677 WARN_ON(1);
1606 goto fail; 1678 goto fail;
1607 } 1679 }
1608 } 1680 }
1609 } 1681 }
1610out: 1682out:
1683 kfree(sorted);
1611 if (nr_extents) { 1684 if (nr_extents) {
1612 if (level == 0) 1685 if (level == 0)
1613 *nr_extents = nr_file_extents; 1686 *nr_extents = nr_file_extents;
@@ -1616,6 +1689,7 @@ out:
1616 } 1689 }
1617 return 0; 1690 return 0;
1618fail: 1691fail:
1692 kfree(sorted);
1619 WARN_ON(1); 1693 WARN_ON(1);
1620 return ret; 1694 return ret;
1621} 1695}
@@ -2159,7 +2233,8 @@ again:
2159 ret = find_first_extent_bit(&info->extent_ins, search, &start, 2233 ret = find_first_extent_bit(&info->extent_ins, search, &start,
2160 &end, EXTENT_WRITEBACK); 2234 &end, EXTENT_WRITEBACK);
2161 if (ret) { 2235 if (ret) {
2162 if (skipped && all && !num_inserts) { 2236 if (skipped && all && !num_inserts &&
2237 list_empty(&update_list)) {
2163 skipped = 0; 2238 skipped = 0;
2164 search = 0; 2239 search = 0;
2165 continue; 2240 continue;
@@ -2547,6 +2622,7 @@ again:
2547 if (ret) { 2622 if (ret) {
2548 if (all && skipped && !nr) { 2623 if (all && skipped && !nr) {
2549 search = 0; 2624 search = 0;
2625 skipped = 0;
2550 continue; 2626 continue;
2551 } 2627 }
2552 mutex_unlock(&info->extent_ins_mutex); 2628 mutex_unlock(&info->extent_ins_mutex);
@@ -2700,13 +2776,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2700 /* if metadata always pin */ 2776 /* if metadata always pin */
2701 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) { 2777 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2702 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 2778 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2703 struct btrfs_block_group_cache *cache; 2779 mutex_lock(&root->fs_info->pinned_mutex);
2704 2780 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2705 /* btrfs_free_reserved_extent */ 2781 mutex_unlock(&root->fs_info->pinned_mutex);
2706 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
2707 BUG_ON(!cache);
2708 btrfs_add_free_space(cache, bytenr, num_bytes);
2709 put_block_group(cache);
2710 update_reserved_extents(root, bytenr, num_bytes, 0); 2782 update_reserved_extents(root, bytenr, num_bytes, 0);
2711 return 0; 2783 return 0;
2712 } 2784 }
@@ -3014,7 +3086,6 @@ loop_check:
3014static void dump_space_info(struct btrfs_space_info *info, u64 bytes) 3086static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3015{ 3087{
3016 struct btrfs_block_group_cache *cache; 3088 struct btrfs_block_group_cache *cache;
3017 struct list_head *l;
3018 3089
3019 printk(KERN_INFO "space_info has %llu free, is %sfull\n", 3090 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3020 (unsigned long long)(info->total_bytes - info->bytes_used - 3091 (unsigned long long)(info->total_bytes - info->bytes_used -
@@ -3022,8 +3093,7 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3022 (info->full) ? "" : "not "); 3093 (info->full) ? "" : "not ");
3023 3094
3024 down_read(&info->groups_sem); 3095 down_read(&info->groups_sem);
3025 list_for_each(l, &info->block_groups) { 3096 list_for_each_entry(cache, &info->block_groups, list) {
3026 cache = list_entry(l, struct btrfs_block_group_cache, list);
3027 spin_lock(&cache->lock); 3097 spin_lock(&cache->lock);
3028 printk(KERN_INFO "block group %llu has %llu bytes, %llu used " 3098 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3029 "%llu pinned %llu reserved\n", 3099 "%llu pinned %llu reserved\n",
@@ -3342,7 +3412,10 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3342 btrfs_set_header_generation(buf, trans->transid); 3412 btrfs_set_header_generation(buf, trans->transid);
3343 btrfs_tree_lock(buf); 3413 btrfs_tree_lock(buf);
3344 clean_tree_block(trans, root, buf); 3414 clean_tree_block(trans, root, buf);
3415
3416 btrfs_set_lock_blocking(buf);
3345 btrfs_set_buffer_uptodate(buf); 3417 btrfs_set_buffer_uptodate(buf);
3418
3346 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 3419 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3347 set_extent_dirty(&root->dirty_log_pages, buf->start, 3420 set_extent_dirty(&root->dirty_log_pages, buf->start,
3348 buf->start + buf->len - 1, GFP_NOFS); 3421 buf->start + buf->len - 1, GFP_NOFS);
@@ -3351,6 +3424,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3351 buf->start + buf->len - 1, GFP_NOFS); 3424 buf->start + buf->len - 1, GFP_NOFS);
3352 } 3425 }
3353 trans->blocks_used++; 3426 trans->blocks_used++;
3427 /* this returns a buffer locked for blocking */
3354 return buf; 3428 return buf;
3355} 3429}
3356 3430
@@ -3388,36 +3462,73 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3388{ 3462{
3389 u64 leaf_owner; 3463 u64 leaf_owner;
3390 u64 leaf_generation; 3464 u64 leaf_generation;
3465 struct refsort *sorted;
3391 struct btrfs_key key; 3466 struct btrfs_key key;
3392 struct btrfs_file_extent_item *fi; 3467 struct btrfs_file_extent_item *fi;
3393 int i; 3468 int i;
3394 int nritems; 3469 int nritems;
3395 int ret; 3470 int ret;
3471 int refi = 0;
3472 int slot;
3396 3473
3397 BUG_ON(!btrfs_is_leaf(leaf)); 3474 BUG_ON(!btrfs_is_leaf(leaf));
3398 nritems = btrfs_header_nritems(leaf); 3475 nritems = btrfs_header_nritems(leaf);
3399 leaf_owner = btrfs_header_owner(leaf); 3476 leaf_owner = btrfs_header_owner(leaf);
3400 leaf_generation = btrfs_header_generation(leaf); 3477 leaf_generation = btrfs_header_generation(leaf);
3401 3478
3479 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3480 /* we do this loop twice. The first time we build a list
3481 * of the extents we have a reference on, then we sort the list
3482 * by bytenr. The second time around we actually do the
3483 * extent freeing.
3484 */
3402 for (i = 0; i < nritems; i++) { 3485 for (i = 0; i < nritems; i++) {
3403 u64 disk_bytenr; 3486 u64 disk_bytenr;
3404 cond_resched(); 3487 cond_resched();
3405 3488
3406 btrfs_item_key_to_cpu(leaf, &key, i); 3489 btrfs_item_key_to_cpu(leaf, &key, i);
3490
3491 /* only extents have references, skip everything else */
3407 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) 3492 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3408 continue; 3493 continue;
3494
3409 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 3495 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3496
3497 /* inline extents live in the btree, they don't have refs */
3410 if (btrfs_file_extent_type(leaf, fi) == 3498 if (btrfs_file_extent_type(leaf, fi) ==
3411 BTRFS_FILE_EXTENT_INLINE) 3499 BTRFS_FILE_EXTENT_INLINE)
3412 continue; 3500 continue;
3413 /* 3501
3414 * FIXME make sure to insert a trans record that
3415 * repeats the snapshot del on crash
3416 */
3417 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 3502 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3503
3504 /* holes don't have refs */
3418 if (disk_bytenr == 0) 3505 if (disk_bytenr == 0)
3419 continue; 3506 continue;
3420 3507
3508 sorted[refi].bytenr = disk_bytenr;
3509 sorted[refi].slot = i;
3510 refi++;
3511 }
3512
3513 if (refi == 0)
3514 goto out;
3515
3516 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3517
3518 for (i = 0; i < refi; i++) {
3519 u64 disk_bytenr;
3520
3521 disk_bytenr = sorted[i].bytenr;
3522 slot = sorted[i].slot;
3523
3524 cond_resched();
3525
3526 btrfs_item_key_to_cpu(leaf, &key, slot);
3527 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3528 continue;
3529
3530 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
3531
3421 ret = __btrfs_free_extent(trans, root, disk_bytenr, 3532 ret = __btrfs_free_extent(trans, root, disk_bytenr,
3422 btrfs_file_extent_disk_num_bytes(leaf, fi), 3533 btrfs_file_extent_disk_num_bytes(leaf, fi),
3423 leaf->start, leaf_owner, leaf_generation, 3534 leaf->start, leaf_owner, leaf_generation,
@@ -3428,6 +3539,8 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3428 wake_up(&root->fs_info->transaction_throttle); 3539 wake_up(&root->fs_info->transaction_throttle);
3429 cond_resched(); 3540 cond_resched();
3430 } 3541 }
3542out:
3543 kfree(sorted);
3431 return 0; 3544 return 0;
3432} 3545}
3433 3546
@@ -3437,9 +3550,25 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3437{ 3550{
3438 int i; 3551 int i;
3439 int ret; 3552 int ret;
3440 struct btrfs_extent_info *info = ref->extents; 3553 struct btrfs_extent_info *info;
3554 struct refsort *sorted;
3555
3556 if (ref->nritems == 0)
3557 return 0;
3441 3558
3559 sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
3442 for (i = 0; i < ref->nritems; i++) { 3560 for (i = 0; i < ref->nritems; i++) {
3561 sorted[i].bytenr = ref->extents[i].bytenr;
3562 sorted[i].slot = i;
3563 }
3564 sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
3565
3566 /*
3567 * the items in the ref were sorted when the ref was inserted
3568 * into the ref cache, so this is already in order
3569 */
3570 for (i = 0; i < ref->nritems; i++) {
3571 info = ref->extents + sorted[i].slot;
3443 ret = __btrfs_free_extent(trans, root, info->bytenr, 3572 ret = __btrfs_free_extent(trans, root, info->bytenr,
3444 info->num_bytes, ref->bytenr, 3573 info->num_bytes, ref->bytenr,
3445 ref->owner, ref->generation, 3574 ref->owner, ref->generation,
@@ -3453,6 +3582,7 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3453 info++; 3582 info++;
3454 } 3583 }
3455 3584
3585 kfree(sorted);
3456 return 0; 3586 return 0;
3457} 3587}
3458 3588
@@ -3497,6 +3627,152 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
3497} 3627}
3498 3628
3499/* 3629/*
3630 * this is used while deleting old snapshots, and it drops the refs
3631 * on a whole subtree starting from a level 1 node.
3632 *
3633 * The idea is to sort all the leaf pointers, and then drop the
3634 * ref on all the leaves in order. Most of the time the leaves
3635 * will have ref cache entries, so no leaf IOs will be required to
3636 * find the extents they have references on.
3637 *
3638 * For each leaf, any references it has are also dropped in order
3639 *
3640 * This ends up dropping the references in something close to optimal
3641 * order for reading and modifying the extent allocation tree.
3642 */
3643static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
3644 struct btrfs_root *root,
3645 struct btrfs_path *path)
3646{
3647 u64 bytenr;
3648 u64 root_owner;
3649 u64 root_gen;
3650 struct extent_buffer *eb = path->nodes[1];
3651 struct extent_buffer *leaf;
3652 struct btrfs_leaf_ref *ref;
3653 struct refsort *sorted = NULL;
3654 int nritems = btrfs_header_nritems(eb);
3655 int ret;
3656 int i;
3657 int refi = 0;
3658 int slot = path->slots[1];
3659 u32 blocksize = btrfs_level_size(root, 0);
3660 u32 refs;
3661
3662 if (nritems == 0)
3663 goto out;
3664
3665 root_owner = btrfs_header_owner(eb);
3666 root_gen = btrfs_header_generation(eb);
3667 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3668
3669 /*
3670 * step one, sort all the leaf pointers so we don't scribble
3671 * randomly into the extent allocation tree
3672 */
3673 for (i = slot; i < nritems; i++) {
3674 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
3675 sorted[refi].slot = i;
3676 refi++;
3677 }
3678
3679 /*
3680 * nritems won't be zero, but if we're picking up drop_snapshot
3681 * after a crash, slot might be > 0, so double check things
3682 * just in case.
3683 */
3684 if (refi == 0)
3685 goto out;
3686
3687 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3688
3689 /*
3690 * the first loop frees everything the leaves point to
3691 */
3692 for (i = 0; i < refi; i++) {
3693 u64 ptr_gen;
3694
3695 bytenr = sorted[i].bytenr;
3696
3697 /*
3698 * check the reference count on this leaf. If it is > 1
3699 * we just decrement it below and don't update any
3700 * of the refs the leaf points to.
3701 */
3702 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3703 BUG_ON(ret);
3704 if (refs != 1)
3705 continue;
3706
3707 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
3708
3709 /*
3710 * the leaf only had one reference, which means the
3711 * only thing pointing to this leaf is the snapshot
3712 * we're deleting. It isn't possible for the reference
3713 * count to increase again later
3714 *
3715 * The reference cache is checked for the leaf,
3716 * and if found we'll be able to drop any refs held by
3717 * the leaf without needing to read it in.
3718 */
3719 ref = btrfs_lookup_leaf_ref(root, bytenr);
3720 if (ref && ref->generation != ptr_gen) {
3721 btrfs_free_leaf_ref(root, ref);
3722 ref = NULL;
3723 }
3724 if (ref) {
3725 ret = cache_drop_leaf_ref(trans, root, ref);
3726 BUG_ON(ret);
3727 btrfs_remove_leaf_ref(root, ref);
3728 btrfs_free_leaf_ref(root, ref);
3729 } else {
3730 /*
3731 * the leaf wasn't in the reference cache, so
3732 * we have to read it.
3733 */
3734 leaf = read_tree_block(root, bytenr, blocksize,
3735 ptr_gen);
3736 ret = btrfs_drop_leaf_ref(trans, root, leaf);
3737 BUG_ON(ret);
3738 free_extent_buffer(leaf);
3739 }
3740 atomic_inc(&root->fs_info->throttle_gen);
3741 wake_up(&root->fs_info->transaction_throttle);
3742 cond_resched();
3743 }
3744
3745 /*
3746 * run through the loop again to free the refs on the leaves.
3747 * This is faster than doing it in the loop above because
3748 * the leaves are likely to be clustered together. We end up
3749 * working in nice chunks on the extent allocation tree.
3750 */
3751 for (i = 0; i < refi; i++) {
3752 bytenr = sorted[i].bytenr;
3753 ret = __btrfs_free_extent(trans, root, bytenr,
3754 blocksize, eb->start,
3755 root_owner, root_gen, 0, 1);
3756 BUG_ON(ret);
3757
3758 atomic_inc(&root->fs_info->throttle_gen);
3759 wake_up(&root->fs_info->transaction_throttle);
3760 cond_resched();
3761 }
3762out:
3763 kfree(sorted);
3764
3765 /*
3766 * update the path to show we've processed the entire level 1
3767 * node. This will get saved into the root's drop_snapshot_progress
3768 * field so these drops are not repeated again if this transaction
3769 * commits.
3770 */
3771 path->slots[1] = nritems;
3772 return 0;
3773}
3774
3775/*
3500 * helper function for drop_snapshot, this walks down the tree dropping ref 3776 * helper function for drop_snapshot, this walks down the tree dropping ref
3501 * counts as it goes. 3777 * counts as it goes.
3502 */ 3778 */
@@ -3511,7 +3787,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3511 struct extent_buffer *next; 3787 struct extent_buffer *next;
3512 struct extent_buffer *cur; 3788 struct extent_buffer *cur;
3513 struct extent_buffer *parent; 3789 struct extent_buffer *parent;
3514 struct btrfs_leaf_ref *ref;
3515 u32 blocksize; 3790 u32 blocksize;
3516 int ret; 3791 int ret;
3517 u32 refs; 3792 u32 refs;
@@ -3538,17 +3813,46 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3538 if (path->slots[*level] >= 3813 if (path->slots[*level] >=
3539 btrfs_header_nritems(cur)) 3814 btrfs_header_nritems(cur))
3540 break; 3815 break;
3816
3817 /* the new code goes down to level 1 and does all the
 3818 * leaves pointed to by that node in bulk. So, this check
3819 * for level 0 will always be false.
3820 *
3821 * But, the disk format allows the drop_snapshot_progress
3822 * field in the root to leave things in a state where
3823 * a leaf will need cleaning up here. If someone crashes
3824 * with the old code and then boots with the new code,
3825 * we might find a leaf here.
3826 */
3541 if (*level == 0) { 3827 if (*level == 0) {
3542 ret = btrfs_drop_leaf_ref(trans, root, cur); 3828 ret = btrfs_drop_leaf_ref(trans, root, cur);
3543 BUG_ON(ret); 3829 BUG_ON(ret);
3544 break; 3830 break;
3545 } 3831 }
3832
3833 /*
3834 * once we get to level one, process the whole node
3835 * at once, including everything below it.
3836 */
3837 if (*level == 1) {
3838 ret = drop_level_one_refs(trans, root, path);
3839 BUG_ON(ret);
3840 break;
3841 }
3842
3546 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 3843 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3547 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 3844 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3548 blocksize = btrfs_level_size(root, *level - 1); 3845 blocksize = btrfs_level_size(root, *level - 1);
3549 3846
3550 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs); 3847 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3551 BUG_ON(ret); 3848 BUG_ON(ret);
3849
3850 /*
3851 * if there is more than one reference, we don't need
3852 * to read that node to drop any references it has. We
3853 * just drop the ref we hold on that node and move on to the
3854 * next slot in this level.
3855 */
3552 if (refs != 1) { 3856 if (refs != 1) {
3553 parent = path->nodes[*level]; 3857 parent = path->nodes[*level];
3554 root_owner = btrfs_header_owner(parent); 3858 root_owner = btrfs_header_owner(parent);
@@ -3567,46 +3871,12 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3567 3871
3568 continue; 3872 continue;
3569 } 3873 }
3874
3570 /* 3875 /*
3571 * at this point, we have a single ref, and since the 3876 * we need to keep freeing things in the next level down.
3572 * only place referencing this extent is a dead root 3877 * read the block and loop around to process it
3573 * the reference count should never go higher.
3574 * So, we don't need to check it again
3575 */ 3878 */
3576 if (*level == 1) { 3879 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3577 ref = btrfs_lookup_leaf_ref(root, bytenr);
3578 if (ref && ref->generation != ptr_gen) {
3579 btrfs_free_leaf_ref(root, ref);
3580 ref = NULL;
3581 }
3582 if (ref) {
3583 ret = cache_drop_leaf_ref(trans, root, ref);
3584 BUG_ON(ret);
3585 btrfs_remove_leaf_ref(root, ref);
3586 btrfs_free_leaf_ref(root, ref);
3587 *level = 0;
3588 break;
3589 }
3590 }
3591 next = btrfs_find_tree_block(root, bytenr, blocksize);
3592 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
3593 free_extent_buffer(next);
3594
3595 next = read_tree_block(root, bytenr, blocksize,
3596 ptr_gen);
3597 cond_resched();
3598#if 0
3599 /*
3600 * this is a debugging check and can go away
3601 * the ref should never go all the way down to 1
3602 * at this point
3603 */
3604 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
3605 &refs);
3606 BUG_ON(ret);
3607 WARN_ON(refs != 1);
3608#endif
3609 }
3610 WARN_ON(*level <= 0); 3880 WARN_ON(*level <= 0);
3611 if (path->nodes[*level-1]) 3881 if (path->nodes[*level-1])
3612 free_extent_buffer(path->nodes[*level-1]); 3882 free_extent_buffer(path->nodes[*level-1]);
@@ -3631,11 +3901,16 @@ out:
3631 root_owner = btrfs_header_owner(parent); 3901 root_owner = btrfs_header_owner(parent);
3632 root_gen = btrfs_header_generation(parent); 3902 root_gen = btrfs_header_generation(parent);
3633 3903
3904 /*
3905 * cleanup and free the reference on the last node
3906 * we processed
3907 */
3634 ret = __btrfs_free_extent(trans, root, bytenr, blocksize, 3908 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
3635 parent->start, root_owner, root_gen, 3909 parent->start, root_owner, root_gen,
3636 *level, 1); 3910 *level, 1);
3637 free_extent_buffer(path->nodes[*level]); 3911 free_extent_buffer(path->nodes[*level]);
3638 path->nodes[*level] = NULL; 3912 path->nodes[*level] = NULL;
3913
3639 *level += 1; 3914 *level += 1;
3640 BUG_ON(ret); 3915 BUG_ON(ret);
3641 3916
@@ -3687,6 +3962,7 @@ static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3687 3962
3688 next = read_tree_block(root, bytenr, blocksize, ptr_gen); 3963 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3689 btrfs_tree_lock(next); 3964 btrfs_tree_lock(next);
3965 btrfs_set_lock_blocking(next);
3690 3966
3691 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize, 3967 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3692 &refs); 3968 &refs);
@@ -3754,6 +4030,13 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3754 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) { 4030 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3755 struct extent_buffer *node; 4031 struct extent_buffer *node;
3756 struct btrfs_disk_key disk_key; 4032 struct btrfs_disk_key disk_key;
4033
4034 /*
4035 * there is more work to do in this level.
4036 * Update the drop_progress marker to reflect
4037 * the work we've done so far, and then bump
4038 * the slot number
4039 */
3757 node = path->nodes[i]; 4040 node = path->nodes[i];
3758 path->slots[i]++; 4041 path->slots[i]++;
3759 *level = i; 4042 *level = i;
@@ -3765,6 +4048,11 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3765 return 0; 4048 return 0;
3766 } else { 4049 } else {
3767 struct extent_buffer *parent; 4050 struct extent_buffer *parent;
4051
4052 /*
4053 * this whole node is done, free our reference
4054 * on it and go up one level
4055 */
3768 if (path->nodes[*level] == root->node) 4056 if (path->nodes[*level] == root->node)
3769 parent = path->nodes[*level]; 4057 parent = path->nodes[*level];
3770 else 4058 else
@@ -4444,7 +4732,7 @@ static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4444 u64 lock_end = 0; 4732 u64 lock_end = 0;
4445 u64 num_bytes; 4733 u64 num_bytes;
4446 u64 ext_offset; 4734 u64 ext_offset;
4447 u64 first_pos; 4735 u64 search_end = (u64)-1;
4448 u32 nritems; 4736 u32 nritems;
4449 int nr_scaned = 0; 4737 int nr_scaned = 0;
4450 int extent_locked = 0; 4738 int extent_locked = 0;
@@ -4452,7 +4740,6 @@ static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4452 int ret; 4740 int ret;
4453 4741
4454 memcpy(&key, leaf_key, sizeof(key)); 4742 memcpy(&key, leaf_key, sizeof(key));
4455 first_pos = INT_LIMIT(loff_t) - extent_key->offset;
4456 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { 4743 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4457 if (key.objectid < ref_path->owner_objectid || 4744 if (key.objectid < ref_path->owner_objectid ||
4458 (key.objectid == ref_path->owner_objectid && 4745 (key.objectid == ref_path->owner_objectid &&
@@ -4501,7 +4788,7 @@ next:
4501 if ((key.objectid > ref_path->owner_objectid) || 4788 if ((key.objectid > ref_path->owner_objectid) ||
4502 (key.objectid == ref_path->owner_objectid && 4789 (key.objectid == ref_path->owner_objectid &&
4503 key.type > BTRFS_EXTENT_DATA_KEY) || 4790 key.type > BTRFS_EXTENT_DATA_KEY) ||
4504 (key.offset >= first_pos + extent_key->offset)) 4791 key.offset >= search_end)
4505 break; 4792 break;
4506 } 4793 }
4507 4794
@@ -4534,8 +4821,10 @@ next:
4534 num_bytes = btrfs_file_extent_num_bytes(leaf, fi); 4821 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4535 ext_offset = btrfs_file_extent_offset(leaf, fi); 4822 ext_offset = btrfs_file_extent_offset(leaf, fi);
4536 4823
4537 if (first_pos > key.offset - ext_offset) 4824 if (search_end == (u64)-1) {
4538 first_pos = key.offset - ext_offset; 4825 search_end = key.offset - ext_offset +
4826 btrfs_file_extent_ram_bytes(leaf, fi);
4827 }
4539 4828
4540 if (!extent_locked) { 4829 if (!extent_locked) {
4541 lock_start = key.offset; 4830 lock_start = key.offset;
@@ -4724,7 +5013,7 @@ next:
4724 } 5013 }
4725skip: 5014skip:
4726 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && 5015 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4727 key.offset >= first_pos + extent_key->offset) 5016 key.offset >= search_end)
4728 break; 5017 break;
4729 5018
4730 cond_resched(); 5019 cond_resched();
@@ -4778,6 +5067,7 @@ int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4778 ref->bytenr = buf->start; 5067 ref->bytenr = buf->start;
4779 ref->owner = btrfs_header_owner(buf); 5068 ref->owner = btrfs_header_owner(buf);
4780 ref->generation = btrfs_header_generation(buf); 5069 ref->generation = btrfs_header_generation(buf);
5070
4781 ret = btrfs_add_leaf_ref(root, ref, 0); 5071 ret = btrfs_add_leaf_ref(root, ref, 0);
4782 WARN_ON(ret); 5072 WARN_ON(ret);
4783 btrfs_free_leaf_ref(root, ref); 5073 btrfs_free_leaf_ref(root, ref);
@@ -5957,9 +6247,11 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5957 path = btrfs_alloc_path(); 6247 path = btrfs_alloc_path();
5958 BUG_ON(!path); 6248 BUG_ON(!path);
5959 6249
5960 btrfs_remove_free_space_cache(block_group); 6250 spin_lock(&root->fs_info->block_group_cache_lock);
5961 rb_erase(&block_group->cache_node, 6251 rb_erase(&block_group->cache_node,
5962 &root->fs_info->block_group_cache_tree); 6252 &root->fs_info->block_group_cache_tree);
6253 spin_unlock(&root->fs_info->block_group_cache_lock);
6254 btrfs_remove_free_space_cache(block_group);
5963 down_write(&block_group->space_info->groups_sem); 6255 down_write(&block_group->space_info->groups_sem);
5964 list_del(&block_group->list); 6256 list_del(&block_group->list);
5965 up_write(&block_group->space_info->groups_sem); 6257 up_write(&block_group->space_info->groups_sem);
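
The extent-tree.c changes above all lean on the same two-pass "refsort" pattern: pass one records (bytenr, slot) pairs, the pairs are sorted by byte number, and pass two does the expensive reference updates in ascending disk order while still addressing items by their original slot (btrfs_inc_ref, btrfs_drop_leaf_ref, cache_drop_leaf_ref and drop_level_one_refs all follow it). A minimal standalone sketch of the pattern, for illustration only and not kernel code: qsort() stands in for the kernel's sort(), and process_ref() is a hypothetical callback playing the role of __btrfs_inc_extent_ref()/__btrfs_free_extent().

#include <stdlib.h>

struct refsort_example {		/* mirrors struct refsort in the patch */
	unsigned long long bytenr;	/* disk byte number of the extent */
	unsigned int slot;		/* slot in the btree block */
};

static int refsort_example_cmp(const void *a_void, const void *b_void)
{
	const struct refsort_example *a = a_void;
	const struct refsort_example *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}

static void update_refs_in_disk_order(struct refsort_example *sorted, int refi,
				      void (*process_ref)(unsigned long long bytenr,
							  unsigned int slot))
{
	int i;

	/* pass one recorded nothing, so there is nothing to update */
	if (refi == 0)
		return;

	/* sort by byte number so the extent allocation tree is walked mostly forward */
	qsort(sorted, refi, sizeof(*sorted), refsort_example_cmp);

	/* pass two: do the real work in sorted order, addressed by original slot */
	for (i = 0; i < refi; i++)
		process_ref(sorted[i].bytenr, sorted[i].slot);
}
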
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e086d407f1fa..37d43b516b79 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -9,7 +9,6 @@
9#include <linux/spinlock.h> 9#include <linux/spinlock.h>
10#include <linux/blkdev.h> 10#include <linux/blkdev.h>
11#include <linux/swap.h> 11#include <linux/swap.h>
12#include <linux/version.h>
13#include <linux/writeback.h> 12#include <linux/writeback.h>
14#include <linux/pagevec.h> 13#include <linux/pagevec.h>
15#include "extent_io.h" 14#include "extent_io.h"
@@ -31,7 +30,7 @@ static LIST_HEAD(buffers);
31static LIST_HEAD(states); 30static LIST_HEAD(states);
32 31
33#define LEAK_DEBUG 0 32#define LEAK_DEBUG 0
34#ifdef LEAK_DEBUG 33#if LEAK_DEBUG
35static DEFINE_SPINLOCK(leak_lock); 34static DEFINE_SPINLOCK(leak_lock);
36#endif 35#endif
37 36
@@ -120,7 +119,7 @@ void extent_io_tree_init(struct extent_io_tree *tree,
120static struct extent_state *alloc_extent_state(gfp_t mask) 119static struct extent_state *alloc_extent_state(gfp_t mask)
121{ 120{
122 struct extent_state *state; 121 struct extent_state *state;
123#ifdef LEAK_DEBUG 122#if LEAK_DEBUG
124 unsigned long flags; 123 unsigned long flags;
125#endif 124#endif
126 125
@@ -130,7 +129,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
130 state->state = 0; 129 state->state = 0;
131 state->private = 0; 130 state->private = 0;
132 state->tree = NULL; 131 state->tree = NULL;
133#ifdef LEAK_DEBUG 132#if LEAK_DEBUG
134 spin_lock_irqsave(&leak_lock, flags); 133 spin_lock_irqsave(&leak_lock, flags);
135 list_add(&state->leak_list, &states); 134 list_add(&state->leak_list, &states);
136 spin_unlock_irqrestore(&leak_lock, flags); 135 spin_unlock_irqrestore(&leak_lock, flags);
@@ -145,11 +144,11 @@ static void free_extent_state(struct extent_state *state)
145 if (!state) 144 if (!state)
146 return; 145 return;
147 if (atomic_dec_and_test(&state->refs)) { 146 if (atomic_dec_and_test(&state->refs)) {
148#ifdef LEAK_DEBUG 147#if LEAK_DEBUG
149 unsigned long flags; 148 unsigned long flags;
150#endif 149#endif
151 WARN_ON(state->tree); 150 WARN_ON(state->tree);
152#ifdef LEAK_DEBUG 151#if LEAK_DEBUG
153 spin_lock_irqsave(&leak_lock, flags); 152 spin_lock_irqsave(&leak_lock, flags);
154 list_del(&state->leak_list); 153 list_del(&state->leak_list);
155 spin_unlock_irqrestore(&leak_lock, flags); 154 spin_unlock_irqrestore(&leak_lock, flags);
@@ -2378,11 +2377,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
2378 int scanned = 0; 2377 int scanned = 0;
2379 int range_whole = 0; 2378 int range_whole = 0;
2380 2379
2381 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2382 wbc->encountered_congestion = 1;
2383 return 0;
2384 }
2385
2386 pagevec_init(&pvec, 0); 2380 pagevec_init(&pvec, 0);
2387 if (wbc->range_cyclic) { 2381 if (wbc->range_cyclic) {
2388 index = mapping->writeback_index; /* Start from prev offset */ 2382 index = mapping->writeback_index; /* Start from prev offset */
@@ -2855,6 +2849,98 @@ out:
2855 return sector; 2849 return sector;
2856} 2850}
2857 2851
2852int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2853 __u64 start, __u64 len, get_extent_t *get_extent)
2854{
2855 int ret;
2856 u64 off = start;
2857 u64 max = start + len;
2858 u32 flags = 0;
2859 u64 disko = 0;
2860 struct extent_map *em = NULL;
2861 int end = 0;
2862 u64 em_start = 0, em_len = 0;
2863 unsigned long emflags;
2864 ret = 0;
2865
2866 if (len == 0)
2867 return -EINVAL;
2868
2869 lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2870 GFP_NOFS);
2871 em = get_extent(inode, NULL, 0, off, max - off, 0);
2872 if (!em)
2873 goto out;
2874 if (IS_ERR(em)) {
2875 ret = PTR_ERR(em);
2876 goto out;
2877 }
2878 while (!end) {
2879 off = em->start + em->len;
2880 if (off >= max)
2881 end = 1;
2882
2883 em_start = em->start;
2884 em_len = em->len;
2885
2886 disko = 0;
2887 flags = 0;
2888
2889 switch (em->block_start) {
2890 case EXTENT_MAP_LAST_BYTE:
2891 end = 1;
2892 flags |= FIEMAP_EXTENT_LAST;
2893 break;
2894 case EXTENT_MAP_HOLE:
2895 flags |= FIEMAP_EXTENT_UNWRITTEN;
2896 break;
2897 case EXTENT_MAP_INLINE:
2898 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2899 FIEMAP_EXTENT_NOT_ALIGNED);
2900 break;
2901 case EXTENT_MAP_DELALLOC:
2902 flags |= (FIEMAP_EXTENT_DELALLOC |
2903 FIEMAP_EXTENT_UNKNOWN);
2904 break;
2905 default:
2906 disko = em->block_start;
2907 break;
2908 }
2909 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2910 flags |= FIEMAP_EXTENT_ENCODED;
2911
2912 emflags = em->flags;
2913 free_extent_map(em);
2914 em = NULL;
2915
2916 if (!end) {
2917 em = get_extent(inode, NULL, 0, off, max - off, 0);
2918 if (!em)
2919 goto out;
2920 if (IS_ERR(em)) {
2921 ret = PTR_ERR(em);
2922 goto out;
2923 }
2924 emflags = em->flags;
2925 }
2926 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
2927 flags |= FIEMAP_EXTENT_LAST;
2928 end = 1;
2929 }
2930
2931 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2932 em_len, flags);
2933 if (ret)
2934 goto out_free;
2935 }
2936out_free:
2937 free_extent_map(em);
2938out:
2939 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2940 GFP_NOFS);
2941 return ret;
2942}
2943
2858static inline struct page *extent_buffer_page(struct extent_buffer *eb, 2944static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2859 unsigned long i) 2945 unsigned long i)
2860{ 2946{
@@ -2892,15 +2978,17 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2892 gfp_t mask) 2978 gfp_t mask)
2893{ 2979{
2894 struct extent_buffer *eb = NULL; 2980 struct extent_buffer *eb = NULL;
2895#ifdef LEAK_DEBUG 2981#if LEAK_DEBUG
2896 unsigned long flags; 2982 unsigned long flags;
2897#endif 2983#endif
2898 2984
2899 eb = kmem_cache_zalloc(extent_buffer_cache, mask); 2985 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2900 eb->start = start; 2986 eb->start = start;
2901 eb->len = len; 2987 eb->len = len;
2902 mutex_init(&eb->mutex); 2988 spin_lock_init(&eb->lock);
2903#ifdef LEAK_DEBUG 2989 init_waitqueue_head(&eb->lock_wq);
2990
2991#if LEAK_DEBUG
2904 spin_lock_irqsave(&leak_lock, flags); 2992 spin_lock_irqsave(&leak_lock, flags);
2905 list_add(&eb->leak_list, &buffers); 2993 list_add(&eb->leak_list, &buffers);
2906 spin_unlock_irqrestore(&leak_lock, flags); 2994 spin_unlock_irqrestore(&leak_lock, flags);
@@ -2912,7 +3000,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2912 3000
2913static void __free_extent_buffer(struct extent_buffer *eb) 3001static void __free_extent_buffer(struct extent_buffer *eb)
2914{ 3002{
2915#ifdef LEAK_DEBUG 3003#if LEAK_DEBUG
2916 unsigned long flags; 3004 unsigned long flags;
2917 spin_lock_irqsave(&leak_lock, flags); 3005 spin_lock_irqsave(&leak_lock, flags);
2918 list_del(&eb->leak_list); 3006 list_del(&eb->leak_list);
@@ -2980,8 +3068,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2980 unlock_page(p); 3068 unlock_page(p);
2981 } 3069 }
2982 if (uptodate) 3070 if (uptodate)
2983 eb->flags |= EXTENT_UPTODATE; 3071 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
2984 eb->flags |= EXTENT_BUFFER_FILLED;
2985 3072
2986 spin_lock(&tree->buffer_lock); 3073 spin_lock(&tree->buffer_lock);
2987 exists = buffer_tree_insert(tree, start, &eb->rb_node); 3074 exists = buffer_tree_insert(tree, start, &eb->rb_node);
@@ -3135,7 +3222,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3135 unsigned long num_pages; 3222 unsigned long num_pages;
3136 3223
3137 num_pages = num_extent_pages(eb->start, eb->len); 3224 num_pages = num_extent_pages(eb->start, eb->len);
3138 eb->flags &= ~EXTENT_UPTODATE; 3225 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3139 3226
3140 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 3227 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3141 GFP_NOFS); 3228 GFP_NOFS);
@@ -3206,7 +3293,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
3206 struct page *page; 3293 struct page *page;
3207 int pg_uptodate = 1; 3294 int pg_uptodate = 1;
3208 3295
3209 if (eb->flags & EXTENT_UPTODATE) 3296 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3210 return 1; 3297 return 1;
3211 3298
3212 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3299 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3242,7 +3329,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3242 struct bio *bio = NULL; 3329 struct bio *bio = NULL;
3243 unsigned long bio_flags = 0; 3330 unsigned long bio_flags = 0;
3244 3331
3245 if (eb->flags & EXTENT_UPTODATE) 3332 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3246 return 0; 3333 return 0;
3247 3334
3248 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3335 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3273,7 +3360,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3273 } 3360 }
3274 if (all_uptodate) { 3361 if (all_uptodate) {
3275 if (start_i == 0) 3362 if (start_i == 0)
3276 eb->flags |= EXTENT_UPTODATE; 3363 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3277 goto unlock_exit; 3364 goto unlock_exit;
3278 } 3365 }
3279 3366
@@ -3309,7 +3396,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3309 } 3396 }
3310 3397
3311 if (!ret) 3398 if (!ret)
3312 eb->flags |= EXTENT_UPTODATE; 3399 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3313 return ret; 3400 return ret;
3314 3401
3315unlock_exit: 3402unlock_exit:
@@ -3406,7 +3493,6 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3406 unmap_extent_buffer(eb, eb->map_token, km); 3493 unmap_extent_buffer(eb, eb->map_token, km);
3407 eb->map_token = NULL; 3494 eb->map_token = NULL;
3408 save = 1; 3495 save = 1;
3409 WARN_ON(!mutex_is_locked(&eb->mutex));
3410 } 3496 }
3411 err = map_private_extent_buffer(eb, start, min_len, token, map, 3497 err = map_private_extent_buffer(eb, start, min_len, token, map,
3412 map_start, map_len, km); 3498 map_start, map_len, km);
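
One small but easy-to-miss fix in the extent_io.c hunks is the switch from "#ifdef LEAK_DEBUG" to "#if LEAK_DEBUG". Because LEAK_DEBUG is defined to 0, #ifdef still evaluates as true and the leak-tracking lists and spinlock were being compiled in unconditionally; #if only enables them when the value is non-zero. A minimal standalone illustration (not btrfs code):

#define LEAK_DEBUG 0

#ifdef LEAK_DEBUG
/* reached: the macro is defined, so its value of 0 is irrelevant to #ifdef */
#endif

#if LEAK_DEBUG
/* not reached: only compiled once LEAK_DEBUG is changed to a non-zero value */
#endif
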
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index c5b483a79137..1f9df88afbf6 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -22,6 +22,10 @@
22/* flags for bio submission */ 22/* flags for bio submission */
23#define EXTENT_BIO_COMPRESSED 1 23#define EXTENT_BIO_COMPRESSED 1
24 24
25/* these are bit numbers for test/set bit */
26#define EXTENT_BUFFER_UPTODATE 0
27#define EXTENT_BUFFER_BLOCKING 1
28
25/* 29/*
26 * page->private values. Every page that is controlled by the extent 30 * page->private values. Every page that is controlled by the extent
27 * map has page->private set to one. 31 * map has page->private set to one.
@@ -95,11 +99,19 @@ struct extent_buffer {
95 unsigned long map_start; 99 unsigned long map_start;
96 unsigned long map_len; 100 unsigned long map_len;
97 struct page *first_page; 101 struct page *first_page;
102 unsigned long bflags;
98 atomic_t refs; 103 atomic_t refs;
99 int flags;
100 struct list_head leak_list; 104 struct list_head leak_list;
101 struct rb_node rb_node; 105 struct rb_node rb_node;
102 struct mutex mutex; 106
107 /* the spinlock is used to protect most operations */
108 spinlock_t lock;
109
110 /*
111 * when we keep the lock held while blocking, waiters go onto
112 * the wq
113 */
114 wait_queue_head_t lock_wq;
103}; 115};
104 116
105struct extent_map_tree; 117struct extent_map_tree;
@@ -193,6 +205,8 @@ int extent_commit_write(struct extent_io_tree *tree,
193 unsigned from, unsigned to); 205 unsigned from, unsigned to);
194sector_t extent_bmap(struct address_space *mapping, sector_t iblock, 206sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
195 get_extent_t *get_extent); 207 get_extent_t *get_extent);
208int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
209 __u64 start, __u64 len, get_extent_t *get_extent);
196int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end); 210int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
197int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); 211int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
198int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); 212int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
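
The extent_io.h change replaces the old "int flags" bitmask on struct extent_buffer with an "unsigned long bflags" word addressed by bit number (EXTENT_BUFFER_UPTODATE, EXTENT_BUFFER_BLOCKING), so the flags can be flipped with the atomic bitops instead of under the removed per-buffer mutex. A minimal sketch of the new usage; example_buffer and its helpers are hypothetical, only the bitops and bit numbers come from the patch.

#include <linux/bitops.h>

struct example_buffer {
	unsigned long bflags;	/* holds EXTENT_BUFFER_* bit numbers */
};

static void mark_uptodate(struct example_buffer *b)
{
	/* atomic read-modify-write; no lock is needed just to set the flag */
	set_bit(EXTENT_BUFFER_UPTODATE, &b->bflags);
}

static int is_uptodate(struct example_buffer *b)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &b->bflags);
}
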
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 4a83e33ada32..50da69da20ce 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -3,7 +3,6 @@
3#include <linux/slab.h> 3#include <linux/slab.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/spinlock.h> 5#include <linux/spinlock.h>
6#include <linux/version.h>
7#include <linux/hardirq.h> 6#include <linux/hardirq.h>
8#include "extent_map.h" 7#include "extent_map.h"
9 8
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 90268334145e..3e8023efaff7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -29,7 +29,6 @@
29#include <linux/writeback.h> 29#include <linux/writeback.h>
30#include <linux/statfs.h> 30#include <linux/statfs.h>
31#include <linux/compat.h> 31#include <linux/compat.h>
32#include <linux/version.h>
33#include "ctree.h" 32#include "ctree.h"
34#include "disk-io.h" 33#include "disk-io.h"
35#include "transaction.h" 34#include "transaction.h"
@@ -1215,10 +1214,10 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1215 } 1214 }
1216 mutex_unlock(&root->fs_info->trans_mutex); 1215 mutex_unlock(&root->fs_info->trans_mutex);
1217 1216
1218 root->fs_info->tree_log_batch++; 1217 root->log_batch++;
1219 filemap_fdatawrite(inode->i_mapping); 1218 filemap_fdatawrite(inode->i_mapping);
1220 btrfs_wait_ordered_range(inode, 0, (u64)-1); 1219 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1221 root->fs_info->tree_log_batch++; 1220 root->log_batch++;
1222 1221
1223 /* 1222 /*
1224 * ok we haven't committed the transaction yet, lets do a commit 1223 * ok we haven't committed the transaction yet, lets do a commit
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8adfe059ab41..8f0706210a47 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -34,7 +34,6 @@
34#include <linux/statfs.h> 34#include <linux/statfs.h>
35#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/bit_spinlock.h> 36#include <linux/bit_spinlock.h>
37#include <linux/version.h>
38#include <linux/xattr.h> 37#include <linux/xattr.h>
39#include <linux/posix_acl.h> 38#include <linux/posix_acl.h>
40#include <linux/falloc.h> 39#include <linux/falloc.h>
@@ -51,6 +50,7 @@
51#include "tree-log.h" 50#include "tree-log.h"
52#include "ref-cache.h" 51#include "ref-cache.h"
53#include "compression.h" 52#include "compression.h"
53#include "locking.h"
54 54
55struct btrfs_iget_args { 55struct btrfs_iget_args {
56 u64 ino; 56 u64 ino;
@@ -91,6 +91,16 @@ static noinline int cow_file_range(struct inode *inode,
91 u64 start, u64 end, int *page_started, 91 u64 start, u64 end, int *page_started,
92 unsigned long *nr_written, int unlock); 92 unsigned long *nr_written, int unlock);
93 93
94static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
95{
96 int err;
97
98 err = btrfs_init_acl(inode, dir);
99 if (!err)
100 err = btrfs_xattr_security_init(inode, dir);
101 return err;
102}
103
94/* 104/*
95 * a very lame attempt at stopping writes when the FS is 85% full. There 105 * a very lame attempt at stopping writes when the FS is 85% full. There
96 * are countless ways this is incorrect, but it is better than nothing. 106 * are countless ways this is incorrect, but it is better than nothing.
@@ -350,6 +360,19 @@ again:
350 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 360 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
351 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); 361 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
352 362
363 /*
364 * we don't want to send crud past the end of i_size through
365 * compression, that's just a waste of CPU time. So, if the
366 * end of the file is before the start of our current
367 * requested range of bytes, we bail out to the uncompressed
368 * cleanup code that can deal with all of this.
369 *
370 * It isn't really the fastest way to fix things, but this is a
371 * very uncommon corner.
372 */
373 if (actual_end <= start)
374 goto cleanup_and_bail_uncompressed;
375
353 total_compressed = actual_end - start; 376 total_compressed = actual_end - start;
354 377
355 /* we want to make sure that amount of ram required to uncompress 378 /* we want to make sure that amount of ram required to uncompress
@@ -494,6 +517,7 @@ again:
494 goto again; 517 goto again;
495 } 518 }
496 } else { 519 } else {
520cleanup_and_bail_uncompressed:
497 /* 521 /*
498 * No compression, but we still need to write the pages in 522 * No compression, but we still need to write the pages in
499 * the file we've been given so far. redirty the locked 523 * the file we've been given so far. redirty the locked
@@ -1324,12 +1348,11 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1324 struct inode *inode, u64 file_offset, 1348 struct inode *inode, u64 file_offset,
1325 struct list_head *list) 1349 struct list_head *list)
1326{ 1350{
1327 struct list_head *cur;
1328 struct btrfs_ordered_sum *sum; 1351 struct btrfs_ordered_sum *sum;
1329 1352
1330 btrfs_set_trans_block_group(trans, inode); 1353 btrfs_set_trans_block_group(trans, inode);
1331 list_for_each(cur, list) { 1354
1332 sum = list_entry(cur, struct btrfs_ordered_sum, list); 1355 list_for_each_entry(sum, list, list) {
1333 btrfs_csum_file_blocks(trans, 1356 btrfs_csum_file_blocks(trans,
1334 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1357 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1335 } 1358 }
@@ -2013,6 +2036,7 @@ void btrfs_read_locked_inode(struct inode *inode)
2013 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2036 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2014 2037
2015 alloc_group_block = btrfs_inode_block_group(leaf, inode_item); 2038 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2039
2016 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, 2040 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2017 alloc_group_block, 0); 2041 alloc_group_block, 0);
2018 btrfs_free_path(path); 2042 btrfs_free_path(path);
@@ -2039,6 +2063,7 @@ void btrfs_read_locked_inode(struct inode *inode)
2039 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2063 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2040 break; 2064 break;
2041 default: 2065 default:
2066 inode->i_op = &btrfs_special_inode_operations;
2042 init_special_inode(inode, inode->i_mode, rdev); 2067 init_special_inode(inode, inode->i_mode, rdev);
2043 break; 2068 break;
2044 } 2069 }
@@ -2108,6 +2133,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2108 goto failed; 2133 goto failed;
2109 } 2134 }
2110 2135
2136 btrfs_unlock_up_safe(path, 1);
2111 leaf = path->nodes[0]; 2137 leaf = path->nodes[0];
2112 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2138 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2113 struct btrfs_inode_item); 2139 struct btrfs_inode_item);
@@ -2429,6 +2455,8 @@ next_node:
2429 ref->generation = leaf_gen; 2455 ref->generation = leaf_gen;
2430 ref->nritems = 0; 2456 ref->nritems = 0;
2431 2457
2458 btrfs_sort_leaf_ref(ref);
2459
2432 ret = btrfs_add_leaf_ref(root, ref, 0); 2460 ret = btrfs_add_leaf_ref(root, ref, 0);
2433 WARN_ON(ret); 2461 WARN_ON(ret);
2434 btrfs_free_leaf_ref(root, ref); 2462 btrfs_free_leaf_ref(root, ref);
@@ -2476,7 +2504,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2476 struct btrfs_path *path; 2504 struct btrfs_path *path;
2477 struct btrfs_key key; 2505 struct btrfs_key key;
2478 struct btrfs_key found_key; 2506 struct btrfs_key found_key;
2479 u32 found_type; 2507 u32 found_type = (u8)-1;
2480 struct extent_buffer *leaf; 2508 struct extent_buffer *leaf;
2481 struct btrfs_file_extent_item *fi; 2509 struct btrfs_file_extent_item *fi;
2482 u64 extent_start = 0; 2510 u64 extent_start = 0;
@@ -2663,6 +2691,8 @@ next:
2663 if (pending_del_nr) 2691 if (pending_del_nr)
2664 goto del_pending; 2692 goto del_pending;
2665 btrfs_release_path(root, path); 2693 btrfs_release_path(root, path);
2694 if (found_type == BTRFS_INODE_ITEM_KEY)
2695 break;
2666 goto search_again; 2696 goto search_again;
2667 } 2697 }
2668 2698
@@ -2679,6 +2709,8 @@ del_pending:
2679 BUG_ON(ret); 2709 BUG_ON(ret);
2680 pending_del_nr = 0; 2710 pending_del_nr = 0;
2681 btrfs_release_path(root, path); 2711 btrfs_release_path(root, path);
2712 if (found_type == BTRFS_INODE_ITEM_KEY)
2713 break;
2682 goto search_again; 2714 goto search_again;
2683 } 2715 }
2684 } 2716 }
@@ -3265,7 +3297,7 @@ skip:
3265 3297
3266 /* Reached end of directory/root. Bump pos past the last item. */ 3298 /* Reached end of directory/root. Bump pos past the last item. */
3267 if (key_type == BTRFS_DIR_INDEX_KEY) 3299 if (key_type == BTRFS_DIR_INDEX_KEY)
3268 filp->f_pos = INT_LIMIT(typeof(filp->f_pos)); 3300 filp->f_pos = INT_LIMIT(off_t);
3269 else 3301 else
3270 filp->f_pos++; 3302 filp->f_pos++;
3271nopos: 3303nopos:
@@ -3458,7 +3490,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3458 root->highest_inode = objectid; 3490 root->highest_inode = objectid;
3459 3491
3460 inode->i_uid = current_fsuid(); 3492 inode->i_uid = current_fsuid();
3461 inode->i_gid = current_fsgid(); 3493
3494 if (dir && (dir->i_mode & S_ISGID)) {
3495 inode->i_gid = dir->i_gid;
3496 if (S_ISDIR(mode))
3497 mode |= S_ISGID;
3498 } else
3499 inode->i_gid = current_fsgid();
3500
3462 inode->i_mode = mode; 3501 inode->i_mode = mode;
3463 inode->i_ino = objectid; 3502 inode->i_ino = objectid;
3464 inode_set_bytes(inode, 0); 3503 inode_set_bytes(inode, 0);
@@ -3586,7 +3625,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3586 if (IS_ERR(inode)) 3625 if (IS_ERR(inode))
3587 goto out_unlock; 3626 goto out_unlock;
3588 3627
3589 err = btrfs_init_acl(inode, dir); 3628 err = btrfs_init_inode_security(inode, dir);
3590 if (err) { 3629 if (err) {
3591 drop_inode = 1; 3630 drop_inode = 1;
3592 goto out_unlock; 3631 goto out_unlock;
@@ -3649,7 +3688,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
3649 if (IS_ERR(inode)) 3688 if (IS_ERR(inode))
3650 goto out_unlock; 3689 goto out_unlock;
3651 3690
3652 err = btrfs_init_acl(inode, dir); 3691 err = btrfs_init_inode_security(inode, dir);
3653 if (err) { 3692 if (err) {
3654 drop_inode = 1; 3693 drop_inode = 1;
3655 goto out_unlock; 3694 goto out_unlock;
@@ -3772,7 +3811,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3772 3811
3773 drop_on_err = 1; 3812 drop_on_err = 1;
3774 3813
3775 err = btrfs_init_acl(inode, dir); 3814 err = btrfs_init_inode_security(inode, dir);
3776 if (err) 3815 if (err)
3777 goto out_fail; 3816 goto out_fail;
3778 3817
@@ -4158,9 +4197,10 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4158 return -EINVAL; 4197 return -EINVAL;
4159} 4198}
4160 4199
4161static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock) 4200static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4201 __u64 start, __u64 len)
4162{ 4202{
4163 return extent_bmap(mapping, iblock, btrfs_get_extent); 4203 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4164} 4204}
4165 4205
4166int btrfs_readpage(struct file *file, struct page *page) 4206int btrfs_readpage(struct file *file, struct page *page)
@@ -4733,7 +4773,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4733 if (IS_ERR(inode)) 4773 if (IS_ERR(inode))
4734 goto out_unlock; 4774 goto out_unlock;
4735 4775
4736 err = btrfs_init_acl(inode, dir); 4776 err = btrfs_init_inode_security(inode, dir);
4737 if (err) { 4777 if (err) {
4738 drop_inode = 1; 4778 drop_inode = 1;
4739 goto out_unlock; 4779 goto out_unlock;
@@ -4987,13 +5027,24 @@ static struct extent_io_ops btrfs_extent_io_ops = {
4987 .clear_bit_hook = btrfs_clear_bit_hook, 5027 .clear_bit_hook = btrfs_clear_bit_hook,
4988}; 5028};
4989 5029
5030/*
5031 * btrfs doesn't support the bmap operation because swapfiles
5032 * use bmap to make a mapping of extents in the file. They assume
5033 * these extents won't change over the life of the file and they
5034 * use the bmap result to do IO directly to the drive.
5035 *
5036 * the btrfs bmap call would return logical addresses that aren't
5037 * suitable for IO and they also will change frequently as COW
5038 * operations happen. So, swapfile + btrfs == corruption.
5039 *
5040 * For now we're avoiding this by dropping bmap.
5041 */
4990static struct address_space_operations btrfs_aops = { 5042static struct address_space_operations btrfs_aops = {
4991 .readpage = btrfs_readpage, 5043 .readpage = btrfs_readpage,
4992 .writepage = btrfs_writepage, 5044 .writepage = btrfs_writepage,
4993 .writepages = btrfs_writepages, 5045 .writepages = btrfs_writepages,
4994 .readpages = btrfs_readpages, 5046 .readpages = btrfs_readpages,
4995 .sync_page = block_sync_page, 5047 .sync_page = block_sync_page,
4996 .bmap = btrfs_bmap,
4997 .direct_IO = btrfs_direct_IO, 5048 .direct_IO = btrfs_direct_IO,
4998 .invalidatepage = btrfs_invalidatepage, 5049 .invalidatepage = btrfs_invalidatepage,
4999 .releasepage = btrfs_releasepage, 5050 .releasepage = btrfs_releasepage,
@@ -5017,6 +5068,7 @@ static struct inode_operations btrfs_file_inode_operations = {
5017 .removexattr = btrfs_removexattr, 5068 .removexattr = btrfs_removexattr,
5018 .permission = btrfs_permission, 5069 .permission = btrfs_permission,
5019 .fallocate = btrfs_fallocate, 5070 .fallocate = btrfs_fallocate,
5071 .fiemap = btrfs_fiemap,
5020}; 5072};
5021static struct inode_operations btrfs_special_inode_operations = { 5073static struct inode_operations btrfs_special_inode_operations = {
5022 .getattr = btrfs_getattr, 5074 .getattr = btrfs_getattr,
@@ -5032,4 +5084,8 @@ static struct inode_operations btrfs_symlink_inode_operations = {
5032 .follow_link = page_follow_link_light, 5084 .follow_link = page_follow_link_light,
5033 .put_link = page_put_link, 5085 .put_link = page_put_link,
5034 .permission = btrfs_permission, 5086 .permission = btrfs_permission,
5087 .setxattr = btrfs_setxattr,
5088 .getxattr = btrfs_getxattr,
5089 .listxattr = btrfs_listxattr,
5090 .removexattr = btrfs_removexattr,
5035}; 5091};
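
With .fiemap = btrfs_fiemap wired into the file inode operations above, extent maps become visible to userspace through the standard fiemap ioctl. A small userspace sketch of how the new hook is exercised, assuming the stock <linux/fiemap.h> interface and a fixed 32-extent buffer:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	int fd, i;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	/* room for up to 32 extents in a single call */
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_extent_count = 32;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("fiemap");
		return 1;
	}

	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu length %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);

	free(fm);
	close(fd);
	return 0;
}
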
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c2aa33e3feb5..988fdc8b49eb 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -38,7 +38,6 @@
38#include <linux/compat.h> 38#include <linux/compat.h>
39#include <linux/bit_spinlock.h> 39#include <linux/bit_spinlock.h>
40#include <linux/security.h> 40#include <linux/security.h>
41#include <linux/version.h>
42#include <linux/xattr.h> 41#include <linux/xattr.h>
43#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
44#include "compat.h" 43#include "compat.h"
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 39bae7761db6..68fd9ccf1805 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -26,45 +26,215 @@
26#include "locking.h" 26#include "locking.h"
27 27
28/* 28/*
29 * locks the per buffer mutex in an extent buffer. This uses adaptive locks 29 * btrfs_header_level() isn't free, so don't call it when lockdep isn't
30 * and the spin is not tuned very extensively. The spinning does make a big 30 * on
31 * difference in almost every workload, but spinning for the right amount of
32 * time needs some help.
33 *
34 * In general, we want to spin as long as the lock holder is doing btree
35 * searches, and we should give up if they are in more expensive code.
36 */ 31 */
32#ifdef CONFIG_DEBUG_LOCK_ALLOC
33static inline void spin_nested(struct extent_buffer *eb)
34{
35 spin_lock_nested(&eb->lock, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
36}
37#else
38static inline void spin_nested(struct extent_buffer *eb)
39{
40 spin_lock(&eb->lock);
41}
42#endif
37 43
38int btrfs_tree_lock(struct extent_buffer *eb) 44/*
45 * Setting a lock to blocking will drop the spinlock and set the
46 * flag that forces other procs who want the lock to wait. After
47 * this you can safely schedule with the lock held.
48 */
49void btrfs_set_lock_blocking(struct extent_buffer *eb)
39{ 50{
40 int i; 51 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
52 set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
53 spin_unlock(&eb->lock);
54 }
55 /* exit with the spin lock released and the bit set */
56}
41 57
42 if (mutex_trylock(&eb->mutex)) 58/*
43 return 0; 59 * clearing the blocking flag will take the spinlock again.
60 * After this you can't safely schedule
61 */
62void btrfs_clear_lock_blocking(struct extent_buffer *eb)
63{
64 if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
65 spin_nested(eb);
66 clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
67 smp_mb__after_clear_bit();
68 }
69 /* exit with the spin lock held */
70}
71
72/*
73 * unfortunately, many of the places that currently set a lock to blocking
 74 * don't end up blocking for very long, and often they don't block
 75 * at all. For a dbench 50 run, if we don't spin on the blocking bit
76 * at all, the context switch rate can jump up to 400,000/sec or more.
77 *
78 * So, we're still stuck with this crummy spin on the blocking bit,
79 * at least until the most common causes of the short blocks
80 * can be dealt with.
81 */
82static int btrfs_spin_on_block(struct extent_buffer *eb)
83{
84 int i;
44 for (i = 0; i < 512; i++) { 85 for (i = 0; i < 512; i++) {
45 cpu_relax(); 86 cpu_relax();
46 if (mutex_trylock(&eb->mutex)) 87 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
88 return 1;
89 if (need_resched())
90 break;
91 }
92 return 0;
93}
94
95/*
96 * This is somewhat different from trylock. It will take the
97 * spinlock but if it finds the lock is set to blocking, it will
98 * return without the lock held.
99 *
100 * returns 1 if it was able to take the lock and zero otherwise
101 *
102 * After this call, scheduling is not safe without first calling
103 * btrfs_set_lock_blocking()
104 */
105int btrfs_try_spin_lock(struct extent_buffer *eb)
106{
107 int i;
108
109 spin_nested(eb);
110 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
111 return 1;
112 spin_unlock(&eb->lock);
113
114 /* spin for a bit on the BLOCKING flag */
115 for (i = 0; i < 2; i++) {
116 if (!btrfs_spin_on_block(eb))
117 break;
118
119 spin_nested(eb);
120 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
121 return 1;
122 spin_unlock(&eb->lock);
123 }
124 return 0;
125}
126
127/*
128 * the autoremove wake function will return 0 if it tried to wake up
129 * a process that was already awake, which means that process won't
130 * count as an exclusive wakeup. The waitq code will continue waking
131 * procs until it finds one that was actually sleeping.
132 *
133 * For btrfs, this isn't quite what we want. We want a single proc
134 * to be notified that the lock is ready for taking. If that proc
135 * already happens to be awake, great, it will loop around and try for
136 * the lock.
137 *
138 * So, btrfs_wake_function always returns 1, even when the proc that we
139 * tried to wake up was already awake.
140 */
141static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
142 int sync, void *key)
143{
144 autoremove_wake_function(wait, mode, sync, key);
145 return 1;
146}
147
148/*
149 * returns with the extent buffer spinlocked.
150 *
151 * This will spin and/or wait as required to take the lock, and then
152 * return with the spinlock held.
153 *
154 * After this call, scheduling is not safe without first calling
155 * btrfs_set_lock_blocking()
156 */
157int btrfs_tree_lock(struct extent_buffer *eb)
158{
159 DEFINE_WAIT(wait);
160 wait.func = btrfs_wake_function;
161
162 while(1) {
163 spin_nested(eb);
164
165 /* nobody is blocking, exit with the spinlock held */
166 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
47 return 0; 167 return 0;
168
169 /*
170 * we have the spinlock, but the real owner is blocking.
171 * wait for them
172 */
173 spin_unlock(&eb->lock);
174
175 /*
176 * spin for a bit, and if the blocking flag goes away,
177 * loop around
178 */
179 if (btrfs_spin_on_block(eb))
180 continue;
181
182 prepare_to_wait_exclusive(&eb->lock_wq, &wait,
183 TASK_UNINTERRUPTIBLE);
184
185 if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
186 schedule();
187
188 finish_wait(&eb->lock_wq, &wait);
48 } 189 }
49 cpu_relax();
50 mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
51 return 0; 190 return 0;
52} 191}
53 192
193/*
194 * Very quick trylock, this does not spin or schedule. It returns
195 * 1 with the spinlock held if it was able to take the lock, or it
196 * returns zero if it was unable to take the lock.
197 *
198 * After this call, scheduling is not safe without first calling
199 * btrfs_set_lock_blocking()
200 */
54int btrfs_try_tree_lock(struct extent_buffer *eb) 201int btrfs_try_tree_lock(struct extent_buffer *eb)
55{ 202{
56 return mutex_trylock(&eb->mutex); 203 if (spin_trylock(&eb->lock)) {
204 if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
205 /*
206 * we've got the spinlock, but the real owner is
207 * blocking. Drop the spinlock and return failure
208 */
209 spin_unlock(&eb->lock);
210 return 0;
211 }
212 return 1;
213 }
214 /* someone else has the spinlock, give up */
215 return 0;
57} 216}
58 217
59int btrfs_tree_unlock(struct extent_buffer *eb) 218int btrfs_tree_unlock(struct extent_buffer *eb)
60{ 219{
61 mutex_unlock(&eb->mutex); 220 /*
221 * if we were a blocking owner, we don't have the spinlock held
222 * just clear the bit and look for waiters
223 */
224 if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
225 smp_mb__after_clear_bit();
226 else
227 spin_unlock(&eb->lock);
228
229 if (waitqueue_active(&eb->lock_wq))
230 wake_up(&eb->lock_wq);
62 return 0; 231 return 0;
63} 232}
64 233
65int btrfs_tree_locked(struct extent_buffer *eb) 234int btrfs_tree_locked(struct extent_buffer *eb)
66{ 235{
67 return mutex_is_locked(&eb->mutex); 236 return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
237 spin_is_locked(&eb->lock);
68} 238}
69 239
70/* 240/*
@@ -75,12 +245,14 @@ int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
75{ 245{
76 int i; 246 int i;
77 struct extent_buffer *eb; 247 struct extent_buffer *eb;
248
78 for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) { 249 for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
79 eb = path->nodes[i]; 250 eb = path->nodes[i];
80 if (!eb) 251 if (!eb)
81 break; 252 break;
82 smp_mb(); 253 smp_mb();
83 if (!list_empty(&eb->mutex.wait_list)) 254 if (spin_is_contended(&eb->lock) ||
255 waitqueue_active(&eb->lock_wq))
84 return 1; 256 return 1;
85 } 257 }
86 return 0; 258 return 0;
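
The rewritten locking.c above replaces the per-extent-buffer mutex with a spinlock plus an EXTENT_BUFFER_BLOCKING bit: a holder that expects to sleep sets the bit and drops the spinlock while keeping logical ownership, and contenders spin briefly on the bit before falling back to an exclusive wait queue. Below is a self-contained userspace analogue (pthreads and C11 atomics; every name is illustrative, and it uses a plain broadcast rather than the kernel's exclusive wakeup via btrfs_wake_function) showing the handoff between the spinning and blocking modes; compile with -pthread.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

struct eb_lock {
	pthread_spinlock_t lock;   /* short, non-sleeping critical sections */
	atomic_int blocking;       /* stand-in for EXTENT_BUFFER_BLOCKING   */
	pthread_mutex_t wq_mutex;  /* protects the condition variable       */
	pthread_cond_t wq;         /* where long waiters sleep              */
};

static void eb_lock_init(struct eb_lock *eb)
{
	pthread_spin_init(&eb->lock, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&eb->blocking, 0);
	pthread_mutex_init(&eb->wq_mutex, NULL);
	pthread_cond_init(&eb->wq, NULL);
}

/* like btrfs_set_lock_blocking(): keep ownership, release the spinlock */
static void eb_set_blocking(struct eb_lock *eb)
{
	if (!atomic_exchange(&eb->blocking, 1))
		pthread_spin_unlock(&eb->lock);
}

/* like btrfs_tree_lock(): spin first, sleep only if the owner is blocking */
static void eb_tree_lock(struct eb_lock *eb)
{
	for (;;) {
		pthread_spin_lock(&eb->lock);
		if (!atomic_load(&eb->blocking))
			return;                 /* exit holding the spinlock */
		pthread_spin_unlock(&eb->lock);

		/* brief spin on the blocking flag before sleeping */
		for (int i = 0; i < 512 && atomic_load(&eb->blocking); i++)
			sched_yield();

		pthread_mutex_lock(&eb->wq_mutex);
		while (atomic_load(&eb->blocking))
			pthread_cond_wait(&eb->wq, &eb->wq_mutex);
		pthread_mutex_unlock(&eb->wq_mutex);
	}
}

/* like btrfs_tree_unlock(): a blocking owner only clears the bit */
static void eb_tree_unlock(struct eb_lock *eb)
{
	if (!atomic_exchange(&eb->blocking, 0))
		pthread_spin_unlock(&eb->lock);

	pthread_mutex_lock(&eb->wq_mutex);
	pthread_cond_broadcast(&eb->wq);
	pthread_mutex_unlock(&eb->wq_mutex);
}
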
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index bc1faef12519..d92e707f5870 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -22,6 +22,12 @@
22int btrfs_tree_lock(struct extent_buffer *eb); 22int btrfs_tree_lock(struct extent_buffer *eb);
23int btrfs_tree_unlock(struct extent_buffer *eb); 23int btrfs_tree_unlock(struct extent_buffer *eb);
24int btrfs_tree_locked(struct extent_buffer *eb); 24int btrfs_tree_locked(struct extent_buffer *eb);
25
25int btrfs_try_tree_lock(struct extent_buffer *eb); 26int btrfs_try_tree_lock(struct extent_buffer *eb);
27int btrfs_try_spin_lock(struct extent_buffer *eb);
28
26int btrfs_path_lock_waiting(struct btrfs_path *path, int level); 29int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
30
31void btrfs_set_lock_blocking(struct extent_buffer *eb);
32void btrfs_clear_lock_blocking(struct extent_buffer *eb);
27#endif 33#endif
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a20940170274..77c2411a5f0f 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -613,7 +613,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
613 struct btrfs_sector_sum *sector_sums; 613 struct btrfs_sector_sum *sector_sums;
614 struct btrfs_ordered_extent *ordered; 614 struct btrfs_ordered_extent *ordered;
615 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; 615 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
616 struct list_head *cur;
617 unsigned long num_sectors; 616 unsigned long num_sectors;
618 unsigned long i; 617 unsigned long i;
619 u32 sectorsize = BTRFS_I(inode)->root->sectorsize; 618 u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
@@ -624,8 +623,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
624 return 1; 623 return 1;
625 624
626 mutex_lock(&tree->mutex); 625 mutex_lock(&tree->mutex);
627 list_for_each_prev(cur, &ordered->list) { 626 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
628 ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
629 if (disk_bytenr >= ordered_sum->bytenr) { 627 if (disk_bytenr >= ordered_sum->bytenr) {
630 num_sectors = ordered_sum->len / sectorsize; 628 num_sectors = ordered_sum->len / sectorsize;
631 sector_sums = ordered_sum->sums; 629 sector_sums = ordered_sum->sums;
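
This hunk, like the later ones in transaction.c and volumes.c, converts an open-coded list_for_each()/list_entry() pair into the typed iterator, which removes the struct list_head *cur cursor entirely. A small standalone sketch of the pattern (re-deriving container_of locally and using GCC's typeof; names are illustrative, not the kernel headers) shows why the explicit list_entry() call disappears.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* typed iteration: the cursor-to-entry conversion lives in the macro */
#define list_for_each_entry_reverse(pos, head, member)                       \
	for ((pos) = container_of((head)->prev, typeof(*(pos)), member);     \
	     &(pos)->member != (head);                                        \
	     (pos) = container_of((pos)->member.prev, typeof(*(pos)), member))

struct sum { int bytenr; struct list_head list; };

int main(void)
{
	struct list_head head;
	struct sum a = { .bytenr = 100 }, b = { .bytenr = 200 };
	struct sum *pos;

	/* hand-built list: head -> a -> b -> head */
	head.next = &a.list;   head.prev = &b.list;
	a.list.prev = &head;   a.list.next = &b.list;
	b.list.prev = &a.list; b.list.next = &head;

	/* walks b, then a -- newest entry first, no list_entry() needed */
	list_for_each_entry_reverse(pos, &head, list)
		printf("bytenr %d\n", pos->bytenr);
	return 0;
}
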
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index 6f0acc4c9eab..d0cc62bccb94 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/sort.h>
20#include "ctree.h" 21#include "ctree.h"
21#include "ref-cache.h" 22#include "ref-cache.h"
22#include "transaction.h" 23#include "transaction.h"
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
index 16f3183d7c59..bc283ad2db73 100644
--- a/fs/btrfs/ref-cache.h
+++ b/fs/btrfs/ref-cache.h
@@ -73,5 +73,4 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
73int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, 73int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
74 int shared); 74 int shared);
75int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); 75int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
76
77#endif 76#endif
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index db9fb3bc1e33..f3fd7e2cbc38 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -37,7 +37,6 @@
37#include <linux/ctype.h> 37#include <linux/ctype.h>
38#include <linux/namei.h> 38#include <linux/namei.h>
39#include <linux/miscdevice.h> 39#include <linux/miscdevice.h>
40#include <linux/version.h>
41#include <linux/magic.h> 40#include <linux/magic.h>
42#include "compat.h" 41#include "compat.h"
43#include "ctree.h" 42#include "ctree.h"
@@ -583,17 +582,18 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
583 struct btrfs_ioctl_vol_args *vol; 582 struct btrfs_ioctl_vol_args *vol;
584 struct btrfs_fs_devices *fs_devices; 583 struct btrfs_fs_devices *fs_devices;
585 int ret = -ENOTTY; 584 int ret = -ENOTTY;
586 int len;
587 585
588 if (!capable(CAP_SYS_ADMIN)) 586 if (!capable(CAP_SYS_ADMIN))
589 return -EPERM; 587 return -EPERM;
590 588
591 vol = kmalloc(sizeof(*vol), GFP_KERNEL); 589 vol = kmalloc(sizeof(*vol), GFP_KERNEL);
590 if (!vol)
591 return -ENOMEM;
592
592 if (copy_from_user(vol, (void __user *)arg, sizeof(*vol))) { 593 if (copy_from_user(vol, (void __user *)arg, sizeof(*vol))) {
593 ret = -EFAULT; 594 ret = -EFAULT;
594 goto out; 595 goto out;
595 } 596 }
596 len = strnlen(vol->name, BTRFS_PATH_NAME_MAX);
597 597
598 switch (cmd) { 598 switch (cmd) {
599 case BTRFS_IOC_SCAN_DEV: 599 case BTRFS_IOC_SCAN_DEV:
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8a08f9443340..919172de5c9a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -852,11 +852,9 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
852{ 852{
853 struct btrfs_pending_snapshot *pending; 853 struct btrfs_pending_snapshot *pending;
854 struct list_head *head = &trans->transaction->pending_snapshots; 854 struct list_head *head = &trans->transaction->pending_snapshots;
855 struct list_head *cur;
856 int ret; 855 int ret;
857 856
858 list_for_each(cur, head) { 857 list_for_each_entry(pending, head, list) {
859 pending = list_entry(cur, struct btrfs_pending_snapshot, list);
860 ret = create_pending_snapshot(trans, fs_info, pending); 858 ret = create_pending_snapshot(trans, fs_info, pending);
861 BUG_ON(ret); 859 BUG_ON(ret);
862 } 860 }
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 3e8358c36165..98d25fa4570e 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -74,6 +74,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
74 u32 nritems; 74 u32 nritems;
75 75
76 root_node = btrfs_lock_root_node(root); 76 root_node = btrfs_lock_root_node(root);
77 btrfs_set_lock_blocking(root_node);
77 nritems = btrfs_header_nritems(root_node); 78 nritems = btrfs_header_nritems(root_node);
78 root->defrag_max.objectid = 0; 79 root->defrag_max.objectid = 0;
79 /* from above we know this is not a leaf */ 80 /* from above we know this is not a leaf */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d81cda2e077c..20794290256b 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -78,104 +78,6 @@ static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
78 */ 78 */
79 79
80/* 80/*
81 * btrfs_add_log_tree adds a new per-subvolume log tree into the
82 * tree of log tree roots. This must be called with a tree log transaction
83 * running (see start_log_trans).
84 */
85static int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
86 struct btrfs_root *root)
87{
88 struct btrfs_key key;
89 struct btrfs_root_item root_item;
90 struct btrfs_inode_item *inode_item;
91 struct extent_buffer *leaf;
92 struct btrfs_root *new_root = root;
93 int ret;
94 u64 objectid = root->root_key.objectid;
95
96 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
97 BTRFS_TREE_LOG_OBJECTID,
98 trans->transid, 0, 0, 0);
99 if (IS_ERR(leaf)) {
100 ret = PTR_ERR(leaf);
101 return ret;
102 }
103
104 btrfs_set_header_nritems(leaf, 0);
105 btrfs_set_header_level(leaf, 0);
106 btrfs_set_header_bytenr(leaf, leaf->start);
107 btrfs_set_header_generation(leaf, trans->transid);
108 btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
109
110 write_extent_buffer(leaf, root->fs_info->fsid,
111 (unsigned long)btrfs_header_fsid(leaf),
112 BTRFS_FSID_SIZE);
113 btrfs_mark_buffer_dirty(leaf);
114
115 inode_item = &root_item.inode;
116 memset(inode_item, 0, sizeof(*inode_item));
117 inode_item->generation = cpu_to_le64(1);
118 inode_item->size = cpu_to_le64(3);
119 inode_item->nlink = cpu_to_le32(1);
120 inode_item->nbytes = cpu_to_le64(root->leafsize);
121 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
122
123 btrfs_set_root_bytenr(&root_item, leaf->start);
124 btrfs_set_root_generation(&root_item, trans->transid);
125 btrfs_set_root_level(&root_item, 0);
126 btrfs_set_root_refs(&root_item, 0);
127 btrfs_set_root_used(&root_item, 0);
128
129 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
130 root_item.drop_level = 0;
131
132 btrfs_tree_unlock(leaf);
133 free_extent_buffer(leaf);
134 leaf = NULL;
135
136 btrfs_set_root_dirid(&root_item, 0);
137
138 key.objectid = BTRFS_TREE_LOG_OBJECTID;
139 key.offset = objectid;
140 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
141 ret = btrfs_insert_root(trans, root->fs_info->log_root_tree, &key,
142 &root_item);
143 if (ret)
144 goto fail;
145
146 new_root = btrfs_read_fs_root_no_radix(root->fs_info->log_root_tree,
147 &key);
148 BUG_ON(!new_root);
149
150 WARN_ON(root->log_root);
151 root->log_root = new_root;
152
153 /*
154 * log trees do not get reference counted because they go away
155 * before a real commit is actually done. They do store pointers
156 * to file data extents, and those reference counts still get
157 * updated (along with back refs to the log tree).
158 */
159 new_root->ref_cows = 0;
160 new_root->last_trans = trans->transid;
161
162 /*
163 * we need to make sure the root block for this new tree
164 * is marked as dirty in the dirty_log_pages tree. This
165 * is how it gets flushed down to disk at tree log commit time.
166 *
167 * the tree logging mutex keeps others from coming in and changing
168 * the new_root->node, so we can safely access it here
169 */
170 set_extent_dirty(&new_root->dirty_log_pages, new_root->node->start,
171 new_root->node->start + new_root->node->len - 1,
172 GFP_NOFS);
173
174fail:
175 return ret;
176}
177
178/*
179 * start a sub transaction and setup the log tree 81 * start a sub transaction and setup the log tree
180 * this increments the log tree writer count to make the people 82 * this increments the log tree writer count to make the people
181 * syncing the tree wait for us to finish 83 * syncing the tree wait for us to finish
@@ -184,6 +86,14 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
184 struct btrfs_root *root) 86 struct btrfs_root *root)
185{ 87{
186 int ret; 88 int ret;
89
90 mutex_lock(&root->log_mutex);
91 if (root->log_root) {
92 root->log_batch++;
93 atomic_inc(&root->log_writers);
94 mutex_unlock(&root->log_mutex);
95 return 0;
96 }
187 mutex_lock(&root->fs_info->tree_log_mutex); 97 mutex_lock(&root->fs_info->tree_log_mutex);
188 if (!root->fs_info->log_root_tree) { 98 if (!root->fs_info->log_root_tree) {
189 ret = btrfs_init_log_root_tree(trans, root->fs_info); 99 ret = btrfs_init_log_root_tree(trans, root->fs_info);
@@ -193,9 +103,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
193 ret = btrfs_add_log_tree(trans, root); 103 ret = btrfs_add_log_tree(trans, root);
194 BUG_ON(ret); 104 BUG_ON(ret);
195 } 105 }
196 atomic_inc(&root->fs_info->tree_log_writers);
197 root->fs_info->tree_log_batch++;
198 mutex_unlock(&root->fs_info->tree_log_mutex); 106 mutex_unlock(&root->fs_info->tree_log_mutex);
107 root->log_batch++;
108 atomic_inc(&root->log_writers);
109 mutex_unlock(&root->log_mutex);
199 return 0; 110 return 0;
200} 111}
201 112
@@ -212,13 +123,12 @@ static int join_running_log_trans(struct btrfs_root *root)
212 if (!root->log_root) 123 if (!root->log_root)
213 return -ENOENT; 124 return -ENOENT;
214 125
215 mutex_lock(&root->fs_info->tree_log_mutex); 126 mutex_lock(&root->log_mutex);
216 if (root->log_root) { 127 if (root->log_root) {
217 ret = 0; 128 ret = 0;
218 atomic_inc(&root->fs_info->tree_log_writers); 129 atomic_inc(&root->log_writers);
219 root->fs_info->tree_log_batch++;
220 } 130 }
221 mutex_unlock(&root->fs_info->tree_log_mutex); 131 mutex_unlock(&root->log_mutex);
222 return ret; 132 return ret;
223} 133}
224 134
@@ -228,10 +138,11 @@ static int join_running_log_trans(struct btrfs_root *root)
228 */ 138 */
229static int end_log_trans(struct btrfs_root *root) 139static int end_log_trans(struct btrfs_root *root)
230{ 140{
231 atomic_dec(&root->fs_info->tree_log_writers); 141 if (atomic_dec_and_test(&root->log_writers)) {
232 smp_mb(); 142 smp_mb();
233 if (waitqueue_active(&root->fs_info->tree_log_wait)) 143 if (waitqueue_active(&root->log_writer_wait))
234 wake_up(&root->fs_info->tree_log_wait); 144 wake_up(&root->log_writer_wait);
145 }
235 return 0; 146 return 0;
236} 147}
237 148
@@ -1704,6 +1615,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1704 1615
1705 btrfs_tree_lock(next); 1616 btrfs_tree_lock(next);
1706 clean_tree_block(trans, root, next); 1617 clean_tree_block(trans, root, next);
1618 btrfs_set_lock_blocking(next);
1707 btrfs_wait_tree_block_writeback(next); 1619 btrfs_wait_tree_block_writeback(next);
1708 btrfs_tree_unlock(next); 1620 btrfs_tree_unlock(next);
1709 1621
@@ -1750,6 +1662,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1750 next = path->nodes[*level]; 1662 next = path->nodes[*level];
1751 btrfs_tree_lock(next); 1663 btrfs_tree_lock(next);
1752 clean_tree_block(trans, root, next); 1664 clean_tree_block(trans, root, next);
1665 btrfs_set_lock_blocking(next);
1753 btrfs_wait_tree_block_writeback(next); 1666 btrfs_wait_tree_block_writeback(next);
1754 btrfs_tree_unlock(next); 1667 btrfs_tree_unlock(next);
1755 1668
@@ -1807,6 +1720,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1807 1720
1808 btrfs_tree_lock(next); 1721 btrfs_tree_lock(next);
1809 clean_tree_block(trans, root, next); 1722 clean_tree_block(trans, root, next);
1723 btrfs_set_lock_blocking(next);
1810 btrfs_wait_tree_block_writeback(next); 1724 btrfs_wait_tree_block_writeback(next);
1811 btrfs_tree_unlock(next); 1725 btrfs_tree_unlock(next);
1812 1726
@@ -1879,6 +1793,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1879 1793
1880 btrfs_tree_lock(next); 1794 btrfs_tree_lock(next);
1881 clean_tree_block(trans, log, next); 1795 clean_tree_block(trans, log, next);
1796 btrfs_set_lock_blocking(next);
1882 btrfs_wait_tree_block_writeback(next); 1797 btrfs_wait_tree_block_writeback(next);
1883 btrfs_tree_unlock(next); 1798 btrfs_tree_unlock(next);
1884 1799
@@ -1902,26 +1817,65 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1902 } 1817 }
1903 } 1818 }
1904 btrfs_free_path(path); 1819 btrfs_free_path(path);
1905 if (wc->free)
1906 free_extent_buffer(log->node);
1907 return ret; 1820 return ret;
1908} 1821}
1909 1822
1910static int wait_log_commit(struct btrfs_root *log) 1823/*
1824 * helper function to update the item for a given subvolumes log root
1825 * in the tree of log roots
1826 */
1827static int update_log_root(struct btrfs_trans_handle *trans,
1828 struct btrfs_root *log)
1829{
1830 int ret;
1831
1832 if (log->log_transid == 1) {
1833 /* insert root item on the first sync */
1834 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
1835 &log->root_key, &log->root_item);
1836 } else {
1837 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
1838 &log->root_key, &log->root_item);
1839 }
1840 return ret;
1841}
1842
1843static int wait_log_commit(struct btrfs_root *root, unsigned long transid)
1911{ 1844{
1912 DEFINE_WAIT(wait); 1845 DEFINE_WAIT(wait);
1913 u64 transid = log->fs_info->tree_log_transid; 1846 int index = transid % 2;
1914 1847
1848 /*
1849 * we only allow two pending log transactions at a time,
1850 * so we know that if ours is more than 2 older than the
1851 * current transaction, we're done
1852 */
1915 do { 1853 do {
1916 prepare_to_wait(&log->fs_info->tree_log_wait, &wait, 1854 prepare_to_wait(&root->log_commit_wait[index],
1917 TASK_UNINTERRUPTIBLE); 1855 &wait, TASK_UNINTERRUPTIBLE);
1918 mutex_unlock(&log->fs_info->tree_log_mutex); 1856 mutex_unlock(&root->log_mutex);
1919 if (atomic_read(&log->fs_info->tree_log_commit)) 1857 if (root->log_transid < transid + 2 &&
1858 atomic_read(&root->log_commit[index]))
1920 schedule(); 1859 schedule();
1921 finish_wait(&log->fs_info->tree_log_wait, &wait); 1860 finish_wait(&root->log_commit_wait[index], &wait);
1922 mutex_lock(&log->fs_info->tree_log_mutex); 1861 mutex_lock(&root->log_mutex);
1923 } while (transid == log->fs_info->tree_log_transid && 1862 } while (root->log_transid < transid + 2 &&
1924 atomic_read(&log->fs_info->tree_log_commit)); 1863 atomic_read(&root->log_commit[index]));
1864 return 0;
1865}
1866
1867static int wait_for_writer(struct btrfs_root *root)
1868{
1869 DEFINE_WAIT(wait);
1870 while (atomic_read(&root->log_writers)) {
1871 prepare_to_wait(&root->log_writer_wait,
1872 &wait, TASK_UNINTERRUPTIBLE);
1873 mutex_unlock(&root->log_mutex);
1874 if (atomic_read(&root->log_writers))
1875 schedule();
1876 mutex_lock(&root->log_mutex);
1877 finish_wait(&root->log_writer_wait, &wait);
1878 }
1925 return 0; 1879 return 0;
1926} 1880}
1927 1881
@@ -1933,57 +1887,114 @@ static int wait_log_commit(struct btrfs_root *log)
1933int btrfs_sync_log(struct btrfs_trans_handle *trans, 1887int btrfs_sync_log(struct btrfs_trans_handle *trans,
1934 struct btrfs_root *root) 1888 struct btrfs_root *root)
1935{ 1889{
1890 int index1;
1891 int index2;
1936 int ret; 1892 int ret;
1937 unsigned long batch;
1938 struct btrfs_root *log = root->log_root; 1893 struct btrfs_root *log = root->log_root;
1894 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
1939 1895
1940 mutex_lock(&log->fs_info->tree_log_mutex); 1896 mutex_lock(&root->log_mutex);
1941 if (atomic_read(&log->fs_info->tree_log_commit)) { 1897 index1 = root->log_transid % 2;
1942 wait_log_commit(log); 1898 if (atomic_read(&root->log_commit[index1])) {
1943 goto out; 1899 wait_log_commit(root, root->log_transid);
1900 mutex_unlock(&root->log_mutex);
1901 return 0;
1944 } 1902 }
1945 atomic_set(&log->fs_info->tree_log_commit, 1); 1903 atomic_set(&root->log_commit[index1], 1);
1904
1905 /* wait for previous tree log sync to complete */
1906 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
1907 wait_log_commit(root, root->log_transid - 1);
1946 1908
1947 while (1) { 1909 while (1) {
1948 batch = log->fs_info->tree_log_batch; 1910 unsigned long batch = root->log_batch;
1949 mutex_unlock(&log->fs_info->tree_log_mutex); 1911 mutex_unlock(&root->log_mutex);
1950 schedule_timeout_uninterruptible(1); 1912 schedule_timeout_uninterruptible(1);
1951 mutex_lock(&log->fs_info->tree_log_mutex); 1913 mutex_lock(&root->log_mutex);
1952 1914 wait_for_writer(root);
1953 while (atomic_read(&log->fs_info->tree_log_writers)) { 1915 if (batch == root->log_batch)
1954 DEFINE_WAIT(wait);
1955 prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
1956 TASK_UNINTERRUPTIBLE);
1957 mutex_unlock(&log->fs_info->tree_log_mutex);
1958 if (atomic_read(&log->fs_info->tree_log_writers))
1959 schedule();
1960 mutex_lock(&log->fs_info->tree_log_mutex);
1961 finish_wait(&log->fs_info->tree_log_wait, &wait);
1962 }
1963 if (batch == log->fs_info->tree_log_batch)
1964 break; 1916 break;
1965 } 1917 }
1966 1918
1967 ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages); 1919 ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
1968 BUG_ON(ret); 1920 BUG_ON(ret);
1969 ret = btrfs_write_and_wait_marked_extents(root->fs_info->log_root_tree, 1921
1970 &root->fs_info->log_root_tree->dirty_log_pages); 1922 btrfs_set_root_bytenr(&log->root_item, log->node->start);
1923 btrfs_set_root_generation(&log->root_item, trans->transid);
1924 btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
1925
1926 root->log_batch = 0;
1927 root->log_transid++;
1928 log->log_transid = root->log_transid;
1929 smp_mb();
1930 /*
1931 * log tree has been flushed to disk, new modifications of
1932 * the log will be written to new positions. so it's safe to
1933 * allow log writers to go in.
1934 */
1935 mutex_unlock(&root->log_mutex);
1936
1937 mutex_lock(&log_root_tree->log_mutex);
1938 log_root_tree->log_batch++;
1939 atomic_inc(&log_root_tree->log_writers);
1940 mutex_unlock(&log_root_tree->log_mutex);
1941
1942 ret = update_log_root(trans, log);
1943 BUG_ON(ret);
1944
1945 mutex_lock(&log_root_tree->log_mutex);
1946 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
1947 smp_mb();
1948 if (waitqueue_active(&log_root_tree->log_writer_wait))
1949 wake_up(&log_root_tree->log_writer_wait);
1950 }
1951
1952 index2 = log_root_tree->log_transid % 2;
1953 if (atomic_read(&log_root_tree->log_commit[index2])) {
1954 wait_log_commit(log_root_tree, log_root_tree->log_transid);
1955 mutex_unlock(&log_root_tree->log_mutex);
1956 goto out;
1957 }
1958 atomic_set(&log_root_tree->log_commit[index2], 1);
1959
1960 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2]))
1961 wait_log_commit(log_root_tree, log_root_tree->log_transid - 1);
1962
1963 wait_for_writer(log_root_tree);
1964
1965 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
1966 &log_root_tree->dirty_log_pages);
1971 BUG_ON(ret); 1967 BUG_ON(ret);
1972 1968
1973 btrfs_set_super_log_root(&root->fs_info->super_for_commit, 1969 btrfs_set_super_log_root(&root->fs_info->super_for_commit,
1974 log->fs_info->log_root_tree->node->start); 1970 log_root_tree->node->start);
1975 btrfs_set_super_log_root_level(&root->fs_info->super_for_commit, 1971 btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
1976 btrfs_header_level(log->fs_info->log_root_tree->node)); 1972 btrfs_header_level(log_root_tree->node));
1973
1974 log_root_tree->log_batch = 0;
1975 log_root_tree->log_transid++;
1976 smp_mb();
1977
1978 mutex_unlock(&log_root_tree->log_mutex);
1979
1980 /*
1981 * nobody else is going to jump in and write the ctree
1982 * super here because the log_commit atomic below is protecting
1983 * us. We must be called with a transaction handle pinning
1984 * the running transaction open, so a full commit can't hop
1985 * in and cause problems either.
1986 */
1987 write_ctree_super(trans, root->fs_info->tree_root, 2);
1977 1988
1978 write_ctree_super(trans, log->fs_info->tree_root, 2); 1989 atomic_set(&log_root_tree->log_commit[index2], 0);
1979 log->fs_info->tree_log_transid++;
1980 log->fs_info->tree_log_batch = 0;
1981 atomic_set(&log->fs_info->tree_log_commit, 0);
1982 smp_mb(); 1990 smp_mb();
1983 if (waitqueue_active(&log->fs_info->tree_log_wait)) 1991 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
1984 wake_up(&log->fs_info->tree_log_wait); 1992 wake_up(&log_root_tree->log_commit_wait[index2]);
1985out: 1993out:
1986 mutex_unlock(&log->fs_info->tree_log_mutex); 1994 atomic_set(&root->log_commit[index1], 0);
1995 smp_mb();
1996 if (waitqueue_active(&root->log_commit_wait[index1]))
1997 wake_up(&root->log_commit_wait[index1]);
1987 return 0; 1998 return 0;
1988} 1999}
1989 2000
@@ -2019,38 +2030,18 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2019 start, end, GFP_NOFS); 2030 start, end, GFP_NOFS);
2020 } 2031 }
2021 2032
2022 log = root->log_root; 2033 if (log->log_transid > 0) {
2023 ret = btrfs_del_root(trans, root->fs_info->log_root_tree, 2034 ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
2024 &log->root_key); 2035 &log->root_key);
2025 BUG_ON(ret); 2036 BUG_ON(ret);
2037 }
2026 root->log_root = NULL; 2038 root->log_root = NULL;
2027 kfree(root->log_root); 2039 free_extent_buffer(log->node);
2040 kfree(log);
2028 return 0; 2041 return 0;
2029} 2042}
2030 2043
2031/* 2044/*
2032 * helper function to update the item for a given subvolumes log root
2033 * in the tree of log roots
2034 */
2035static int update_log_root(struct btrfs_trans_handle *trans,
2036 struct btrfs_root *log)
2037{
2038 u64 bytenr = btrfs_root_bytenr(&log->root_item);
2039 int ret;
2040
2041 if (log->node->start == bytenr)
2042 return 0;
2043
2044 btrfs_set_root_bytenr(&log->root_item, log->node->start);
2045 btrfs_set_root_generation(&log->root_item, trans->transid);
2046 btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
2047 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2048 &log->root_key, &log->root_item);
2049 BUG_ON(ret);
2050 return ret;
2051}
2052
2053/*
2054 * If both a file and directory are logged, and unlinks or renames are 2045 * If both a file and directory are logged, and unlinks or renames are
2055 * mixed in, we have a few interesting corners: 2046 * mixed in, we have a few interesting corners:
2056 * 2047 *
@@ -2711,11 +2702,6 @@ next_slot:
2711 2702
2712 btrfs_free_path(path); 2703 btrfs_free_path(path);
2713 btrfs_free_path(dst_path); 2704 btrfs_free_path(dst_path);
2714
2715 mutex_lock(&root->fs_info->tree_log_mutex);
2716 ret = update_log_root(trans, log);
2717 BUG_ON(ret);
2718 mutex_unlock(&root->fs_info->tree_log_mutex);
2719out: 2705out:
2720 return 0; 2706 return 0;
2721} 2707}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3451e1cca2b5..bcd14ebccae1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -20,7 +20,6 @@
20#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/version.h>
24#include <asm/div64.h> 23#include <asm/div64.h>
25#include "compat.h" 24#include "compat.h"
26#include "ctree.h" 25#include "ctree.h"
@@ -104,10 +103,8 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
104 u64 devid, u8 *uuid) 103 u64 devid, u8 *uuid)
105{ 104{
106 struct btrfs_device *dev; 105 struct btrfs_device *dev;
107 struct list_head *cur;
108 106
109 list_for_each(cur, head) { 107 list_for_each_entry(dev, head, dev_list) {
110 dev = list_entry(cur, struct btrfs_device, dev_list);
111 if (dev->devid == devid && 108 if (dev->devid == devid &&
112 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) { 109 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
113 return dev; 110 return dev;
@@ -118,11 +115,9 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
118 115
119static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid) 116static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
120{ 117{
121 struct list_head *cur;
122 struct btrfs_fs_devices *fs_devices; 118 struct btrfs_fs_devices *fs_devices;
123 119
124 list_for_each(cur, &fs_uuids) { 120 list_for_each_entry(fs_devices, &fs_uuids, list) {
125 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
126 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 121 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
127 return fs_devices; 122 return fs_devices;
128 } 123 }
@@ -159,6 +154,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
159loop: 154loop:
160 spin_lock(&device->io_lock); 155 spin_lock(&device->io_lock);
161 156
157loop_lock:
162 /* take all the bios off the list at once and process them 158 /* take all the bios off the list at once and process them
163 * later on (without the lock held). But, remember the 159 * later on (without the lock held). But, remember the
164 * tail and other pointers so the bios can be properly reinserted 160 * tail and other pointers so the bios can be properly reinserted
@@ -208,7 +204,7 @@ loop:
208 * is now congested. Back off and let other work structs 204 * is now congested. Back off and let other work structs
209 * run instead 205 * run instead
210 */ 206 */
211 if (pending && bdi_write_congested(bdi) && 207 if (pending && bdi_write_congested(bdi) && num_run > 16 &&
212 fs_info->fs_devices->open_devices > 1) { 208 fs_info->fs_devices->open_devices > 1) {
213 struct bio *old_head; 209 struct bio *old_head;
214 210
@@ -220,7 +216,8 @@ loop:
220 tail->bi_next = old_head; 216 tail->bi_next = old_head;
221 else 217 else
222 device->pending_bio_tail = tail; 218 device->pending_bio_tail = tail;
223 device->running_pending = 0; 219
220 device->running_pending = 1;
224 221
225 spin_unlock(&device->io_lock); 222 spin_unlock(&device->io_lock);
226 btrfs_requeue_work(&device->work); 223 btrfs_requeue_work(&device->work);
@@ -229,6 +226,11 @@ loop:
229 } 226 }
230 if (again) 227 if (again)
231 goto loop; 228 goto loop;
229
230 spin_lock(&device->io_lock);
231 if (device->pending_bios)
232 goto loop_lock;
233 spin_unlock(&device->io_lock);
232done: 234done:
233 return 0; 235 return 0;
234} 236}
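
run_scheduled_bios() already detaches the whole pending list and processes it without the lock held; the loop_lock/goto added above closes the window where bios queued during that unlocked processing would otherwise sit until the next submission. A self-contained userspace analogue of that detach-process-recheck shape (pthreads, illustrative names only):

#include <pthread.h>
#include <stddef.h>

struct work_item { struct work_item *next; };

struct work_queue {
	pthread_mutex_t lock;
	struct work_item *pending;
};

static void process_one(struct work_item *item)
{
	(void)item;                  /* stand-in for submitting one bio */
}

static void run_scheduled_work(struct work_queue *q)
{
	struct work_item *batch;

	pthread_mutex_lock(&q->lock);
again_locked:
	/* detach everything queued so far in one shot */
	batch = q->pending;
	q->pending = NULL;
	pthread_mutex_unlock(&q->lock);

	while (batch) {
		struct work_item *next = batch->next;

		process_one(batch);  /* heavy work, lock not held */
		batch = next;
	}

	/* anything queued while we were busy? drain it before returning */
	pthread_mutex_lock(&q->lock);
	if (q->pending)
		goto again_locked;
	pthread_mutex_unlock(&q->lock);
}
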
@@ -345,14 +347,11 @@ error:
345 347
346int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) 348int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
347{ 349{
348 struct list_head *tmp; 350 struct btrfs_device *device, *next;
349 struct list_head *cur;
350 struct btrfs_device *device;
351 351
352 mutex_lock(&uuid_mutex); 352 mutex_lock(&uuid_mutex);
353again: 353again:
354 list_for_each_safe(cur, tmp, &fs_devices->devices) { 354 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
355 device = list_entry(cur, struct btrfs_device, dev_list);
356 if (device->in_fs_metadata) 355 if (device->in_fs_metadata)
357 continue; 356 continue;
358 357
@@ -383,14 +382,12 @@ again:
383 382
384static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 383static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
385{ 384{
386 struct list_head *cur;
387 struct btrfs_device *device; 385 struct btrfs_device *device;
388 386
389 if (--fs_devices->opened > 0) 387 if (--fs_devices->opened > 0)
390 return 0; 388 return 0;
391 389
392 list_for_each(cur, &fs_devices->devices) { 390 list_for_each_entry(device, &fs_devices->devices, dev_list) {
393 device = list_entry(cur, struct btrfs_device, dev_list);
394 if (device->bdev) { 391 if (device->bdev) {
395 close_bdev_exclusive(device->bdev, device->mode); 392 close_bdev_exclusive(device->bdev, device->mode);
396 fs_devices->open_devices--; 393 fs_devices->open_devices--;
@@ -439,7 +436,6 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
439{ 436{
440 struct block_device *bdev; 437 struct block_device *bdev;
441 struct list_head *head = &fs_devices->devices; 438 struct list_head *head = &fs_devices->devices;
442 struct list_head *cur;
443 struct btrfs_device *device; 439 struct btrfs_device *device;
444 struct block_device *latest_bdev = NULL; 440 struct block_device *latest_bdev = NULL;
445 struct buffer_head *bh; 441 struct buffer_head *bh;
@@ -450,8 +446,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
450 int seeding = 1; 446 int seeding = 1;
451 int ret = 0; 447 int ret = 0;
452 448
453 list_for_each(cur, head) { 449 list_for_each_entry(device, head, dev_list) {
454 device = list_entry(cur, struct btrfs_device, dev_list);
455 if (device->bdev) 450 if (device->bdev)
456 continue; 451 continue;
457 if (!device->name) 452 if (!device->name)
@@ -578,7 +573,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
578 *(unsigned long long *)disk_super->fsid, 573 *(unsigned long long *)disk_super->fsid,
579 *(unsigned long long *)(disk_super->fsid + 8)); 574 *(unsigned long long *)(disk_super->fsid + 8));
580 } 575 }
581 printk(KERN_INFO "devid %llu transid %llu %s\n", 576 printk(KERN_CONT "devid %llu transid %llu %s\n",
582 (unsigned long long)devid, (unsigned long long)transid, path); 577 (unsigned long long)devid, (unsigned long long)transid, path);
583 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 578 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
584 579
@@ -1017,14 +1012,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1017 } 1012 }
1018 1013
1019 if (strcmp(device_path, "missing") == 0) { 1014 if (strcmp(device_path, "missing") == 0) {
1020 struct list_head *cur;
1021 struct list_head *devices; 1015 struct list_head *devices;
1022 struct btrfs_device *tmp; 1016 struct btrfs_device *tmp;
1023 1017
1024 device = NULL; 1018 device = NULL;
1025 devices = &root->fs_info->fs_devices->devices; 1019 devices = &root->fs_info->fs_devices->devices;
1026 list_for_each(cur, devices) { 1020 list_for_each_entry(tmp, devices, dev_list) {
1027 tmp = list_entry(cur, struct btrfs_device, dev_list);
1028 if (tmp->in_fs_metadata && !tmp->bdev) { 1021 if (tmp->in_fs_metadata && !tmp->bdev) {
1029 device = tmp; 1022 device = tmp;
1030 break; 1023 break;
@@ -1280,7 +1273,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1280 struct btrfs_trans_handle *trans; 1273 struct btrfs_trans_handle *trans;
1281 struct btrfs_device *device; 1274 struct btrfs_device *device;
1282 struct block_device *bdev; 1275 struct block_device *bdev;
1283 struct list_head *cur;
1284 struct list_head *devices; 1276 struct list_head *devices;
1285 struct super_block *sb = root->fs_info->sb; 1277 struct super_block *sb = root->fs_info->sb;
1286 u64 total_bytes; 1278 u64 total_bytes;
@@ -1304,8 +1296,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1304 mutex_lock(&root->fs_info->volume_mutex); 1296 mutex_lock(&root->fs_info->volume_mutex);
1305 1297
1306 devices = &root->fs_info->fs_devices->devices; 1298 devices = &root->fs_info->fs_devices->devices;
1307 list_for_each(cur, devices) { 1299 list_for_each_entry(device, devices, dev_list) {
1308 device = list_entry(cur, struct btrfs_device, dev_list);
1309 if (device->bdev == bdev) { 1300 if (device->bdev == bdev) {
1310 ret = -EEXIST; 1301 ret = -EEXIST;
1311 goto error; 1302 goto error;
@@ -1704,7 +1695,6 @@ static u64 div_factor(u64 num, int factor)
1704int btrfs_balance(struct btrfs_root *dev_root) 1695int btrfs_balance(struct btrfs_root *dev_root)
1705{ 1696{
1706 int ret; 1697 int ret;
1707 struct list_head *cur;
1708 struct list_head *devices = &dev_root->fs_info->fs_devices->devices; 1698 struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1709 struct btrfs_device *device; 1699 struct btrfs_device *device;
1710 u64 old_size; 1700 u64 old_size;
@@ -1723,8 +1713,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
1723 dev_root = dev_root->fs_info->dev_root; 1713 dev_root = dev_root->fs_info->dev_root;
1724 1714
1725 /* step one make some room on all the devices */ 1715 /* step one make some room on all the devices */
1726 list_for_each(cur, devices) { 1716 list_for_each_entry(device, devices, dev_list) {
1727 device = list_entry(cur, struct btrfs_device, dev_list);
1728 old_size = device->total_bytes; 1717 old_size = device->total_bytes;
1729 size_to_free = div_factor(old_size, 1); 1718 size_to_free = div_factor(old_size, 1);
1730 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024); 1719 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 7f332e270894..a9d3bf4d2689 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/rwsem.h> 22#include <linux/rwsem.h>
23#include <linux/xattr.h> 23#include <linux/xattr.h>
24#include <linux/security.h>
24#include "ctree.h" 25#include "ctree.h"
25#include "btrfs_inode.h" 26#include "btrfs_inode.h"
26#include "transaction.h" 27#include "transaction.h"
@@ -45,9 +46,12 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
45 /* lookup the xattr by name */ 46 /* lookup the xattr by name */
46 di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, 47 di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
47 strlen(name), 0); 48 strlen(name), 0);
48 if (!di || IS_ERR(di)) { 49 if (!di) {
49 ret = -ENODATA; 50 ret = -ENODATA;
50 goto out; 51 goto out;
52 } else if (IS_ERR(di)) {
53 ret = PTR_ERR(di);
54 goto out;
51 } 55 }
52 56
53 leaf = path->nodes[0]; 57 leaf = path->nodes[0];
@@ -62,6 +66,14 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
62 ret = -ERANGE; 66 ret = -ERANGE;
63 goto out; 67 goto out;
64 } 68 }
69
70 /*
71 * The way things are packed into the leaf is like this
72 * |struct btrfs_dir_item|name|data|
 73 * where name is the xattr name, e.g. security.foo, and data is the
 74 * content of the xattr. data_ptr points to the location in memory
 75 * where the data starts in the in-memory leaf
76 */
65 data_ptr = (unsigned long)((char *)(di + 1) + 77 data_ptr = (unsigned long)((char *)(di + 1) +
66 btrfs_dir_name_len(leaf, di)); 78 btrfs_dir_name_len(leaf, di));
67 read_extent_buffer(leaf, buffer, data_ptr, 79 read_extent_buffer(leaf, buffer, data_ptr,
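
The comment above describes the on-leaf packing as |struct btrfs_dir_item|name|data|. A tiny standalone illustration of the same offset arithmetic (the header struct here is an illustrative stand-in, not the real btrfs_dir_item): data starts one header plus one name length past the item.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* illustrative stand-in for the on-leaf item header */
struct dir_item_hdr {
	unsigned short name_len;
	unsigned short data_len;
};

int main(void)
{
	char leaf[64];
	struct dir_item_hdr *di = (struct dir_item_hdr *)leaf;
	const char *name = "security.foo";
	const char *data = "bar";

	di->name_len = strlen(name);
	di->data_len = strlen(data);
	memcpy((char *)(di + 1), name, di->name_len);
	memcpy((char *)(di + 1) + di->name_len, data, di->data_len);

	/* same arithmetic as data_ptr above: skip the header, then the name */
	char *data_ptr = (char *)(di + 1) + di->name_len;
	printf("%.*s\n", di->data_len, data_ptr);
	return 0;
}
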
@@ -86,7 +98,7 @@ int __btrfs_setxattr(struct inode *inode, const char *name,
86 if (!path) 98 if (!path)
87 return -ENOMEM; 99 return -ENOMEM;
88 100
89 trans = btrfs_start_transaction(root, 1); 101 trans = btrfs_join_transaction(root, 1);
90 btrfs_set_trans_block_group(trans, inode); 102 btrfs_set_trans_block_group(trans, inode);
91 103
92 /* first lets see if we already have this xattr */ 104 /* first lets see if we already have this xattr */
@@ -176,7 +188,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
176 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 188 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
177 if (ret < 0) 189 if (ret < 0)
178 goto err; 190 goto err;
179 ret = 0;
180 advance = 0; 191 advance = 0;
181 while (1) { 192 while (1) {
182 leaf = path->nodes[0]; 193 leaf = path->nodes[0];
@@ -320,3 +331,34 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
320 return -EOPNOTSUPP; 331 return -EOPNOTSUPP;
321 return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE); 332 return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
322} 333}
334
335int btrfs_xattr_security_init(struct inode *inode, struct inode *dir)
336{
337 int err;
338 size_t len;
339 void *value;
340 char *suffix;
341 char *name;
342
343 err = security_inode_init_security(inode, dir, &suffix, &value, &len);
344 if (err) {
345 if (err == -EOPNOTSUPP)
346 return 0;
347 return err;
348 }
349
350 name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1,
351 GFP_NOFS);
352 if (!name) {
353 err = -ENOMEM;
354 } else {
355 strcpy(name, XATTR_SECURITY_PREFIX);
356 strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
357 err = __btrfs_setxattr(inode, name, value, len, 0);
358 kfree(name);
359 }
360
361 kfree(suffix);
362 kfree(value);
363 return err;
364}
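
The inode.c hunk earlier in this patch switches callers from btrfs_init_acl() to btrfs_init_inode_security(), and this file now supplies btrfs_xattr_security_init(). The wrapper itself is not visible in this excerpt; a hedged sketch of how it would plausibly chain the two helpers (the actual definition lives in inode.c):

/*
 * Sketch only -- the real wrapper is defined in inode.c and is not
 * shown in this diff; this is simply the plausible composition of
 * the two helpers referenced by this patch.
 */
static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
{
	int err;

	err = btrfs_init_acl(inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(inode, dir);
	return err;
}
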
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 5b1d08f8e68d..c71e9c3cf3f7 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -36,4 +36,6 @@ extern int btrfs_setxattr(struct dentry *dentry, const char *name,
36 const void *value, size_t size, int flags); 36 const void *value, size_t size, int flags);
37extern int btrfs_removexattr(struct dentry *dentry, const char *name); 37extern int btrfs_removexattr(struct dentry *dentry, const char *name);
38 38
39extern int btrfs_xattr_security_init(struct inode *inode, struct inode *dir);
40
39#endif /* __XATTR__ */ 41#endif /* __XATTR__ */