author     Josef Bacik <jbacik@redhat.com>          2008-10-29 14:49:05 -0400
committer  Chris Mason <chris.mason@oracle.com>     2008-10-29 14:49:05 -0400
commit     2517920135b0d29e70453e5b03d70d7b94207df3 (patch)
tree       e0c526faa5c2c7bc3add340e5b7e8df26924dca9
parent     80eb234af09dbe6c97b2e3d60a13ec391e98fbba (diff)
Btrfs: nuke fs wide allocation mutex V2
This patch removes the giant fs_info->alloc_mutex and replaces it with a bunch of little locks.

There is now a pinned_mutex, which is used when messing with the pinned_extents extent io tree, and an extent_ins_mutex, which is used with the pending_del and extent_ins extent io trees.

The locking for the extent tree stuff was inspired by a patch that Yan Zheng wrote to fix a race condition. I cleaned it up some and changed the locking around a little bit, but the idea remains the same: instead of holding the extent_ins_mutex throughout the processing of an extent on the extent_ins or pending_del trees, we hold it only while searching and while clearing the bits on those trees, and lock the extent itself for the duration of the operations on it.

Also, to keep from getting hung up waiting to lock an extent, I've added try_lock_extent: if we cannot lock an extent we move on to the next one in the tree and come back to that one later.

I have tested this heavily and it does not appear to break anything. This has to be applied on top of my find_free_extent redo patch. I tested this patch on top of Yan's space rebalancing code and it worked fine. The only thing that has changed since the last version is that I pulled out all my debugging stuff; apparently I forgot to run guilt refresh before I sent the last patch out.

Thank you,

Signed-off-by: Josef Bacik <jbacik@redhat.com>
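The try-lock-and-skip scan described above is the heart of the reworked finish_current_insert()/del_pending_extents() loops. As a rough illustration only, here is a minimal user-space sketch of that pattern written against pthreads rather than the btrfs extent_io locking; every structure and name below is illustrative and not btrfs code. The "tree mutex" stands in for extent_ins_mutex, and the per-record mutex stands in for the locked extent range.

/*
 * Minimal sketch (not btrfs code): hold the tree mutex only long enough
 * to find the next pending record, try-lock that record, and if it is
 * busy remember where we were and move on, wrapping around later to
 * retry the entries we skipped.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_PENDING 4

struct pending_op {
	pthread_mutex_t lock;	/* per-record lock, like the locked extent range */
	bool done;
	int id;
};

static pthread_mutex_t tree_mutex = PTHREAD_MUTEX_INITIALIZER; /* like extent_ins_mutex */
static struct pending_op pending[NR_PENDING];

/* Find the first unfinished record at or after 'search'; -1 if none. */
static int find_first_pending(int search)
{
	for (int i = search; i < NR_PENDING; i++)
		if (!pending[i].done)
			return i;
	return -1;
}

static void process_all_pending(void)
{
	int search = 0;

	while (1) {
		pthread_mutex_lock(&tree_mutex);
		int i = find_first_pending(search);
		if (i < 0) {
			pthread_mutex_unlock(&tree_mutex);
			if (search) {		/* we skipped some entries, go back for them */
				search = 0;
				continue;
			}
			break;			/* really nothing left */
		}

		if (pthread_mutex_trylock(&pending[i].lock) != 0) {
			/* busy: skip it for now, come back after the wrap */
			search = i + 1;
			pthread_mutex_unlock(&tree_mutex);
			continue;
		}
		pthread_mutex_unlock(&tree_mutex);	/* heavy work runs without the tree mutex */

		printf("processing pending op %d\n", pending[i].id);
		pending[i].done = true;			/* stands in for inserting the extent item */

		pthread_mutex_unlock(&pending[i].lock);
		search = 0;
	}
}

int main(void)
{
	for (int i = 0; i < NR_PENDING; i++) {
		pthread_mutex_init(&pending[i].lock, NULL);
		pending[i].id = i;
	}
	process_all_pending();
	return 0;
}

The point of the pattern is that the heavy work runs with only the per-record lock held, and a record that is busy is skipped and retried after the scan wraps around instead of blocking the whole loop.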
-rw-r--r--  fs/btrfs/ctree.c              3
-rw-r--r--  fs/btrfs/ctree.h              8
-rw-r--r--  fs/btrfs/disk-io.c            3
-rw-r--r--  fs/btrfs/extent-tree.c      333
-rw-r--r--  fs/btrfs/extent_io.c         14
-rw-r--r--  fs/btrfs/extent_io.h          2
-rw-r--r--  fs/btrfs/free-space-cache.c  92
-rw-r--r--  fs/btrfs/transaction.c        2
-rw-r--r--  fs/btrfs/tree-defrag.c        3
-rw-r--r--  fs/btrfs/tree-log.c           4
-rw-r--r--  fs/btrfs/volumes.c            2
11 files changed, 250 insertions, 216 deletions
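A second rough sketch (again user-space pthreads, with all structures and names illustrative rather than btrfs APIs) of the other half of the change: each block group carries its own alloc_mutex around its free-space bookkeeping, and the shared space-accounting counters are guarded by their own small lock, so allocations from different block groups no longer serialize on one fs-wide mutex.

/*
 * Minimal sketch (not btrfs code) of per-group locking plus a separate
 * counter lock, standing in for block_group->alloc_mutex and
 * space_info->lock in the patch below.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct space_info {
	pthread_spinlock_t lock;	/* guards the counters only */
	uint64_t bytes_used;
	uint64_t bytes_reserved;
};

struct block_group {
	pthread_mutex_t alloc_mutex;	/* guards this group's free space */
	uint64_t free_start;
	uint64_t free_bytes;
	struct space_info *sinfo;
};

/* Carve 'bytes' out of one group; returns the start offset, or 0 on failure. */
static uint64_t alloc_from_group(struct block_group *bg, uint64_t bytes)
{
	uint64_t start = 0;

	pthread_mutex_lock(&bg->alloc_mutex);
	if (bg->free_bytes >= bytes) {
		start = bg->free_start;
		bg->free_start += bytes;
		bg->free_bytes -= bytes;
	}
	pthread_mutex_unlock(&bg->alloc_mutex);

	if (start) {
		pthread_spin_lock(&bg->sinfo->lock);
		bg->sinfo->bytes_reserved += bytes;
		pthread_spin_unlock(&bg->sinfo->lock);
	}
	return start;
}

int main(void)
{
	struct space_info si = { .bytes_used = 0, .bytes_reserved = 0 };
	pthread_spin_init(&si.lock, PTHREAD_PROCESS_PRIVATE);

	struct block_group bg = { .free_start = 4096, .free_bytes = 1 << 20, .sinfo = &si };
	pthread_mutex_init(&bg.alloc_mutex, NULL);

	uint64_t where = alloc_from_group(&bg, 8192);
	printf("allocated 8192 bytes at %llu, reserved=%llu\n",
	       (unsigned long long)where, (unsigned long long)si.bytes_reserved);
	return 0;
}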
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 73899d0f9d8f..f82f8db02275 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1387,8 +1387,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 	lowest_level = p->lowest_level;
 	WARN_ON(lowest_level && ins_len > 0);
 	WARN_ON(p->nodes[0] != NULL);
-	WARN_ON(cow && root == root->fs_info->extent_root &&
-	       !mutex_is_locked(&root->fs_info->alloc_mutex));
+
 	if (ins_len < 0)
 		lowest_unlock = 2;
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index caa860a1c3e5..fdba4f1b634e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -558,6 +558,7 @@ struct btrfs_block_group_cache {
 	struct btrfs_key key;
 	struct btrfs_block_group_item item;
 	spinlock_t lock;
+	struct mutex alloc_mutex;
 	u64 pinned;
 	u64 reserved;
 	u64 flags;
@@ -635,7 +636,8 @@ struct btrfs_fs_info {
 	struct mutex tree_log_mutex;
 	struct mutex transaction_kthread_mutex;
 	struct mutex cleaner_mutex;
-	struct mutex alloc_mutex;
+	struct mutex extent_ins_mutex;
+	struct mutex pinned_mutex;
 	struct mutex chunk_mutex;
 	struct mutex drop_mutex;
 	struct mutex volume_mutex;
@@ -1941,8 +1943,12 @@ int btrfs_acl_chmod(struct inode *inode);
 /* free-space-cache.c */
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 			 u64 bytenr, u64 size);
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+			      u64 offset, u64 bytes);
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 			    u64 bytenr, u64 size);
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+				 u64 offset, u64 bytes);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
 				   *block_group);
 struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 796256440dfa..d1137d7ea8d4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1460,7 +1460,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->trans_mutex);
 	mutex_init(&fs_info->tree_log_mutex);
 	mutex_init(&fs_info->drop_mutex);
-	mutex_init(&fs_info->alloc_mutex);
+	mutex_init(&fs_info->extent_ins_mutex);
+	mutex_init(&fs_info->pinned_mutex);
 	mutex_init(&fs_info->chunk_mutex);
 	mutex_init(&fs_info->transaction_kthread_mutex);
 	mutex_init(&fs_info->cleaner_mutex);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e3b3e13a4817..564260872c7e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -53,24 +53,6 @@ __btrfs_find_block_group(struct btrfs_root *root,
53 struct btrfs_block_group_cache *hint, 53 struct btrfs_block_group_cache *hint,
54 u64 search_start, int data, int owner); 54 u64 search_start, int data, int owner);
55 55
56void maybe_lock_mutex(struct btrfs_root *root)
57{
58 if (root != root->fs_info->extent_root &&
59 root != root->fs_info->chunk_root &&
60 root != root->fs_info->dev_root) {
61 mutex_lock(&root->fs_info->alloc_mutex);
62 }
63}
64
65void maybe_unlock_mutex(struct btrfs_root *root)
66{
67 if (root != root->fs_info->extent_root &&
68 root != root->fs_info->chunk_root &&
69 root != root->fs_info->dev_root) {
70 mutex_unlock(&root->fs_info->alloc_mutex);
71 }
72}
73
74static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) 56static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
75{ 57{
76 return (cache->flags & bits) == bits; 58 return (cache->flags & bits) == bits;
@@ -164,6 +146,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
164 u64 extent_start, extent_end, size; 146 u64 extent_start, extent_end, size;
165 int ret; 147 int ret;
166 148
149 mutex_lock(&info->pinned_mutex);
167 while (start < end) { 150 while (start < end) {
168 ret = find_first_extent_bit(&info->pinned_extents, start, 151 ret = find_first_extent_bit(&info->pinned_extents, start,
169 &extent_start, &extent_end, 152 &extent_start, &extent_end,
@@ -175,7 +158,8 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
175 start = extent_end + 1; 158 start = extent_end + 1;
176 } else if (extent_start > start && extent_start < end) { 159 } else if (extent_start > start && extent_start < end) {
177 size = extent_start - start; 160 size = extent_start - start;
178 ret = btrfs_add_free_space(block_group, start, size); 161 ret = btrfs_add_free_space_lock(block_group, start,
162 size);
179 BUG_ON(ret); 163 BUG_ON(ret);
180 start = extent_end + 1; 164 start = extent_end + 1;
181 } else { 165 } else {
@@ -185,9 +169,10 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
185 169
186 if (start < end) { 170 if (start < end) {
187 size = end - start; 171 size = end - start;
188 ret = btrfs_add_free_space(block_group, start, size); 172 ret = btrfs_add_free_space_lock(block_group, start, size);
189 BUG_ON(ret); 173 BUG_ON(ret);
190 } 174 }
175 mutex_unlock(&info->pinned_mutex);
191 176
192 return 0; 177 return 0;
193} 178}
@@ -445,13 +430,11 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
445 430
446 path = btrfs_alloc_path(); 431 path = btrfs_alloc_path();
447 BUG_ON(!path); 432 BUG_ON(!path);
448 maybe_lock_mutex(root);
449 key.objectid = start; 433 key.objectid = start;
450 key.offset = len; 434 key.offset = len;
451 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); 435 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
452 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, 436 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
453 0, 0); 437 0, 0);
454 maybe_unlock_mutex(root);
455 btrfs_free_path(path); 438 btrfs_free_path(path);
456 return ret; 439 return ret;
457} 440}
@@ -676,8 +659,9 @@ static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
676 659
677 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL); 660 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
678 num_bytes = btrfs_level_size(root, (int)owner_objectid); 661 num_bytes = btrfs_level_size(root, (int)owner_objectid);
662 mutex_lock(&root->fs_info->extent_ins_mutex);
679 if (test_range_bit(&root->fs_info->extent_ins, bytenr, 663 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
680 bytenr + num_bytes - 1, EXTENT_LOCKED, 0)) { 664 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
681 u64 priv; 665 u64 priv;
682 ret = get_state_private(&root->fs_info->extent_ins, 666 ret = get_state_private(&root->fs_info->extent_ins,
683 bytenr, &priv); 667 bytenr, &priv);
@@ -686,6 +670,7 @@ static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
686 (unsigned long)priv; 670 (unsigned long)priv;
687 BUG_ON(extent_op->parent != orig_parent); 671 BUG_ON(extent_op->parent != orig_parent);
688 BUG_ON(extent_op->generation != orig_generation); 672 BUG_ON(extent_op->generation != orig_generation);
673
689 extent_op->parent = parent; 674 extent_op->parent = parent;
690 extent_op->generation = ref_generation; 675 extent_op->generation = ref_generation;
691 } else { 676 } else {
@@ -703,10 +688,11 @@ static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
703 688
704 set_extent_bits(&root->fs_info->extent_ins, 689 set_extent_bits(&root->fs_info->extent_ins,
705 bytenr, bytenr + num_bytes - 1, 690 bytenr, bytenr + num_bytes - 1,
706 EXTENT_LOCKED, GFP_NOFS); 691 EXTENT_WRITEBACK, GFP_NOFS);
707 set_state_private(&root->fs_info->extent_ins, 692 set_state_private(&root->fs_info->extent_ins,
708 bytenr, (unsigned long)extent_op); 693 bytenr, (unsigned long)extent_op);
709 } 694 }
695 mutex_unlock(&root->fs_info->extent_ins_mutex);
710 return 0; 696 return 0;
711 } 697 }
712 698
@@ -742,12 +728,10 @@ int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
742 if (ref_root == BTRFS_TREE_LOG_OBJECTID && 728 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
743 owner_objectid < BTRFS_FIRST_FREE_OBJECTID) 729 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
744 return 0; 730 return 0;
745 maybe_lock_mutex(root);
746 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent, 731 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
747 parent, ref_root, ref_root, 732 parent, ref_root, ref_root,
748 ref_generation, ref_generation, 733 ref_generation, ref_generation,
749 owner_objectid); 734 owner_objectid);
750 maybe_unlock_mutex(root);
751 return ret; 735 return ret;
752} 736}
753 737
@@ -817,11 +801,9 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
817 if (ref_root == BTRFS_TREE_LOG_OBJECTID && 801 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
818 owner_objectid < BTRFS_FIRST_FREE_OBJECTID) 802 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
819 return 0; 803 return 0;
820 maybe_lock_mutex(root);
821 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent, 804 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
822 0, ref_root, 0, ref_generation, 805 0, ref_root, 0, ref_generation,
823 owner_objectid); 806 owner_objectid);
824 maybe_unlock_mutex(root);
825 return ret; 807 return ret;
826} 808}
827 809
@@ -886,7 +868,6 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr,
886 key.type = BTRFS_EXTENT_ITEM_KEY; 868 key.type = BTRFS_EXTENT_ITEM_KEY;
887 869
888 path = btrfs_alloc_path(); 870 path = btrfs_alloc_path();
889 mutex_lock(&root->fs_info->alloc_mutex);
890 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 871 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
891 if (ret < 0) 872 if (ret < 0)
892 goto out; 873 goto out;
@@ -953,7 +934,6 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr,
953 } 934 }
954 ret = 0; 935 ret = 0;
955out: 936out:
956 mutex_unlock(&root->fs_info->alloc_mutex);
957 btrfs_free_path(path); 937 btrfs_free_path(path);
958 return ret; 938 return ret;
959} 939}
@@ -1179,13 +1159,11 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1179 1159
1180 nr_file_extents++; 1160 nr_file_extents++;
1181 1161
1182 maybe_lock_mutex(root);
1183 ret = process_func(trans, root, bytenr, 1162 ret = process_func(trans, root, bytenr,
1184 orig_buf->start, buf->start, 1163 orig_buf->start, buf->start,
1185 orig_root, ref_root, 1164 orig_root, ref_root,
1186 orig_generation, ref_generation, 1165 orig_generation, ref_generation,
1187 key.objectid); 1166 key.objectid);
1188 maybe_unlock_mutex(root);
1189 1167
1190 if (ret) { 1168 if (ret) {
1191 faili = i; 1169 faili = i;
@@ -1194,13 +1172,11 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1194 } 1172 }
1195 } else { 1173 } else {
1196 bytenr = btrfs_node_blockptr(buf, i); 1174 bytenr = btrfs_node_blockptr(buf, i);
1197 maybe_lock_mutex(root);
1198 ret = process_func(trans, root, bytenr, 1175 ret = process_func(trans, root, bytenr,
1199 orig_buf->start, buf->start, 1176 orig_buf->start, buf->start,
1200 orig_root, ref_root, 1177 orig_root, ref_root,
1201 orig_generation, ref_generation, 1178 orig_generation, ref_generation,
1202 level - 1); 1179 level - 1);
1203 maybe_unlock_mutex(root);
1204 if (ret) { 1180 if (ret) {
1205 faili = i; 1181 faili = i;
1206 WARN_ON(1); 1182 WARN_ON(1);
@@ -1270,24 +1246,20 @@ int btrfs_update_ref(struct btrfs_trans_handle *trans,
1270 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 1246 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1271 if (bytenr == 0) 1247 if (bytenr == 0)
1272 continue; 1248 continue;
1273 maybe_lock_mutex(root);
1274 ret = __btrfs_update_extent_ref(trans, root, bytenr, 1249 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1275 orig_buf->start, buf->start, 1250 orig_buf->start, buf->start,
1276 orig_root, ref_root, 1251 orig_root, ref_root,
1277 orig_generation, ref_generation, 1252 orig_generation, ref_generation,
1278 key.objectid); 1253 key.objectid);
1279 maybe_unlock_mutex(root);
1280 if (ret) 1254 if (ret)
1281 goto fail; 1255 goto fail;
1282 } else { 1256 } else {
1283 bytenr = btrfs_node_blockptr(buf, slot); 1257 bytenr = btrfs_node_blockptr(buf, slot);
1284 maybe_lock_mutex(root);
1285 ret = __btrfs_update_extent_ref(trans, root, bytenr, 1258 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1286 orig_buf->start, buf->start, 1259 orig_buf->start, buf->start,
1287 orig_root, ref_root, 1260 orig_root, ref_root,
1288 orig_generation, ref_generation, 1261 orig_generation, ref_generation,
1289 level - 1); 1262 level - 1);
1290 maybe_unlock_mutex(root);
1291 if (ret) 1263 if (ret)
1292 goto fail; 1264 goto fail;
1293 } 1265 }
@@ -1344,7 +1316,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1344 if (!path) 1316 if (!path)
1345 return -ENOMEM; 1317 return -ENOMEM;
1346 1318
1347 mutex_lock(&root->fs_info->alloc_mutex);
1348 while(1) { 1319 while(1) {
1349 cache = NULL; 1320 cache = NULL;
1350 spin_lock(&root->fs_info->block_group_cache_lock); 1321 spin_lock(&root->fs_info->block_group_cache_lock);
@@ -1378,7 +1349,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1378 } 1349 }
1379 } 1350 }
1380 btrfs_free_path(path); 1351 btrfs_free_path(path);
1381 mutex_unlock(&root->fs_info->alloc_mutex);
1382 return werr; 1352 return werr;
1383} 1353}
1384 1354
@@ -1390,9 +1360,11 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1390 1360
1391 found = __find_space_info(info, flags); 1361 found = __find_space_info(info, flags);
1392 if (found) { 1362 if (found) {
1363 spin_lock(&found->lock);
1393 found->total_bytes += total_bytes; 1364 found->total_bytes += total_bytes;
1394 found->bytes_used += bytes_used; 1365 found->bytes_used += bytes_used;
1395 found->full = 0; 1366 found->full = 0;
1367 spin_unlock(&found->lock);
1396 *space_info = found; 1368 *space_info = found;
1397 return 0; 1369 return 0;
1398 } 1370 }
@@ -1479,43 +1451,53 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1479 } 1451 }
1480 BUG_ON(!space_info); 1452 BUG_ON(!space_info);
1481 1453
1454 spin_lock(&space_info->lock);
1482 if (space_info->force_alloc) { 1455 if (space_info->force_alloc) {
1483 force = 1; 1456 force = 1;
1484 space_info->force_alloc = 0; 1457 space_info->force_alloc = 0;
1485 } 1458 }
1486 if (space_info->full) 1459 if (space_info->full) {
1460 spin_unlock(&space_info->lock);
1487 goto out; 1461 goto out;
1462 }
1488 1463
1489 thresh = div_factor(space_info->total_bytes, 6); 1464 thresh = div_factor(space_info->total_bytes, 6);
1490 if (!force && 1465 if (!force &&
1491 (space_info->bytes_used + space_info->bytes_pinned + 1466 (space_info->bytes_used + space_info->bytes_pinned +
1492 space_info->bytes_reserved + alloc_bytes) < thresh) 1467 space_info->bytes_reserved + alloc_bytes) < thresh) {
1468 spin_unlock(&space_info->lock);
1493 goto out; 1469 goto out;
1470 }
1494 1471
1495 while (!mutex_trylock(&extent_root->fs_info->chunk_mutex)) { 1472 spin_unlock(&space_info->lock);
1496 if (!force) 1473
1497 goto out; 1474 ret = mutex_trylock(&extent_root->fs_info->chunk_mutex);
1498 mutex_unlock(&extent_root->fs_info->alloc_mutex); 1475 if (!ret && !force) {
1499 cond_resched(); 1476 goto out;
1500 mutex_lock(&extent_root->fs_info->alloc_mutex); 1477 } else if (!ret) {
1478 mutex_lock(&extent_root->fs_info->chunk_mutex);
1501 waited = 1; 1479 waited = 1;
1502 } 1480 }
1503 1481
1504 if (waited && space_info->full) 1482 if (waited) {
1505 goto out_unlock; 1483 spin_lock(&space_info->lock);
1484 if (space_info->full) {
1485 spin_unlock(&space_info->lock);
1486 goto out_unlock;
1487 }
1488 spin_unlock(&space_info->lock);
1489 }
1506 1490
1507 ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags); 1491 ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
1508 if (ret == -ENOSPC) { 1492 if (ret) {
1509printk("space info full %Lu\n", flags); 1493printk("space info full %Lu\n", flags);
1510 space_info->full = 1; 1494 space_info->full = 1;
1511 goto out_unlock; 1495 goto out_unlock;
1512 } 1496 }
1513 BUG_ON(ret);
1514 1497
1515 ret = btrfs_make_block_group(trans, extent_root, 0, flags, 1498 ret = btrfs_make_block_group(trans, extent_root, 0, flags,
1516 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes); 1499 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
1517 BUG_ON(ret); 1500 BUG_ON(ret);
1518
1519out_unlock: 1501out_unlock:
1520 mutex_unlock(&extent_root->fs_info->chunk_mutex); 1502 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1521out: 1503out:
@@ -1533,7 +1515,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
1533 u64 old_val; 1515 u64 old_val;
1534 u64 byte_in_group; 1516 u64 byte_in_group;
1535 1517
1536 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1537 while(total) { 1518 while(total) {
1538 cache = btrfs_lookup_block_group(info, bytenr); 1519 cache = btrfs_lookup_block_group(info, bytenr);
1539 if (!cache) { 1520 if (!cache) {
@@ -1542,6 +1523,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
1542 byte_in_group = bytenr - cache->key.objectid; 1523 byte_in_group = bytenr - cache->key.objectid;
1543 WARN_ON(byte_in_group > cache->key.offset); 1524 WARN_ON(byte_in_group > cache->key.offset);
1544 1525
1526 spin_lock(&cache->space_info->lock);
1545 spin_lock(&cache->lock); 1527 spin_lock(&cache->lock);
1546 cache->dirty = 1; 1528 cache->dirty = 1;
1547 old_val = btrfs_block_group_used(&cache->item); 1529 old_val = btrfs_block_group_used(&cache->item);
@@ -1551,11 +1533,13 @@ static int update_block_group(struct btrfs_trans_handle *trans,
1551 cache->space_info->bytes_used += num_bytes; 1533 cache->space_info->bytes_used += num_bytes;
1552 btrfs_set_block_group_used(&cache->item, old_val); 1534 btrfs_set_block_group_used(&cache->item, old_val);
1553 spin_unlock(&cache->lock); 1535 spin_unlock(&cache->lock);
1536 spin_unlock(&cache->space_info->lock);
1554 } else { 1537 } else {
1555 old_val -= num_bytes; 1538 old_val -= num_bytes;
1556 cache->space_info->bytes_used -= num_bytes; 1539 cache->space_info->bytes_used -= num_bytes;
1557 btrfs_set_block_group_used(&cache->item, old_val); 1540 btrfs_set_block_group_used(&cache->item, old_val);
1558 spin_unlock(&cache->lock); 1541 spin_unlock(&cache->lock);
1542 spin_unlock(&cache->space_info->lock);
1559 if (mark_free) { 1543 if (mark_free) {
1560 int ret; 1544 int ret;
1561 ret = btrfs_add_free_space(cache, bytenr, 1545 ret = btrfs_add_free_space(cache, bytenr,
@@ -1588,7 +1572,7 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
1588 struct btrfs_block_group_cache *cache; 1572 struct btrfs_block_group_cache *cache;
1589 struct btrfs_fs_info *fs_info = root->fs_info; 1573 struct btrfs_fs_info *fs_info = root->fs_info;
1590 1574
1591 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); 1575 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
1592 if (pin) { 1576 if (pin) {
1593 set_extent_dirty(&fs_info->pinned_extents, 1577 set_extent_dirty(&fs_info->pinned_extents,
1594 bytenr, bytenr + num - 1, GFP_NOFS); 1578 bytenr, bytenr + num - 1, GFP_NOFS);
@@ -1602,16 +1586,20 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
1602 len = min(num, cache->key.offset - 1586 len = min(num, cache->key.offset -
1603 (bytenr - cache->key.objectid)); 1587 (bytenr - cache->key.objectid));
1604 if (pin) { 1588 if (pin) {
1589 spin_lock(&cache->space_info->lock);
1605 spin_lock(&cache->lock); 1590 spin_lock(&cache->lock);
1606 cache->pinned += len; 1591 cache->pinned += len;
1607 cache->space_info->bytes_pinned += len; 1592 cache->space_info->bytes_pinned += len;
1608 spin_unlock(&cache->lock); 1593 spin_unlock(&cache->lock);
1594 spin_unlock(&cache->space_info->lock);
1609 fs_info->total_pinned += len; 1595 fs_info->total_pinned += len;
1610 } else { 1596 } else {
1597 spin_lock(&cache->space_info->lock);
1611 spin_lock(&cache->lock); 1598 spin_lock(&cache->lock);
1612 cache->pinned -= len; 1599 cache->pinned -= len;
1613 cache->space_info->bytes_pinned -= len; 1600 cache->space_info->bytes_pinned -= len;
1614 spin_unlock(&cache->lock); 1601 spin_unlock(&cache->lock);
1602 spin_unlock(&cache->space_info->lock);
1615 fs_info->total_pinned -= len; 1603 fs_info->total_pinned -= len;
1616 } 1604 }
1617 bytenr += len; 1605 bytenr += len;
@@ -1627,23 +1615,23 @@ static int update_reserved_extents(struct btrfs_root *root,
1627 struct btrfs_block_group_cache *cache; 1615 struct btrfs_block_group_cache *cache;
1628 struct btrfs_fs_info *fs_info = root->fs_info; 1616 struct btrfs_fs_info *fs_info = root->fs_info;
1629 1617
1630 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1631 while (num > 0) { 1618 while (num > 0) {
1632 cache = btrfs_lookup_block_group(fs_info, bytenr); 1619 cache = btrfs_lookup_block_group(fs_info, bytenr);
1633 BUG_ON(!cache); 1620 BUG_ON(!cache);
1634 len = min(num, cache->key.offset - 1621 len = min(num, cache->key.offset -
1635 (bytenr - cache->key.objectid)); 1622 (bytenr - cache->key.objectid));
1623
1624 spin_lock(&cache->space_info->lock);
1625 spin_lock(&cache->lock);
1636 if (reserve) { 1626 if (reserve) {
1637 spin_lock(&cache->lock);
1638 cache->reserved += len; 1627 cache->reserved += len;
1639 cache->space_info->bytes_reserved += len; 1628 cache->space_info->bytes_reserved += len;
1640 spin_unlock(&cache->lock);
1641 } else { 1629 } else {
1642 spin_lock(&cache->lock);
1643 cache->reserved -= len; 1630 cache->reserved -= len;
1644 cache->space_info->bytes_reserved -= len; 1631 cache->space_info->bytes_reserved -= len;
1645 spin_unlock(&cache->lock);
1646 } 1632 }
1633 spin_unlock(&cache->lock);
1634 spin_unlock(&cache->space_info->lock);
1647 bytenr += len; 1635 bytenr += len;
1648 num -= len; 1636 num -= len;
1649 } 1637 }
@@ -1658,6 +1646,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1658 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents; 1646 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
1659 int ret; 1647 int ret;
1660 1648
1649 mutex_lock(&root->fs_info->pinned_mutex);
1661 while(1) { 1650 while(1) {
1662 ret = find_first_extent_bit(pinned_extents, last, 1651 ret = find_first_extent_bit(pinned_extents, last,
1663 &start, &end, EXTENT_DIRTY); 1652 &start, &end, EXTENT_DIRTY);
@@ -1666,6 +1655,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1666 set_extent_dirty(copy, start, end, GFP_NOFS); 1655 set_extent_dirty(copy, start, end, GFP_NOFS);
1667 last = end + 1; 1656 last = end + 1;
1668 } 1657 }
1658 mutex_unlock(&root->fs_info->pinned_mutex);
1669 return 0; 1659 return 0;
1670} 1660}
1671 1661
@@ -1678,7 +1668,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1678 int ret; 1668 int ret;
1679 struct btrfs_block_group_cache *cache; 1669 struct btrfs_block_group_cache *cache;
1680 1670
1681 mutex_lock(&root->fs_info->alloc_mutex); 1671 mutex_lock(&root->fs_info->pinned_mutex);
1682 while(1) { 1672 while(1) {
1683 ret = find_first_extent_bit(unpin, 0, &start, &end, 1673 ret = find_first_extent_bit(unpin, 0, &start, &end,
1684 EXTENT_DIRTY); 1674 EXTENT_DIRTY);
@@ -1690,12 +1680,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1690 if (cache->cached) 1680 if (cache->cached)
1691 btrfs_add_free_space(cache, start, end - start + 1); 1681 btrfs_add_free_space(cache, start, end - start + 1);
1692 if (need_resched()) { 1682 if (need_resched()) {
1693 mutex_unlock(&root->fs_info->alloc_mutex); 1683 mutex_unlock(&root->fs_info->pinned_mutex);
1694 cond_resched(); 1684 cond_resched();
1695 mutex_lock(&root->fs_info->alloc_mutex); 1685 mutex_lock(&root->fs_info->pinned_mutex);
1696 } 1686 }
1697 } 1687 }
1698 mutex_unlock(&root->fs_info->alloc_mutex); 1688 mutex_unlock(&root->fs_info->pinned_mutex);
1699 return 0; 1689 return 0;
1700} 1690}
1701 1691
@@ -1705,6 +1695,7 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
1705 u64 start; 1695 u64 start;
1706 u64 end; 1696 u64 end;
1707 u64 priv; 1697 u64 priv;
1698 u64 search = 0;
1708 struct btrfs_fs_info *info = extent_root->fs_info; 1699 struct btrfs_fs_info *info = extent_root->fs_info;
1709 struct btrfs_path *path; 1700 struct btrfs_path *path;
1710 struct btrfs_extent_ref *ref; 1701 struct btrfs_extent_ref *ref;
@@ -1714,20 +1705,37 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
1714 int ret; 1705 int ret;
1715 int err = 0; 1706 int err = 0;
1716 1707
1717 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
1718 btrfs_set_stack_extent_refs(&extent_item, 1); 1708 btrfs_set_stack_extent_refs(&extent_item, 1);
1719 path = btrfs_alloc_path(); 1709 path = btrfs_alloc_path();
1720 1710
1721 while(1) { 1711 while(1) {
1722 ret = find_first_extent_bit(&info->extent_ins, 0, &start, 1712 mutex_lock(&info->extent_ins_mutex);
1723 &end, EXTENT_LOCKED); 1713 ret = find_first_extent_bit(&info->extent_ins, search, &start,
1724 if (ret) 1714 &end, EXTENT_WRITEBACK);
1715 if (ret) {
1716 mutex_unlock(&info->extent_ins_mutex);
1717 if (search) {
1718 search = 0;
1719 continue;
1720 }
1725 break; 1721 break;
1722 }
1723
1724 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
1725 if (!ret) {
1726 search = end+1;
1727 mutex_unlock(&info->extent_ins_mutex);
1728 cond_resched();
1729 continue;
1730 }
1731 BUG_ON(ret < 0);
1726 1732
1727 ret = get_state_private(&info->extent_ins, start, &priv); 1733 ret = get_state_private(&info->extent_ins, start, &priv);
1728 BUG_ON(ret); 1734 BUG_ON(ret);
1729 extent_op = (struct pending_extent_op *)(unsigned long)priv; 1735 extent_op = (struct pending_extent_op *)(unsigned long)priv;
1730 1736
1737 mutex_unlock(&info->extent_ins_mutex);
1738
1731 if (extent_op->type == PENDING_EXTENT_INSERT) { 1739 if (extent_op->type == PENDING_EXTENT_INSERT) {
1732 key.objectid = start; 1740 key.objectid = start;
1733 key.offset = end + 1 - start; 1741 key.offset = end + 1 - start;
@@ -1736,8 +1744,10 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
1736 &extent_item, sizeof(extent_item)); 1744 &extent_item, sizeof(extent_item));
1737 BUG_ON(err); 1745 BUG_ON(err);
1738 1746
1747 mutex_lock(&info->extent_ins_mutex);
1739 clear_extent_bits(&info->extent_ins, start, end, 1748 clear_extent_bits(&info->extent_ins, start, end,
1740 EXTENT_LOCKED, GFP_NOFS); 1749 EXTENT_WRITEBACK, GFP_NOFS);
1750 mutex_unlock(&info->extent_ins_mutex);
1741 1751
1742 err = insert_extent_backref(trans, extent_root, path, 1752 err = insert_extent_backref(trans, extent_root, path,
1743 start, extent_op->parent, 1753 start, extent_op->parent,
@@ -1753,8 +1763,10 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
1753 extent_op->level, 0); 1763 extent_op->level, 0);
1754 BUG_ON(err); 1764 BUG_ON(err);
1755 1765
1766 mutex_lock(&info->extent_ins_mutex);
1756 clear_extent_bits(&info->extent_ins, start, end, 1767 clear_extent_bits(&info->extent_ins, start, end,
1757 EXTENT_LOCKED, GFP_NOFS); 1768 EXTENT_WRITEBACK, GFP_NOFS);
1769 mutex_unlock(&info->extent_ins_mutex);
1758 1770
1759 key.objectid = start; 1771 key.objectid = start;
1760 key.offset = extent_op->parent; 1772 key.offset = extent_op->parent;
@@ -1772,12 +1784,10 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
1772 BUG_ON(1); 1784 BUG_ON(1);
1773 } 1785 }
1774 kfree(extent_op); 1786 kfree(extent_op);
1787 unlock_extent(&info->extent_ins, start, end, GFP_NOFS);
1788 search = 0;
1775 1789
1776 if (need_resched()) { 1790 cond_resched();
1777 mutex_unlock(&extent_root->fs_info->alloc_mutex);
1778 cond_resched();
1779 mutex_lock(&extent_root->fs_info->alloc_mutex);
1780 }
1781 } 1791 }
1782 btrfs_free_path(path); 1792 btrfs_free_path(path);
1783 return 0; 1793 return 0;
@@ -1790,7 +1800,6 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
1790 int err = 0; 1800 int err = 0;
1791 struct extent_buffer *buf; 1801 struct extent_buffer *buf;
1792 1802
1793 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1794 if (is_data) 1803 if (is_data)
1795 goto pinit; 1804 goto pinit;
1796 1805
@@ -1847,7 +1856,6 @@ static int __free_extent(struct btrfs_trans_handle *trans,
1847 struct btrfs_extent_item *ei; 1856 struct btrfs_extent_item *ei;
1848 u32 refs; 1857 u32 refs;
1849 1858
1850 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1851 key.objectid = bytenr; 1859 key.objectid = bytenr;
1852 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); 1860 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1853 key.offset = num_bytes; 1861 key.offset = num_bytes;
@@ -1935,8 +1943,10 @@ static int __free_extent(struct btrfs_trans_handle *trans,
1935#endif 1943#endif
1936 1944
1937 if (pin) { 1945 if (pin) {
1946 mutex_lock(&root->fs_info->pinned_mutex);
1938 ret = pin_down_bytes(trans, root, bytenr, num_bytes, 1947 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
1939 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID); 1948 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
1949 mutex_unlock(&root->fs_info->pinned_mutex);
1940 if (ret > 0) 1950 if (ret > 0)
1941 mark_free = 1; 1951 mark_free = 1;
1942 BUG_ON(ret < 0); 1952 BUG_ON(ret < 0);
@@ -1956,6 +1966,7 @@ static int __free_extent(struct btrfs_trans_handle *trans,
1956 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 1966 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
1957 num_to_del); 1967 num_to_del);
1958 BUG_ON(ret); 1968 BUG_ON(ret);
1969 btrfs_release_path(extent_root, path);
1959 ret = update_block_group(trans, root, bytenr, num_bytes, 0, 1970 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1960 mark_free); 1971 mark_free);
1961 BUG_ON(ret); 1972 BUG_ON(ret);
@@ -1994,70 +2005,91 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct
1994{ 2005{
1995 int ret; 2006 int ret;
1996 int err = 0; 2007 int err = 0;
1997 int mark_free = 0;
1998 u64 start; 2008 u64 start;
1999 u64 end; 2009 u64 end;
2000 u64 priv; 2010 u64 priv;
2011 u64 search = 0;
2001 struct extent_io_tree *pending_del; 2012 struct extent_io_tree *pending_del;
2002 struct extent_io_tree *extent_ins; 2013 struct extent_io_tree *extent_ins;
2003 struct pending_extent_op *extent_op; 2014 struct pending_extent_op *extent_op;
2015 struct btrfs_fs_info *info = extent_root->fs_info;
2004 2016
2005 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
2006 extent_ins = &extent_root->fs_info->extent_ins; 2017 extent_ins = &extent_root->fs_info->extent_ins;
2007 pending_del = &extent_root->fs_info->pending_del; 2018 pending_del = &extent_root->fs_info->pending_del;
2008 2019
2009 while(1) { 2020 while(1) {
2010 ret = find_first_extent_bit(pending_del, 0, &start, &end, 2021 mutex_lock(&info->extent_ins_mutex);
2011 EXTENT_LOCKED); 2022 ret = find_first_extent_bit(pending_del, search, &start, &end,
2012 if (ret) 2023 EXTENT_WRITEBACK);
2024 if (ret) {
2025 mutex_unlock(&info->extent_ins_mutex);
2026 if (search) {
2027 search = 0;
2028 continue;
2029 }
2013 break; 2030 break;
2031 }
2032
2033 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
2034 if (!ret) {
2035 search = end+1;
2036 mutex_unlock(&info->extent_ins_mutex);
2037 cond_resched();
2038 continue;
2039 }
2040 BUG_ON(ret < 0);
2014 2041
2015 ret = get_state_private(pending_del, start, &priv); 2042 ret = get_state_private(pending_del, start, &priv);
2016 BUG_ON(ret); 2043 BUG_ON(ret);
2017 extent_op = (struct pending_extent_op *)(unsigned long)priv; 2044 extent_op = (struct pending_extent_op *)(unsigned long)priv;
2018 2045
2019 clear_extent_bits(pending_del, start, end, EXTENT_LOCKED, 2046 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
2020 GFP_NOFS); 2047 GFP_NOFS);
2021
2022 ret = pin_down_bytes(trans, extent_root, start,
2023 end + 1 - start, 0);
2024 mark_free = ret > 0;
2025 if (!test_range_bit(extent_ins, start, end, 2048 if (!test_range_bit(extent_ins, start, end,
2026 EXTENT_LOCKED, 0)) { 2049 EXTENT_WRITEBACK, 0)) {
2050 mutex_unlock(&info->extent_ins_mutex);
2027free_extent: 2051free_extent:
2028 ret = __free_extent(trans, extent_root, 2052 ret = __free_extent(trans, extent_root,
2029 start, end + 1 - start, 2053 start, end + 1 - start,
2030 extent_op->orig_parent, 2054 extent_op->orig_parent,
2031 extent_root->root_key.objectid, 2055 extent_root->root_key.objectid,
2032 extent_op->orig_generation, 2056 extent_op->orig_generation,
2033 extent_op->level, 0, mark_free); 2057 extent_op->level, 1, 0);
2034 kfree(extent_op); 2058 kfree(extent_op);
2035 } else { 2059 } else {
2036 kfree(extent_op); 2060 kfree(extent_op);
2037 ret = get_state_private(extent_ins, start, &priv); 2061
2062 ret = get_state_private(&info->extent_ins, start,
2063 &priv);
2038 BUG_ON(ret); 2064 BUG_ON(ret);
2039 extent_op = (struct pending_extent_op *) 2065 extent_op = (struct pending_extent_op *)
2040 (unsigned long)priv; 2066 (unsigned long)priv;
2067
2068 clear_extent_bits(&info->extent_ins, start, end,
2069 EXTENT_WRITEBACK, GFP_NOFS);
2041 2070
2042 clear_extent_bits(extent_ins, start, end, 2071 mutex_unlock(&info->extent_ins_mutex);
2043 EXTENT_LOCKED, GFP_NOFS);
2044 2072
2045 if (extent_op->type == PENDING_BACKREF_UPDATE) 2073 if (extent_op->type == PENDING_BACKREF_UPDATE)
2046 goto free_extent; 2074 goto free_extent;
2047 2075
2076 mutex_lock(&extent_root->fs_info->pinned_mutex);
2077 ret = pin_down_bytes(trans, extent_root, start,
2078 end + 1 - start, 0);
2079 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2080
2048 ret = update_block_group(trans, extent_root, start, 2081 ret = update_block_group(trans, extent_root, start,
2049 end + 1 - start, 0, mark_free); 2082 end + 1 - start, 0, ret > 0);
2083
2050 BUG_ON(ret); 2084 BUG_ON(ret);
2051 kfree(extent_op); 2085 kfree(extent_op);
2052 } 2086 }
2053 if (ret) 2087 if (ret)
2054 err = ret; 2088 err = ret;
2089 unlock_extent(extent_ins, start, end, GFP_NOFS);
2055 2090
2056 if (need_resched()) { 2091 search = 0;
2057 mutex_unlock(&extent_root->fs_info->alloc_mutex); 2092 cond_resched();
2058 cond_resched();
2059 mutex_lock(&extent_root->fs_info->alloc_mutex);
2060 }
2061 } 2093 }
2062 return err; 2094 return err;
2063} 2095}
@@ -2091,11 +2123,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2091 extent_op->orig_generation = ref_generation; 2123 extent_op->orig_generation = ref_generation;
2092 extent_op->level = (int)owner_objectid; 2124 extent_op->level = (int)owner_objectid;
2093 2125
2126 mutex_lock(&root->fs_info->extent_ins_mutex);
2094 set_extent_bits(&root->fs_info->pending_del, 2127 set_extent_bits(&root->fs_info->pending_del,
2095 bytenr, bytenr + num_bytes - 1, 2128 bytenr, bytenr + num_bytes - 1,
2096 EXTENT_LOCKED, GFP_NOFS); 2129 EXTENT_WRITEBACK, GFP_NOFS);
2097 set_state_private(&root->fs_info->pending_del, 2130 set_state_private(&root->fs_info->pending_del,
2098 bytenr, (unsigned long)extent_op); 2131 bytenr, (unsigned long)extent_op);
2132 mutex_unlock(&root->fs_info->extent_ins_mutex);
2099 return 0; 2133 return 0;
2100 } 2134 }
2101 /* if metadata always pin */ 2135 /* if metadata always pin */
@@ -2134,11 +2168,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
2134{ 2168{
2135 int ret; 2169 int ret;
2136 2170
2137 maybe_lock_mutex(root);
2138 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent, 2171 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2139 root_objectid, ref_generation, 2172 root_objectid, ref_generation,
2140 owner_objectid, pin); 2173 owner_objectid, pin);
2141 maybe_unlock_mutex(root);
2142 return ret; 2174 return ret;
2143} 2175}
2144 2176
@@ -2214,12 +2246,16 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2214 * group thats not of the proper type, while looping this 2246 * group thats not of the proper type, while looping this
2215 * should never happen 2247 * should never happen
2216 */ 2248 */
2249 WARN_ON(!block_group);
2250 mutex_lock(&block_group->alloc_mutex);
2217 if (unlikely(!block_group_bits(block_group, data))) 2251 if (unlikely(!block_group_bits(block_group, data)))
2218 goto new_group; 2252 goto new_group;
2219 2253
2220 ret = cache_block_group(root, block_group); 2254 ret = cache_block_group(root, block_group);
2221 if (ret) 2255 if (ret) {
2256 mutex_unlock(&block_group->alloc_mutex);
2222 break; 2257 break;
2258 }
2223 2259
2224 if (block_group->ro) 2260 if (block_group->ro)
2225 goto new_group; 2261 goto new_group;
@@ -2250,8 +2286,10 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2250 * then we just re-search this block group 2286 * then we just re-search this block group
2251 */ 2287 */
2252 if (search_start >= start && 2288 if (search_start >= start &&
2253 search_start < end) 2289 search_start < end) {
2290 mutex_unlock(&block_group->alloc_mutex);
2254 continue; 2291 continue;
2292 }
2255 2293
2256 /* else we go to the next block group */ 2294 /* else we go to the next block group */
2257 goto new_group; 2295 goto new_group;
@@ -2259,10 +2297,15 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2259 2297
2260 ins->objectid = search_start; 2298 ins->objectid = search_start;
2261 ins->offset = num_bytes; 2299 ins->offset = num_bytes;
2300
2301 btrfs_remove_free_space_lock(block_group, search_start,
2302 num_bytes);
2262 /* we are all good, lets return */ 2303 /* we are all good, lets return */
2304 mutex_unlock(&block_group->alloc_mutex);
2263 break; 2305 break;
2264 } 2306 }
2265new_group: 2307new_group:
2308 mutex_unlock(&block_group->alloc_mutex);
2266 /* 2309 /*
2267 * Here's how this works. 2310 * Here's how this works.
2268 * loop == 0: we were searching a block group via a hint 2311 * loop == 0: we were searching a block group via a hint
@@ -2363,7 +2406,6 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2363 u64 search_start = 0; 2406 u64 search_start = 0;
2364 u64 alloc_profile; 2407 u64 alloc_profile;
2365 struct btrfs_fs_info *info = root->fs_info; 2408 struct btrfs_fs_info *info = root->fs_info;
2366 struct btrfs_block_group_cache *cache;
2367 2409
2368 if (data) { 2410 if (data) {
2369 alloc_profile = info->avail_data_alloc_bits & 2411 alloc_profile = info->avail_data_alloc_bits &
@@ -2419,13 +2461,6 @@ again:
2419 dump_space_info(sinfo, num_bytes); 2461 dump_space_info(sinfo, num_bytes);
2420 BUG(); 2462 BUG();
2421 } 2463 }
2422 cache = btrfs_lookup_block_group(root->fs_info, ins->objectid);
2423 if (!cache) {
2424 printk(KERN_ERR "Unable to find block group for %Lu\n", ins->objectid);
2425 return -ENOSPC;
2426 }
2427
2428 ret = btrfs_remove_free_space(cache, ins->objectid, ins->offset);
2429 2464
2430 return ret; 2465 return ret;
2431} 2466}
@@ -2434,16 +2469,13 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2434{ 2469{
2435 struct btrfs_block_group_cache *cache; 2470 struct btrfs_block_group_cache *cache;
2436 2471
2437 maybe_lock_mutex(root);
2438 cache = btrfs_lookup_block_group(root->fs_info, start); 2472 cache = btrfs_lookup_block_group(root->fs_info, start);
2439 if (!cache) { 2473 if (!cache) {
2440 printk(KERN_ERR "Unable to find block group for %Lu\n", start); 2474 printk(KERN_ERR "Unable to find block group for %Lu\n", start);
2441 maybe_unlock_mutex(root);
2442 return -ENOSPC; 2475 return -ENOSPC;
2443 } 2476 }
2444 btrfs_add_free_space(cache, start, len); 2477 btrfs_add_free_space(cache, start, len);
2445 update_reserved_extents(root, start, len, 0); 2478 update_reserved_extents(root, start, len, 0);
2446 maybe_unlock_mutex(root);
2447 return 0; 2479 return 0;
2448} 2480}
2449 2481
@@ -2455,12 +2487,10 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2455 u64 data) 2487 u64 data)
2456{ 2488{
2457 int ret; 2489 int ret;
2458 maybe_lock_mutex(root);
2459 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, 2490 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2460 empty_size, hint_byte, search_end, ins, 2491 empty_size, hint_byte, search_end, ins,
2461 data); 2492 data);
2462 update_reserved_extents(root, ins->objectid, ins->offset, 1); 2493 update_reserved_extents(root, ins->objectid, ins->offset, 1);
2463 maybe_unlock_mutex(root);
2464 return ret; 2494 return ret;
2465} 2495}
2466 2496
@@ -2510,11 +2540,13 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2510 extent_op->orig_generation = 0; 2540 extent_op->orig_generation = 0;
2511 extent_op->level = (int)owner; 2541 extent_op->level = (int)owner;
2512 2542
2543 mutex_lock(&root->fs_info->extent_ins_mutex);
2513 set_extent_bits(&root->fs_info->extent_ins, ins->objectid, 2544 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2514 ins->objectid + ins->offset - 1, 2545 ins->objectid + ins->offset - 1,
2515 EXTENT_LOCKED, GFP_NOFS); 2546 EXTENT_WRITEBACK, GFP_NOFS);
2516 set_state_private(&root->fs_info->extent_ins, 2547 set_state_private(&root->fs_info->extent_ins,
2517 ins->objectid, (unsigned long)extent_op); 2548 ins->objectid, (unsigned long)extent_op);
2549 mutex_unlock(&root->fs_info->extent_ins_mutex);
2518 goto update_block; 2550 goto update_block;
2519 } 2551 }
2520 2552
@@ -2578,11 +2610,9 @@ int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2578 2610
2579 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) 2611 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
2580 return 0; 2612 return 0;
2581 maybe_lock_mutex(root);
2582 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid, 2613 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
2583 ref_generation, owner, ins); 2614 ref_generation, owner, ins);
2584 update_reserved_extents(root, ins->objectid, ins->offset, 0); 2615 update_reserved_extents(root, ins->objectid, ins->offset, 0);
2585 maybe_unlock_mutex(root);
2586 return ret; 2616 return ret;
2587} 2617}
2588 2618
@@ -2599,15 +2629,16 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
2599 int ret; 2629 int ret;
2600 struct btrfs_block_group_cache *block_group; 2630 struct btrfs_block_group_cache *block_group;
2601 2631
2602 maybe_lock_mutex(root);
2603 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); 2632 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
2633 mutex_lock(&block_group->alloc_mutex);
2604 cache_block_group(root, block_group); 2634 cache_block_group(root, block_group);
2605 2635
2606 ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset); 2636 ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
2637 ins->offset);
2638 mutex_unlock(&block_group->alloc_mutex);
2607 BUG_ON(ret); 2639 BUG_ON(ret);
2608 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid, 2640 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
2609 ref_generation, owner, ins); 2641 ref_generation, owner, ins);
2610 maybe_unlock_mutex(root);
2611 return ret; 2642 return ret;
2612} 2643}
2613 2644
@@ -2627,8 +2658,6 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2627{ 2658{
2628 int ret; 2659 int ret;
2629 2660
2630 maybe_lock_mutex(root);
2631
2632 ret = __btrfs_reserve_extent(trans, root, num_bytes, 2661 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2633 min_alloc_size, empty_size, hint_byte, 2662 min_alloc_size, empty_size, hint_byte,
2634 search_end, ins, data); 2663 search_end, ins, data);
@@ -2642,7 +2671,6 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2642 } else { 2671 } else {
2643 update_reserved_extents(root, ins->objectid, ins->offset, 1); 2672 update_reserved_extents(root, ins->objectid, ins->offset, 1);
2644 } 2673 }
2645 maybe_unlock_mutex(root);
2646 return ret; 2674 return ret;
2647} 2675}
2648 2676
@@ -2734,12 +2762,10 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
2734 if (disk_bytenr == 0) 2762 if (disk_bytenr == 0)
2735 continue; 2763 continue;
2736 2764
2737 mutex_lock(&root->fs_info->alloc_mutex);
2738 ret = __btrfs_free_extent(trans, root, disk_bytenr, 2765 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2739 btrfs_file_extent_disk_num_bytes(leaf, fi), 2766 btrfs_file_extent_disk_num_bytes(leaf, fi),
2740 leaf->start, leaf_owner, leaf_generation, 2767 leaf->start, leaf_owner, leaf_generation,
2741 key.objectid, 0); 2768 key.objectid, 0);
2742 mutex_unlock(&root->fs_info->alloc_mutex);
2743 BUG_ON(ret); 2769 BUG_ON(ret);
2744 2770
2745 atomic_inc(&root->fs_info->throttle_gen); 2771 atomic_inc(&root->fs_info->throttle_gen);
@@ -2758,12 +2784,10 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
2758 struct btrfs_extent_info *info = ref->extents; 2784 struct btrfs_extent_info *info = ref->extents;
2759 2785
2760 for (i = 0; i < ref->nritems; i++) { 2786 for (i = 0; i < ref->nritems; i++) {
2761 mutex_lock(&root->fs_info->alloc_mutex);
2762 ret = __btrfs_free_extent(trans, root, info->bytenr, 2787 ret = __btrfs_free_extent(trans, root, info->bytenr,
2763 info->num_bytes, ref->bytenr, 2788 info->num_bytes, ref->bytenr,
2764 ref->owner, ref->generation, 2789 ref->owner, ref->generation,
2765 info->objectid, 0); 2790 info->objectid, 0);
2766 mutex_unlock(&root->fs_info->alloc_mutex);
2767 2791
2768 atomic_inc(&root->fs_info->throttle_gen); 2792 atomic_inc(&root->fs_info->throttle_gen);
2769 wake_up(&root->fs_info->transaction_throttle); 2793 wake_up(&root->fs_info->transaction_throttle);
@@ -2875,13 +2899,11 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2875 root_gen = btrfs_header_generation(parent); 2899 root_gen = btrfs_header_generation(parent);
2876 path->slots[*level]++; 2900 path->slots[*level]++;
2877 2901
2878 mutex_lock(&root->fs_info->alloc_mutex);
2879 ret = __btrfs_free_extent(trans, root, bytenr, 2902 ret = __btrfs_free_extent(trans, root, bytenr,
2880 blocksize, parent->start, 2903 blocksize, parent->start,
2881 root_owner, root_gen, 2904 root_owner, root_gen,
2882 *level - 1, 1); 2905 *level - 1, 1);
2883 BUG_ON(ret); 2906 BUG_ON(ret);
2884 mutex_unlock(&root->fs_info->alloc_mutex);
2885 2907
2886 atomic_inc(&root->fs_info->throttle_gen); 2908 atomic_inc(&root->fs_info->throttle_gen);
2887 wake_up(&root->fs_info->transaction_throttle); 2909 wake_up(&root->fs_info->transaction_throttle);
@@ -2957,11 +2979,9 @@ out:
2957 root_owner = btrfs_header_owner(parent); 2979 root_owner = btrfs_header_owner(parent);
2958 root_gen = btrfs_header_generation(parent); 2980 root_gen = btrfs_header_generation(parent);
2959 2981
2960 mutex_lock(&root->fs_info->alloc_mutex);
2961 ret = __btrfs_free_extent(trans, root, bytenr, blocksize, 2982 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2962 parent->start, root_owner, root_gen, 2983 parent->start, root_owner, root_gen,
2963 *level, 1); 2984 *level, 1);
2964 mutex_unlock(&root->fs_info->alloc_mutex);
2965 free_extent_buffer(path->nodes[*level]); 2985 free_extent_buffer(path->nodes[*level]);
2966 path->nodes[*level] = NULL; 2986 path->nodes[*level] = NULL;
2967 *level += 1; 2987 *level += 1;
@@ -3440,8 +3460,6 @@ static int noinline __next_ref_path(struct btrfs_trans_handle *trans,
3440 if (!path) 3460 if (!path)
3441 return -ENOMEM; 3461 return -ENOMEM;
3442 3462
3443 mutex_lock(&extent_root->fs_info->alloc_mutex);
3444
3445 if (first_time) { 3463 if (first_time) {
3446 ref_path->lowest_level = -1; 3464 ref_path->lowest_level = -1;
3447 ref_path->current_level = -1; 3465 ref_path->current_level = -1;
@@ -3498,9 +3516,7 @@ next:
3498 level--; 3516 level--;
3499 btrfs_release_path(extent_root, path); 3517 btrfs_release_path(extent_root, path);
3500 if (need_resched()) { 3518 if (need_resched()) {
3501 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3502 cond_resched(); 3519 cond_resched();
3503 mutex_lock(&extent_root->fs_info->alloc_mutex);
3504 } 3520 }
3505 } 3521 }
3506 /* reached lowest level */ 3522 /* reached lowest level */
@@ -3613,15 +3629,12 @@ found:
3613 3629
3614 btrfs_release_path(extent_root, path); 3630 btrfs_release_path(extent_root, path);
3615 if (need_resched()) { 3631 if (need_resched()) {
3616 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3617 cond_resched(); 3632 cond_resched();
3618 mutex_lock(&extent_root->fs_info->alloc_mutex);
3619 } 3633 }
3620 } 3634 }
3621 /* reached max tree level, but no tree root found. */ 3635 /* reached max tree level, but no tree root found. */
3622 BUG(); 3636 BUG();
3623out: 3637out:
3624 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3625 btrfs_free_path(path); 3638 btrfs_free_path(path);
3626 return ret; 3639 return ret;
3627} 3640}
@@ -4556,14 +4569,6 @@ static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
4556 struct btrfs_ref_path *ref_path) 4569 struct btrfs_ref_path *ref_path)
4557{ 4570{
4558 int ret; 4571 int ret;
4559 int needs_lock = 0;
4560
4561 if (root == root->fs_info->extent_root ||
4562 root == root->fs_info->chunk_root ||
4563 root == root->fs_info->dev_root) {
4564 needs_lock = 1;
4565 mutex_lock(&root->fs_info->alloc_mutex);
4566 }
4567 4572
4568 ret = relocate_one_path(trans, root, path, first_key, 4573 ret = relocate_one_path(trans, root, path, first_key,
4569 ref_path, NULL, NULL); 4574 ref_path, NULL, NULL);
@@ -4571,8 +4576,6 @@ static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
4571 4576
4572 if (root == root->fs_info->extent_root) 4577 if (root == root->fs_info->extent_root)
4573 btrfs_extent_post_op(trans, root); 4578 btrfs_extent_post_op(trans, root);
4574 if (needs_lock)
4575 mutex_unlock(&root->fs_info->alloc_mutex);
4576 4579
4577 return 0; 4580 return 0;
4578} 4581}
@@ -4584,14 +4587,12 @@ static int noinline del_extent_zero(struct btrfs_trans_handle *trans,
4584{ 4587{
4585 int ret; 4588 int ret;
4586 4589
4587 mutex_lock(&extent_root->fs_info->alloc_mutex);
4588 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); 4590 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
4589 if (ret) 4591 if (ret)
4590 goto out; 4592 goto out;
4591 ret = btrfs_del_item(trans, extent_root, path); 4593 ret = btrfs_del_item(trans, extent_root, path);
4592out: 4594out:
4593 btrfs_release_path(extent_root, path); 4595 btrfs_release_path(extent_root, path);
4594 mutex_unlock(&extent_root->fs_info->alloc_mutex);
4595 return ret; 4596 return ret;
4596} 4597}
4597 4598
@@ -4627,7 +4628,6 @@ static int noinline relocate_one_extent(struct btrfs_root *extent_root,
4627 struct btrfs_key first_key; 4628 struct btrfs_key first_key;
4628 u64 prev_block = 0; 4629 u64 prev_block = 0;
4629 4630
4630 mutex_unlock(&extent_root->fs_info->alloc_mutex);
4631 4631
4632 trans = btrfs_start_transaction(extent_root, 1); 4632 trans = btrfs_start_transaction(extent_root, 1);
4633 BUG_ON(!trans); 4633 BUG_ON(!trans);
@@ -4754,7 +4754,6 @@ out:
4754 btrfs_end_transaction(trans, extent_root); 4754 btrfs_end_transaction(trans, extent_root);
4755 kfree(new_extents); 4755 kfree(new_extents);
4756 kfree(ref_path); 4756 kfree(ref_path);
4757 mutex_lock(&extent_root->fs_info->alloc_mutex);
4758 return ret; 4757 return ret;
4759} 4758}
4760 4759
@@ -4807,10 +4806,8 @@ int __alloc_chunk_for_shrink(struct btrfs_root *root,
4807 spin_lock(&shrink_block_group->lock); 4806 spin_lock(&shrink_block_group->lock);
4808 if (btrfs_block_group_used(&shrink_block_group->item) > 0) { 4807 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
4809 spin_unlock(&shrink_block_group->lock); 4808 spin_unlock(&shrink_block_group->lock);
4810 mutex_unlock(&root->fs_info->alloc_mutex);
4811 4809
4812 trans = btrfs_start_transaction(root, 1); 4810 trans = btrfs_start_transaction(root, 1);
4813 mutex_lock(&root->fs_info->alloc_mutex);
4814 spin_lock(&shrink_block_group->lock); 4811 spin_lock(&shrink_block_group->lock);
4815 4812
4816 new_alloc_flags = update_block_group_flags(root, 4813 new_alloc_flags = update_block_group_flags(root,
@@ -4826,9 +4823,7 @@ int __alloc_chunk_for_shrink(struct btrfs_root *root,
4826 do_chunk_alloc(trans, root->fs_info->extent_root, 4823 do_chunk_alloc(trans, root->fs_info->extent_root,
4827 calc + 2 * 1024 * 1024, new_alloc_flags, force); 4824 calc + 2 * 1024 * 1024, new_alloc_flags, force);
4828 4825
4829 mutex_unlock(&root->fs_info->alloc_mutex);
4830 btrfs_end_transaction(trans, root); 4826 btrfs_end_transaction(trans, root);
4831 mutex_lock(&root->fs_info->alloc_mutex);
4832 } else 4827 } else
4833 spin_unlock(&shrink_block_group->lock); 4828 spin_unlock(&shrink_block_group->lock);
4834 return 0; 4829 return 0;
@@ -4952,14 +4947,10 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
4952 reloc_inode = create_reloc_inode(info, block_group); 4947 reloc_inode = create_reloc_inode(info, block_group);
4953 BUG_ON(IS_ERR(reloc_inode)); 4948 BUG_ON(IS_ERR(reloc_inode));
4954 4949
4955 mutex_lock(&root->fs_info->alloc_mutex);
4956
4957 __alloc_chunk_for_shrink(root, block_group, 1); 4950 __alloc_chunk_for_shrink(root, block_group, 1);
4958 block_group->ro = 1; 4951 block_group->ro = 1;
4959 block_group->space_info->total_bytes -= block_group->key.offset; 4952 block_group->space_info->total_bytes -= block_group->key.offset;
4960 4953
4961 mutex_unlock(&root->fs_info->alloc_mutex);
4962
4963 btrfs_start_delalloc_inodes(info->tree_root); 4954 btrfs_start_delalloc_inodes(info->tree_root);
4964 btrfs_wait_ordered_extents(info->tree_root, 0); 4955 btrfs_wait_ordered_extents(info->tree_root, 0);
4965again: 4956again:
@@ -4978,8 +4969,6 @@ again:
4978 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1); 4969 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
4979 mutex_unlock(&root->fs_info->cleaner_mutex); 4970 mutex_unlock(&root->fs_info->cleaner_mutex);
4980 4971
4981 mutex_lock(&root->fs_info->alloc_mutex);
4982
4983 while(1) { 4972 while(1) {
4984 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4973 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4985 if (ret < 0) 4974 if (ret < 0)
@@ -5007,9 +4996,7 @@ next:
5007 4996
5008 if (progress && need_resched()) { 4997 if (progress && need_resched()) {
5009 btrfs_release_path(root, path); 4998 btrfs_release_path(root, path);
5010 mutex_unlock(&root->fs_info->alloc_mutex);
5011 cond_resched(); 4999 cond_resched();
5012 mutex_lock(&root->fs_info->alloc_mutex);
5013 progress = 0; 5000 progress = 0;
5014 continue; 5001 continue;
5015 } 5002 }
@@ -5036,7 +5023,6 @@ next:
5036 } 5023 }
5037 5024
5038 btrfs_release_path(root, path); 5025 btrfs_release_path(root, path);
5039 mutex_unlock(&root->fs_info->alloc_mutex);
5040 5026
5041 if (pass == 0) { 5027 if (pass == 0) {
5042 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1); 5028 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
@@ -5058,8 +5044,6 @@ next:
5058 trans = btrfs_start_transaction(info->tree_root, 1); 5044 trans = btrfs_start_transaction(info->tree_root, 1);
5059 btrfs_commit_transaction(trans, info->tree_root); 5045 btrfs_commit_transaction(trans, info->tree_root);
5060 5046
5061 mutex_lock(&root->fs_info->alloc_mutex);
5062
5063 spin_lock(&block_group->lock); 5047 spin_lock(&block_group->lock);
5064 WARN_ON(block_group->pinned > 0); 5048 WARN_ON(block_group->pinned > 0);
5065 WARN_ON(block_group->reserved > 0); 5049 WARN_ON(block_group->reserved > 0);
@@ -5067,7 +5051,6 @@ next:
5067 spin_unlock(&block_group->lock); 5051 spin_unlock(&block_group->lock);
5068 ret = 0; 5052 ret = 0;
5069out: 5053out:
5070 mutex_unlock(&root->fs_info->alloc_mutex);
5071 btrfs_free_path(path); 5054 btrfs_free_path(path);
5072 return ret; 5055 return ret;
5073} 5056}
@@ -5114,7 +5097,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
5114 struct btrfs_block_group_cache *block_group; 5097 struct btrfs_block_group_cache *block_group;
5115 struct rb_node *n; 5098 struct rb_node *n;
5116 5099
5117 mutex_lock(&info->alloc_mutex);
5118 spin_lock(&info->block_group_cache_lock); 5100 spin_lock(&info->block_group_cache_lock);
5119 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { 5101 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
5120 block_group = rb_entry(n, struct btrfs_block_group_cache, 5102 block_group = rb_entry(n, struct btrfs_block_group_cache,
@@ -5132,7 +5114,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
5132 kfree(block_group); 5114 kfree(block_group);
5133 } 5115 }
5134 spin_unlock(&info->block_group_cache_lock); 5116 spin_unlock(&info->block_group_cache_lock);
5135 mutex_unlock(&info->alloc_mutex);
5136 return 0; 5117 return 0;
5137} 5118}
5138 5119
@@ -5155,7 +5136,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
5155 if (!path) 5136 if (!path)
5156 return -ENOMEM; 5137 return -ENOMEM;
5157 5138
5158 mutex_lock(&root->fs_info->alloc_mutex);
5159 while(1) { 5139 while(1) {
5160 ret = find_first_block_group(root, path, &key); 5140 ret = find_first_block_group(root, path, &key);
5161 if (ret > 0) { 5141 if (ret > 0) {
@@ -5174,6 +5154,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
5174 } 5154 }
5175 5155
5176 spin_lock_init(&cache->lock); 5156 spin_lock_init(&cache->lock);
5157 mutex_init(&cache->alloc_mutex);
5177 INIT_LIST_HEAD(&cache->list); 5158 INIT_LIST_HEAD(&cache->list);
5178 read_extent_buffer(leaf, &cache->item, 5159 read_extent_buffer(leaf, &cache->item,
5179 btrfs_item_ptr_offset(leaf, path->slots[0]), 5160 btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -5201,7 +5182,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
5201 ret = 0; 5182 ret = 0;
5202error: 5183error:
5203 btrfs_free_path(path); 5184 btrfs_free_path(path);
5204 mutex_unlock(&root->fs_info->alloc_mutex);
5205 return ret; 5185 return ret;
5206} 5186}
5207 5187
@@ -5214,7 +5194,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5214 struct btrfs_root *extent_root; 5194 struct btrfs_root *extent_root;
5215 struct btrfs_block_group_cache *cache; 5195 struct btrfs_block_group_cache *cache;
5216 5196
5217 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
5218 extent_root = root->fs_info->extent_root; 5197 extent_root = root->fs_info->extent_root;
5219 5198
5220 root->fs_info->last_trans_new_blockgroup = trans->transid; 5199 root->fs_info->last_trans_new_blockgroup = trans->transid;
@@ -5226,6 +5205,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5226 cache->key.objectid = chunk_offset; 5205 cache->key.objectid = chunk_offset;
5227 cache->key.offset = size; 5206 cache->key.offset = size;
5228 spin_lock_init(&cache->lock); 5207 spin_lock_init(&cache->lock);
5208 mutex_init(&cache->alloc_mutex);
5229 INIT_LIST_HEAD(&cache->list); 5209 INIT_LIST_HEAD(&cache->list);
5230 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY); 5210 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5231 5211
@@ -5264,7 +5244,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5264 struct btrfs_key key; 5244 struct btrfs_key key;
5265 int ret; 5245 int ret;
5266 5246
5267 BUG_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
5268 root = root->fs_info->extent_root; 5247 root = root->fs_info->extent_root;
5269 5248
5270 block_group = btrfs_lookup_block_group(root->fs_info, group_start); 5249 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
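
The extent-tree.c hunks above complete the conversion for this file: every remaining fs_info->alloc_mutex lock/unlock pair is dropped, the two mutex_is_locked() assertions go away, and the new per-block-group mutex is initialized with mutex_init(&cache->alloc_mutex) in both btrfs_read_block_groups() and btrfs_make_block_group(). A minimal sketch of the resulting pattern as it might look inside fs/btrfs; the helper name is hypothetical and not part of this patch:

static void example_touch_block_group(struct btrfs_block_group_cache *cache)
{
	/*
	 * Instead of the old fs-wide alloc_mutex, only this block group's
	 * allocator state is serialized, so other groups stay usable.
	 */
	mutex_lock(&cache->alloc_mutex);

	/* ... search or update this group's free space trees ... */

	mutex_unlock(&cache->alloc_mutex);
}
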
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 314041fdfa43..7503bd46819b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -938,6 +938,20 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
938} 938}
939EXPORT_SYMBOL(lock_extent); 939EXPORT_SYMBOL(lock_extent);
940 940
941int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
942 gfp_t mask)
943{
944 int err;
945 u64 failed_start;
946
947 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
948 &failed_start, mask);
949 if (err == -EEXIST)
950 return 0;
951 return 1;
952}
953EXPORT_SYMBOL(try_lock_extent);
954
941int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, 955int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
942 gfp_t mask) 956 gfp_t mask)
943{ 957{
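
The new try_lock_extent() is the non-blocking counterpart of lock_extent(): set_extent_bit() returns -EEXIST when another task already holds EXTENT_LOCKED somewhere in the range, and try_lock_extent() turns that into 0, returning 1 once the range is locked, so a caller can fail fast instead of sleeping on a contended extent. A hypothetical caller sketch, not taken from this patch:

static int example_try_process(struct extent_io_tree *tree, u64 start, u64 end)
{
	/* don't sleep if someone else holds this range; retry it later */
	if (!try_lock_extent(tree, start, end, GFP_NOFS))
		return -EAGAIN;

	/* ... work on [start, end] while it is locked ... */

	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
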
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 86f859b87a6e..283110ec4ee0 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -128,6 +128,8 @@ int try_release_extent_state(struct extent_map_tree *map,
128 gfp_t mask); 128 gfp_t mask);
129int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); 129int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
130int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); 130int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
131int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
132 gfp_t mask);
131int extent_read_full_page(struct extent_io_tree *tree, struct page *page, 133int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
132 get_extent_t *get_extent); 134 get_extent_t *get_extent);
133int __init extent_io_init(void); 135int __init extent_io_init(void);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 96241f01fa0a..f4926c0f3c8c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -184,8 +184,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
184 return ret; 184 return ret;
185} 185}
186 186
187int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 187static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
188 u64 offset, u64 bytes) 188 u64 offset, u64 bytes)
189{ 189{
190 struct btrfs_free_space *right_info; 190 struct btrfs_free_space *right_info;
191 struct btrfs_free_space *left_info; 191 struct btrfs_free_space *left_info;
@@ -202,8 +202,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
202 * are adding, if there is remove that struct and add a new one to 202 * are adding, if there is remove that struct and add a new one to
203 * cover the entire range 203 * cover the entire range
204 */ 204 */
205 spin_lock(&block_group->lock);
206
207 right_info = tree_search_offset(&block_group->free_space_offset, 205 right_info = tree_search_offset(&block_group->free_space_offset,
208 offset+bytes, 0, 1); 206 offset+bytes, 0, 1);
209 left_info = tree_search_offset(&block_group->free_space_offset, 207 left_info = tree_search_offset(&block_group->free_space_offset,
@@ -261,7 +259,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
261 if (ret) 259 if (ret)
262 kfree(info); 260 kfree(info);
263out: 261out:
264 spin_unlock(&block_group->lock);
265 if (ret) { 262 if (ret) {
266 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); 263 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
267 if (ret == -EEXIST) 264 if (ret == -EEXIST)
@@ -274,13 +271,13 @@ out:
274 return ret; 271 return ret;
275} 272}
276 273
277int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 274static int
278 u64 offset, u64 bytes) 275__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
276 u64 offset, u64 bytes)
279{ 277{
280 struct btrfs_free_space *info; 278 struct btrfs_free_space *info;
281 int ret = 0; 279 int ret = 0;
282 280
283 spin_lock(&block_group->lock);
284 info = tree_search_offset(&block_group->free_space_offset, offset, 0, 281 info = tree_search_offset(&block_group->free_space_offset, offset, 0,
285 1); 282 1);
286 283
@@ -334,17 +331,63 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
334 /* step two, insert a new info struct to cover anything 331 /* step two, insert a new info struct to cover anything
335 * before the hole 332 * before the hole
336 */ 333 */
337 spin_unlock(&block_group->lock); 334 ret = __btrfs_add_free_space(block_group, old_start,
338 ret = btrfs_add_free_space(block_group, old_start, 335 offset - old_start);
339 offset - old_start);
340 BUG_ON(ret); 336 BUG_ON(ret);
341 goto out_nolock;
342 } else { 337 } else {
343 WARN_ON(1); 338 WARN_ON(1);
344 } 339 }
345out: 340out:
346 spin_unlock(&block_group->lock); 341 return ret;
347out_nolock: 342}
343
344int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
345 u64 offset, u64 bytes)
346{
347 int ret;
348 struct btrfs_free_space *sp;
349
350 mutex_lock(&block_group->alloc_mutex);
351 ret = __btrfs_add_free_space(block_group, offset, bytes);
352 sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
353 BUG_ON(!sp);
354 mutex_unlock(&block_group->alloc_mutex);
355
356 return ret;
357}
358
359int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
360 u64 offset, u64 bytes)
361{
362 int ret;
363 struct btrfs_free_space *sp;
364
365 ret = __btrfs_add_free_space(block_group, offset, bytes);
366 sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
367 BUG_ON(!sp);
368
369 return ret;
370}
371
372int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
373 u64 offset, u64 bytes)
374{
375 int ret = 0;
376
377 mutex_lock(&block_group->alloc_mutex);
378 ret = __btrfs_remove_free_space(block_group, offset, bytes);
379 mutex_unlock(&block_group->alloc_mutex);
380
381 return ret;
382}
383
384int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
385 u64 offset, u64 bytes)
386{
387 int ret;
388
389 ret = __btrfs_remove_free_space(block_group, offset, bytes);
390
348 return ret; 391 return ret;
349} 392}
350 393
@@ -386,18 +429,18 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
386 struct btrfs_free_space *info; 429 struct btrfs_free_space *info;
387 struct rb_node *node; 430 struct rb_node *node;
388 431
389 spin_lock(&block_group->lock); 432 mutex_lock(&block_group->alloc_mutex);
390 while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { 433 while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
391 info = rb_entry(node, struct btrfs_free_space, bytes_index); 434 info = rb_entry(node, struct btrfs_free_space, bytes_index);
392 unlink_free_space(block_group, info); 435 unlink_free_space(block_group, info);
393 kfree(info); 436 kfree(info);
394 if (need_resched()) { 437 if (need_resched()) {
395 spin_unlock(&block_group->lock); 438 mutex_unlock(&block_group->alloc_mutex);
396 cond_resched(); 439 cond_resched();
397 spin_lock(&block_group->lock); 440 mutex_lock(&block_group->alloc_mutex);
398 } 441 }
399 } 442 }
400 spin_unlock(&block_group->lock); 443 mutex_unlock(&block_group->alloc_mutex);
401} 444}
402 445
403struct btrfs_free_space *btrfs_find_free_space_offset(struct 446struct btrfs_free_space *btrfs_find_free_space_offset(struct
@@ -407,10 +450,10 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct
407{ 450{
408 struct btrfs_free_space *ret; 451 struct btrfs_free_space *ret;
409 452
410 spin_lock(&block_group->lock); 453 mutex_lock(&block_group->alloc_mutex);
411 ret = tree_search_offset(&block_group->free_space_offset, offset, 454 ret = tree_search_offset(&block_group->free_space_offset, offset,
412 bytes, 0); 455 bytes, 0);
413 spin_unlock(&block_group->lock); 456 mutex_unlock(&block_group->alloc_mutex);
414 457
415 return ret; 458 return ret;
416} 459}
@@ -422,10 +465,10 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct
422{ 465{
423 struct btrfs_free_space *ret; 466 struct btrfs_free_space *ret;
424 467
425 spin_lock(&block_group->lock); 468 mutex_lock(&block_group->alloc_mutex);
426 469
427 ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes); 470 ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
428 spin_unlock(&block_group->lock); 471 mutex_unlock(&block_group->alloc_mutex);
429 472
430 return ret; 473 return ret;
431} 474}
@@ -434,16 +477,13 @@ struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
434 *block_group, u64 offset, 477 *block_group, u64 offset,
435 u64 bytes) 478 u64 bytes)
436{ 479{
437 struct btrfs_free_space *ret; 480 struct btrfs_free_space *ret = NULL;
438 481
439 spin_lock(&block_group->lock);
440 ret = tree_search_offset(&block_group->free_space_offset, offset, 482 ret = tree_search_offset(&block_group->free_space_offset, offset,
441 bytes, 0); 483 bytes, 0);
442 if (!ret) 484 if (!ret)
443 ret = tree_search_bytes(&block_group->free_space_bytes, 485 ret = tree_search_bytes(&block_group->free_space_bytes,
444 offset, bytes); 486 offset, bytes);
445 487
446 spin_unlock(&block_group->lock);
447
448 return ret; 488 return ret;
449} 489}
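
free-space-cache.c now exposes both locking and caller-locked entry points: btrfs_add_free_space() and btrfs_remove_free_space() take the block group's alloc_mutex themselves, the new btrfs_add_free_space_lock() and btrfs_remove_free_space_lock() expect the caller to already hold it, and the __btrfs_* helpers do the actual tree work with no locking at all. A hedged usage sketch; the wrapper below is hypothetical:

static int example_give_back_space(struct btrfs_block_group_cache *cache,
				   u64 offset, u64 bytes, int have_alloc_mutex)
{
	if (have_alloc_mutex)
		/* cache->alloc_mutex already held by the caller */
		return btrfs_add_free_space_lock(cache, offset, bytes);

	/* this wrapper takes and drops cache->alloc_mutex internally */
	return btrfs_add_free_space(cache, offset, bytes);
}
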
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 1df67129cc3d..48b455fdaac5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -670,7 +670,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
670 atomic_dec(&root->fs_info->throttles); 670 atomic_dec(&root->fs_info->throttles);
671 wake_up(&root->fs_info->transaction_throttle); 671 wake_up(&root->fs_info->transaction_throttle);
672 672
673 mutex_lock(&root->fs_info->alloc_mutex);
674 num_bytes -= btrfs_root_used(&dirty->root->root_item); 673 num_bytes -= btrfs_root_used(&dirty->root->root_item);
675 bytes_used = btrfs_root_used(&root->root_item); 674 bytes_used = btrfs_root_used(&root->root_item);
676 if (num_bytes) { 675 if (num_bytes) {
@@ -678,7 +677,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
678 btrfs_set_root_used(&root->root_item, 677 btrfs_set_root_used(&root->root_item,
679 bytes_used - num_bytes); 678 bytes_used - num_bytes);
680 } 679 }
681 mutex_unlock(&root->fs_info->alloc_mutex);
682 680
683 ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key); 681 ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
684 if (ret) { 682 if (ret) {
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 6f57d0889b1e..a6a3956cedfb 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -125,9 +125,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
125 if (is_extent) 125 if (is_extent)
126 btrfs_extent_post_op(trans, root); 126 btrfs_extent_post_op(trans, root);
127out: 127out:
128 if (is_extent)
129 mutex_unlock(&root->fs_info->alloc_mutex);
130
131 if (path) 128 if (path)
132 btrfs_free_path(path); 129 btrfs_free_path(path);
133 if (ret == -EAGAIN) { 130 if (ret == -EAGAIN) {
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e6d579053a47..835daed5561f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -271,10 +271,10 @@ static int process_one_buffer(struct btrfs_root *log,
271 struct walk_control *wc, u64 gen) 271 struct walk_control *wc, u64 gen)
272{ 272{
273 if (wc->pin) { 273 if (wc->pin) {
274 mutex_lock(&log->fs_info->alloc_mutex); 274 mutex_lock(&log->fs_info->pinned_mutex);
275 btrfs_update_pinned_extents(log->fs_info->extent_root, 275 btrfs_update_pinned_extents(log->fs_info->extent_root,
276 eb->start, eb->len, 1); 276 eb->start, eb->len, 1);
277 mutex_unlock(&log->fs_info->alloc_mutex); 277 mutex_unlock(&log->fs_info->pinned_mutex);
278 } 278 }
279 279
280 if (btrfs_buffer_uptodate(eb, gen)) { 280 if (btrfs_buffer_uptodate(eb, gen)) {
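
In tree-log.c, pinning a log tree block now happens under the narrower fs_info->pinned_mutex rather than the old alloc_mutex, with btrfs_update_pinned_extents() called while that mutex is held. A minimal sketch of the same pattern; the wrapper name is hypothetical:

static void example_pin_range(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	/* pinned_mutex guards only the pinned_extents io tree */
	mutex_lock(&fs_info->pinned_mutex);
	btrfs_update_pinned_extents(fs_info->extent_root, start, len, 1);
	mutex_unlock(&fs_info->pinned_mutex);
}
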
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 7db4cfd03a98..cbb9bb31431d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -58,14 +58,12 @@ void btrfs_unlock_volumes(void)
58 58
59static void lock_chunks(struct btrfs_root *root) 59static void lock_chunks(struct btrfs_root *root)
60{ 60{
61 mutex_lock(&root->fs_info->alloc_mutex);
62 mutex_lock(&root->fs_info->chunk_mutex); 61 mutex_lock(&root->fs_info->chunk_mutex);
63} 62}
64 63
65static void unlock_chunks(struct btrfs_root *root) 64static void unlock_chunks(struct btrfs_root *root)
66{ 65{
67 mutex_unlock(&root->fs_info->chunk_mutex); 66 mutex_unlock(&root->fs_info->chunk_mutex);
68 mutex_unlock(&root->fs_info->alloc_mutex);
69} 67}
70 68
71int btrfs_cleanup_fs_uuids(void) 69int btrfs_cleanup_fs_uuids(void)
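
Finally, with the fs-wide mutex gone, lock_chunks() and unlock_chunks() in volumes.c reduce to chunk_mutex alone, so chunk and device operations no longer nest under alloc_mutex. A sketch of how a call site inside volumes.c might look; the function is hypothetical:

static void example_chunk_update(struct btrfs_root *root)
{
	/* only chunk_mutex is taken now that alloc_mutex is gone */
	lock_chunks(root);

	/* ... modify chunk or device mappings ... */

	unlock_chunks(root);
}
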