author		Chris Mason <chris.mason@oracle.com>	2009-03-13 11:00:37 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-03-24 16:14:28 -0400
commit		b9473439d3e84d9fc1a0a83faca69cc1b7566341 (patch)
tree		bef8321b80589026b617d61d0fabaf545d459269
parent		89573b9c516b24af8a3b9958dd5afca8fa874e3d (diff)
Btrfs: leave btree locks spinning more often
btrfs_mark_buffer_dirty would set dirty bits in the extent_io tree for the buffers it was dirtying. This may require a kmalloc and it was not atomic. So, anyone who called btrfs_mark_buffer_dirty had to set any btree locks they were holding to blocking first.

This commit changes dirty tracking for extent buffers to just use a flag in the extent buffer. Now that we have one and only one extent buffer per page, this can be safely done without losing dirty bits along the way.

This also introduces a path->leave_spinning flag that callers of btrfs_search_slot can use to indicate they will properly deal with a path returned where all the locks are spinning instead of blocking.

Many of the btree search callers now expect spinning paths, resulting in better btree concurrency overall.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
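For illustration only (not part of the commit): a minimal sketch of the caller pattern the new path->leave_spinning flag enables, modeled on the insert paths touched below (dir-item.c, inode-item.c). The function name example_insert is hypothetical; the btrfs helpers are the ones used in this diff.

/*
 * Hypothetical example, not from the commit: opt in to a spinning path and
 * dirty the leaf without first switching the tree locks to blocking.
 * btrfs_mark_buffer_dirty() can stay non-blocking because dirtiness is now
 * tracked with the EXTENT_BUFFER_DIRTY bit in the extent buffer itself.
 */
static int example_insert(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_key *key, u32 size)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;	/* caller promises to handle spinning locks */
	ret = btrfs_insert_empty_item(trans, root, path, key, size);
	if (ret == 0)
		btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}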
-rw-r--r--	fs/btrfs/ctree.c	19
-rw-r--r--	fs/btrfs/ctree.h	12
-rw-r--r--	fs/btrfs/dir-item.c	3
-rw-r--r--	fs/btrfs/disk-io.c	67
-rw-r--r--	fs/btrfs/disk-io.h	1
-rw-r--r--	fs/btrfs/extent-tree.c	69
-rw-r--r--	fs/btrfs/extent_io.c	51
-rw-r--r--	fs/btrfs/extent_io.h	3
-rw-r--r--	fs/btrfs/file-item.c	7
-rw-r--r--	fs/btrfs/file.c	4
-rw-r--r--	fs/btrfs/inode-item.c	3
-rw-r--r--	fs/btrfs/inode.c	17
-rw-r--r--	fs/btrfs/locking.c	11
-rw-r--r--	fs/btrfs/tree-log.c	1
14 files changed, 172 insertions, 96 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 3764248bdc05..8686a3d2ab3a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1684,7 +1684,8 @@ done:
 	 * we don't really know what they plan on doing with the path
 	 * from here on, so for now just mark it as blocking
 	 */
-	btrfs_set_path_blocking(p);
+	if (!p->leave_spinning)
+		btrfs_set_path_blocking(p);
 	return ret;
 }
 
@@ -3032,26 +3033,27 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
 		return -EAGAIN;
 	}
 
+	btrfs_set_path_blocking(path);
 	ret = split_leaf(trans, root, &orig_key, path,
 			 sizeof(struct btrfs_item), 1);
 	path->keep_locks = 0;
 	BUG_ON(ret);
 
+	btrfs_unlock_up_safe(path, 1);
+	leaf = path->nodes[0];
+	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
+
+split:
 	/*
 	 * make sure any changes to the path from split_leaf leave it
 	 * in a blocking state
 	 */
 	btrfs_set_path_blocking(path);
 
-	leaf = path->nodes[0];
-	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
-
-split:
 	item = btrfs_item_nr(leaf, path->slots[0]);
 	orig_offset = btrfs_item_offset(leaf, item);
 	item_size = btrfs_item_size(leaf, item);
 
-
 	buf = kmalloc(item_size, GFP_NOFS);
 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
 			    path->slots[0]), item_size);
@@ -3545,7 +3547,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
 	}
 
 	btrfs_set_header_nritems(leaf, nritems + nr);
-	btrfs_mark_buffer_dirty(leaf);
 
 	ret = 0;
 	if (slot == 0) {
@@ -3553,6 +3554,8 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
 		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
 	}
+	btrfs_unlock_up_safe(path, 1);
+	btrfs_mark_buffer_dirty(leaf);
 
 	if (btrfs_leaf_free_space(root, leaf) < 0) {
 		btrfs_print_leaf(root, leaf);
@@ -3596,7 +3599,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 				total_data, total_size, nr);
 
 out:
-	btrfs_unlock_up_safe(path, 1);
 	return ret;
 }
 
@@ -3792,6 +3794,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		slot = path->slots[1];
 		extent_buffer_get(leaf);
 
+		btrfs_set_path_blocking(path);
 		wret = push_leaf_left(trans, root, path, 1, 1);
 		if (wret < 0 && wret != -ENOSPC)
 			ret = wret;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 08d9f8d15538..4ddce91cf3f9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,15 +401,16 @@ struct btrfs_path {
 	int locks[BTRFS_MAX_LEVEL];
 	int reada;
 	/* keep some upper locks as we walk down */
-	int keep_locks;
-	int skip_locking;
 	int lowest_level;
 
 	/*
 	 * set by btrfs_split_item, tells search_slot to keep all locks
 	 * and to force calls to keep space in the nodes
 	 */
-	int search_for_split;
+	unsigned int search_for_split:1;
+	unsigned int keep_locks:1;
+	unsigned int skip_locking:1;
+	unsigned int leave_spinning:1;
 };
 
 /*
@@ -779,6 +780,11 @@ struct btrfs_fs_info {
 	atomic_t throttle_gen;
 
 	u64 total_pinned;
+
+	/* protected by the delalloc lock, used to keep from writing
+	 * metadata until there is a nice batch
+	 */
+	u64 dirty_metadata_bytes;
 	struct list_head dirty_cowonly_roots;
 
 	struct btrfs_fs_devices *fs_devices;
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 926a0b287a7d..1d70236ba00c 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -145,7 +145,10 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
 	key.objectid = dir;
 	btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
 	key.offset = btrfs_name_hash(name, name_len);
+
 	path = btrfs_alloc_path();
+	path->leave_spinning = 1;
+
 	data_size = sizeof(*dir_item) + name_len;
 	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
 					name, name_len);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1f1d89b18818..9244cd7313d4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -668,14 +668,31 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 static int btree_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct extent_io_tree *tree;
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+	struct extent_buffer *eb;
+	int was_dirty;
+
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	if (!(current->flags & PF_MEMALLOC)) {
+		return extent_write_full_page(tree, page,
+					      btree_get_extent, wbc);
+	}
 
-	if (current->flags & PF_MEMALLOC) {
-		redirty_page_for_writepage(wbc, page);
-		unlock_page(page);
-		return 0;
+	redirty_page_for_writepage(wbc, page);
+	eb = btrfs_find_tree_block(root, page_offset(page),
+				   PAGE_CACHE_SIZE);
+	WARN_ON(!eb);
+
+	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+	if (!was_dirty) {
+		spin_lock(&root->fs_info->delalloc_lock);
+		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
+		spin_unlock(&root->fs_info->delalloc_lock);
 	}
-	return extent_write_full_page(tree, page, btree_get_extent, wbc);
+	free_extent_buffer(eb);
+
+	unlock_page(page);
+	return 0;
 }
 
 static int btree_writepages(struct address_space *mapping,
@@ -684,15 +701,15 @@ static int btree_writepages(struct address_space *mapping,
 	struct extent_io_tree *tree;
 	tree = &BTRFS_I(mapping->host)->io_tree;
 	if (wbc->sync_mode == WB_SYNC_NONE) {
+		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
 		u64 num_dirty;
-		u64 start = 0;
 		unsigned long thresh = 32 * 1024 * 1024;
 
 		if (wbc->for_kupdate)
 			return 0;
 
-		num_dirty = count_range_bits(tree, &start, (u64)-1,
-					     thresh, EXTENT_DIRTY);
+		/* this is a bit racy, but that's ok */
+		num_dirty = root->fs_info->dirty_metadata_bytes;
 		if (num_dirty < thresh)
 			return 0;
 	}
@@ -859,9 +876,17 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	    root->fs_info->running_transaction->transid) {
 		btrfs_assert_tree_locked(buf);
 
-		/* ugh, clear_extent_buffer_dirty can be expensive */
-		btrfs_set_lock_blocking(buf);
+		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
+			spin_lock(&root->fs_info->delalloc_lock);
+			if (root->fs_info->dirty_metadata_bytes >= buf->len)
+				root->fs_info->dirty_metadata_bytes -= buf->len;
+			else
+				WARN_ON(1);
+			spin_unlock(&root->fs_info->delalloc_lock);
+		}
 
+		/* ugh, clear_extent_buffer_dirty needs to lock the page */
+		btrfs_set_lock_blocking(buf);
 		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
 					  buf);
 	}
@@ -2348,8 +2373,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	u64 transid = btrfs_header_generation(buf);
 	struct inode *btree_inode = root->fs_info->btree_inode;
-
-	btrfs_set_lock_blocking(buf);
+	int was_dirty;
 
 	btrfs_assert_tree_locked(buf);
 	if (transid != root->fs_info->generation) {
@@ -2360,7 +2384,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 		       (unsigned long long)root->fs_info->generation);
 		WARN_ON(1);
 	}
-	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
+	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
+					    buf);
+	if (!was_dirty) {
+		spin_lock(&root->fs_info->delalloc_lock);
+		root->fs_info->dirty_metadata_bytes += buf->len;
+		spin_unlock(&root->fs_info->delalloc_lock);
+	}
 }
 
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
@@ -2400,6 +2430,7 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 int btree_lock_page_hook(struct page *page)
 {
 	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_buffer *eb;
 	unsigned long len;
@@ -2415,6 +2446,16 @@ int btree_lock_page_hook(struct page *page)
 
 	btrfs_tree_lock(eb);
 	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+
+	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+		spin_lock(&root->fs_info->delalloc_lock);
+		if (root->fs_info->dirty_metadata_bytes >= eb->len)
+			root->fs_info->dirty_metadata_bytes -= eb->len;
+		else
+			WARN_ON(1);
+		spin_unlock(&root->fs_info->delalloc_lock);
+	}
+
 	btrfs_tree_unlock(eb);
 	free_extent_buffer(eb);
 out:
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 95029db227be..c958ecbc1916 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -72,6 +72,7 @@ int btrfs_insert_dev_radix(struct btrfs_root *root,
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
+void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
 int wait_on_tree_block_writeback(struct btrfs_root *root,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a421c32c6cfe..8933d15a240f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -56,9 +56,6 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
 			      int ref_mod);
 static int update_reserved_extents(struct btrfs_root *root,
 				   u64 bytenr, u64 num, int reserve);
-static int pin_down_bytes(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root,
-			  u64 bytenr, u64 num_bytes, int is_data);
 static int update_block_group(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      u64 bytenr, u64 num_bytes, int alloc,
@@ -618,6 +615,7 @@ static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
 	} else {
 		goto out;
 	}
+	btrfs_unlock_up_safe(path, 1);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 out:
 	btrfs_release_path(root, path);
@@ -760,6 +758,7 @@ static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	path->reada = 1;
+	path->leave_spinning = 1;
 	key.objectid = bytenr;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
 	key.offset = num_bytes;
@@ -767,8 +766,10 @@ static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
 	/* first find the extent item and update its reference count */
 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
 				path, 0, 1);
-	if (ret < 0)
+	if (ret < 0) {
+		btrfs_set_path_blocking(path);
 		return ret;
+	}
 
 	if (ret > 0) {
 		WARN_ON(1);
@@ -791,11 +792,15 @@ static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
 
 	refs = btrfs_extent_refs(l, item);
 	btrfs_set_extent_refs(l, item, refs + refs_to_add);
+	btrfs_unlock_up_safe(path, 1);
+
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 
 	btrfs_release_path(root->fs_info->extent_root, path);
 
 	path->reada = 1;
+	path->leave_spinning = 1;
+
 	/* now insert the actual backref */
 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
 				    path, bytenr, parent,
@@ -2050,6 +2055,8 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
 		clear_extent_dirty(&fs_info->pinned_extents,
 				   bytenr, bytenr + num - 1, GFP_NOFS);
 	}
+	mutex_unlock(&root->fs_info->pinned_mutex);
+
 	while (num > 0) {
 		cache = btrfs_lookup_block_group(fs_info, bytenr);
 		BUG_ON(!cache);
@@ -2141,8 +2148,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 	u64 end;
 	int ret;
 
-	mutex_lock(&root->fs_info->pinned_mutex);
 	while (1) {
+		mutex_lock(&root->fs_info->pinned_mutex);
 		ret = find_first_extent_bit(unpin, 0, &start, &end,
 					    EXTENT_DIRTY);
 		if (ret)
@@ -2150,14 +2157,11 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
 		ret = btrfs_discard_extent(root, start, end + 1 - start);
 
+		/* unlocks the pinned mutex */
 		btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
 
-		if (need_resched()) {
-			mutex_unlock(&root->fs_info->pinned_mutex);
-			cond_resched();
-			mutex_lock(&root->fs_info->pinned_mutex);
-		}
+		cond_resched();
 	}
 	mutex_unlock(&root->fs_info->pinned_mutex);
 	return ret;
@@ -2165,7 +2169,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
 static int pin_down_bytes(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root,
-			  u64 bytenr, u64 num_bytes, int is_data)
+			  struct btrfs_path *path,
+			  u64 bytenr, u64 num_bytes, int is_data,
+			  struct extent_buffer **must_clean)
 {
 	int err = 0;
 	struct extent_buffer *buf;
@@ -2191,15 +2197,16 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
 		    header_owner != BTRFS_DATA_RELOC_TREE_OBJECTID &&
 		    header_transid == trans->transid &&
 		    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
-			clean_tree_block(NULL, root, buf);
-			btrfs_tree_unlock(buf);
-			free_extent_buffer(buf);
+			*must_clean = buf;
 			return 1;
 		}
 		btrfs_tree_unlock(buf);
 	}
 	free_extent_buffer(buf);
 pinit:
+	btrfs_set_path_blocking(path);
+	mutex_lock(&root->fs_info->pinned_mutex);
+	/* unlocks the pinned mutex */
 	btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
 
 	BUG_ON(err < 0);
@@ -2236,6 +2243,7 @@ static int __free_extent(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	path->reada = 1;
+	path->leave_spinning = 1;
 	ret = lookup_extent_backref(trans, extent_root, path,
 				    bytenr, parent, root_objectid,
 				    ref_generation, owner_objectid, 1);
@@ -2261,6 +2269,7 @@ static int __free_extent(struct btrfs_trans_handle *trans,
 					  refs_to_drop);
 		BUG_ON(ret);
 		btrfs_release_path(extent_root, path);
+		path->leave_spinning = 1;
 		ret = btrfs_search_slot(trans, extent_root,
 					&key, path, -1, 1);
 		if (ret) {
@@ -2318,6 +2327,7 @@ static int __free_extent(struct btrfs_trans_handle *trans,
 	/* if refs are 0, we need to setup the path for deletion */
 	if (refs == 0) {
 		btrfs_release_path(extent_root, path);
+		path->leave_spinning = 1;
 		ret = btrfs_search_slot(trans, extent_root, &key, path,
 					-1, 1);
 		BUG_ON(ret);
@@ -2327,16 +2337,18 @@ static int __free_extent(struct btrfs_trans_handle *trans,
 	if (refs == 0) {
 		u64 super_used;
 		u64 root_used;
+		struct extent_buffer *must_clean = NULL;
 
 		if (pin) {
-			mutex_lock(&root->fs_info->pinned_mutex);
-			ret = pin_down_bytes(trans, root, bytenr, num_bytes,
-				owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
-			mutex_unlock(&root->fs_info->pinned_mutex);
+			ret = pin_down_bytes(trans, root, path,
+					     bytenr, num_bytes,
+					     owner_objectid >= BTRFS_FIRST_FREE_OBJECTID,
+					     &must_clean);
 			if (ret > 0)
 				mark_free = 1;
 			BUG_ON(ret < 0);
 		}
+
 		/* block accounting for super block */
 		spin_lock(&info->delalloc_lock);
 		super_used = btrfs_super_bytes_used(&info->super_copy);
@@ -2348,11 +2360,27 @@ static int __free_extent(struct btrfs_trans_handle *trans,
 		btrfs_set_root_used(&root->root_item,
 				    root_used - num_bytes);
 		spin_unlock(&info->delalloc_lock);
+
+		/*
+		 * it is going to be very rare for someone to be waiting
+		 * on the block we're freeing.  del_items might need to
+		 * schedule, so rather than get fancy, just force it
+		 * to blocking here
+		 */
+		if (must_clean)
+			btrfs_set_lock_blocking(must_clean);
+
 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
 				      num_to_del);
 		BUG_ON(ret);
 		btrfs_release_path(extent_root, path);
 
+		if (must_clean) {
+			clean_tree_block(NULL, root, must_clean);
+			btrfs_tree_unlock(must_clean);
+			free_extent_buffer(must_clean);
+		}
+
 		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
 			BUG_ON(ret);
@@ -2480,8 +2508,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID &&
 	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
 		mutex_lock(&root->fs_info->pinned_mutex);
+
+		/* unlocks the pinned mutex */
 		btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
-		mutex_unlock(&root->fs_info->pinned_mutex);
 		update_reserved_extents(root, bytenr, num_bytes, 0);
 		ret = 0;
 	} else {
@@ -2931,6 +2960,7 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
 
+	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
 				       sizes, 2);
 	BUG_ON(ret);
@@ -5435,6 +5465,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
+	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
 	if (ret)
 		goto out;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ebe6b29e6069..08085af089e2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3124,20 +3124,15 @@ void free_extent_buffer(struct extent_buffer *eb)
 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			      struct extent_buffer *eb)
 {
-	int set;
 	unsigned long i;
 	unsigned long num_pages;
 	struct page *page;
 
-	u64 start = eb->start;
-	u64 end = start + eb->len - 1;
-
-	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
 	num_pages = num_extent_pages(eb->start, eb->len);
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		if (!set && !PageDirty(page))
+		if (!PageDirty(page))
 			continue;
 
 		lock_page(page);
@@ -3146,22 +3141,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 		else
 			set_page_private(page, EXTENT_PAGE_PRIVATE);
 
-		/*
-		 * if we're on the last page or the first page and the
-		 * block isn't aligned on a page boundary, do extra checks
-		 * to make sure we don't clean page that is partially dirty
-		 */
-		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
-		    ((i == num_pages - 1) &&
-		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
-			start = (u64)page->index << PAGE_CACHE_SHIFT;
-			end = start + PAGE_CACHE_SIZE - 1;
-			if (test_range_bit(tree, start, end,
-					   EXTENT_DIRTY, 0)) {
-				unlock_page(page);
-				continue;
-			}
-		}
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
 		if (!PageDirty(page)) {
@@ -3187,29 +3166,13 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 {
 	unsigned long i;
 	unsigned long num_pages;
+	int was_dirty = 0;
 
+	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 	num_pages = num_extent_pages(eb->start, eb->len);
-	for (i = 0; i < num_pages; i++) {
-		struct page *page = extent_buffer_page(eb, i);
-		/* writepage may need to do something special for the
-		 * first page, we have to make sure page->private is
-		 * properly set.  releasepage may drop page->private
-		 * on us if the page isn't already dirty.
-		 */
-		lock_page(page);
-		if (i == 0) {
-			set_page_extent_head(page, eb->len);
-		} else if (PagePrivate(page) &&
-			   page->private != EXTENT_PAGE_PRIVATE) {
-			set_page_extent_mapped(page);
-		}
+	for (i = 0; i < num_pages; i++)
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
-		set_extent_dirty(tree, page_offset(page),
-				 page_offset(page) + PAGE_CACHE_SIZE - 1,
-				 GFP_NOFS);
-		unlock_page(page);
-	}
-	return 0;
+	return was_dirty;
 }
 
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
@@ -3789,6 +3752,10 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 		ret = 0;
 		goto out;
 	}
+	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+		ret = 0;
+		goto out;
+	}
 	/* at this point we can safely release the extent buffer */
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = 0; i < num_pages; i++)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1f9df88afbf6..5bc20abf3f3d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -25,6 +25,7 @@
 /* these are bit numbers for test/set bit */
 #define EXTENT_BUFFER_UPTODATE 0
 #define EXTENT_BUFFER_BLOCKING 1
+#define EXTENT_BUFFER_DIRTY 2
 
 /*
  * page->private values.  Every page that is controlled by the extent
@@ -254,6 +255,8 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			      struct extent_buffer *eb);
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 			     struct extent_buffer *eb);
+int test_extent_buffer_dirty(struct extent_io_tree *tree,
+			     struct extent_buffer *eb);
 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 			       struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 964652435fd1..9b99886562d0 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -52,6 +52,7 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 	file_key.offset = pos;
 	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
 
+	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
 				      sizeof(*item));
 	if (ret < 0)
@@ -523,6 +524,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 		key.offset = end_byte - 1;
 		key.type = BTRFS_EXTENT_CSUM_KEY;
 
+		path->leave_spinning = 1;
 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 		if (ret > 0) {
 			if (path->slots[0] == 0)
@@ -757,8 +759,10 @@ insert:
 	} else {
 		ins_size = csum_size;
 	}
+	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
 				      ins_size);
+	path->leave_spinning = 0;
 	if (ret < 0)
 		goto fail_unlock;
 	if (ret != 0) {
@@ -776,7 +780,6 @@ found:
 	item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
 				      btrfs_item_size_nr(leaf, path->slots[0]));
 	eb_token = NULL;
-	cond_resched();
 next_sector:
 
 	if (!eb_token ||
@@ -817,9 +820,9 @@ next_sector:
 		eb_token = NULL;
 	}
 	btrfs_mark_buffer_dirty(path->nodes[0]);
-	cond_resched();
 	if (total_bytes < sums->len) {
 		btrfs_release_path(root, path);
+		cond_resched();
 		goto again;
 	}
 out:
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c80075497645..f06c275644b7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -606,6 +606,7 @@ next_slot:
 			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
 
 			btrfs_release_path(root, path);
+			path->leave_spinning = 1;
 			ret = btrfs_insert_empty_item(trans, root, path, &ins,
 						      sizeof(*extent));
 			BUG_ON(ret);
@@ -639,7 +640,9 @@ next_slot:
 							ram_bytes);
 			btrfs_set_file_extent_type(leaf, extent, found_type);
 
+			btrfs_unlock_up_safe(path, 1);
 			btrfs_mark_buffer_dirty(path->nodes[0]);
+			btrfs_set_lock_blocking(path->nodes[0]);
 
 			if (disk_bytenr != 0) {
 				ret = btrfs_update_extent_ref(trans, root,
@@ -652,6 +655,7 @@ next_slot:
 
 				BUG_ON(ret);
 			}
+			path->leave_spinning = 0;
 			btrfs_release_path(root, path);
 			if (disk_bytenr != 0)
 				inode_add_bytes(inode, extent_end - end);
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 3d46fa1f29a4..6b627c611808 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -73,6 +73,8 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
+	path->leave_spinning = 1;
+
 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 	if (ret > 0) {
 		ret = -ENOENT;
@@ -127,6 +129,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
+	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
 				      ins_len);
 	if (ret == -EEXIST) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c427011dc453..b83a45dc717e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -134,6 +134,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
+	path->leave_spinning = 1;
 	btrfs_set_trans_block_group(trans, inode);
 
 	key.objectid = inode->i_ino;
@@ -167,9 +168,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 			cur_size = min_t(unsigned long, compressed_size,
 				       PAGE_CACHE_SIZE);
 
-			kaddr = kmap(cpage);
+			kaddr = kmap_atomic(cpage, KM_USER0);
 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
-			kunmap(cpage);
+			kunmap_atomic(kaddr, KM_USER0);
 
 			i++;
 			ptr += cur_size;
@@ -1452,6 +1453,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
 
+	path->leave_spinning = 1;
 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
 				 file_pos + num_bytes, file_pos, &hint);
 	BUG_ON(ret);
@@ -1474,6 +1476,10 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_set_file_extent_compression(leaf, fi, compression);
 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
+
+	btrfs_unlock_up_safe(path, 1);
+	btrfs_set_lock_blocking(leaf);
+
 	btrfs_mark_buffer_dirty(leaf);
 
 	inode_add_bytes(inode, num_bytes);
@@ -1486,8 +1492,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 					       root->root_key.objectid,
 					       trans->transid, inode->i_ino, &ins);
 	BUG_ON(ret);
-
 	btrfs_free_path(path);
+
 	return 0;
 }
 
@@ -2118,6 +2124,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
+	path->leave_spinning = 1;
 	ret = btrfs_lookup_inode(trans, root, path,
 				 &BTRFS_I(inode)->location, 1);
 	if (ret) {
@@ -2164,6 +2171,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 		goto err;
 	}
 
+	path->leave_spinning = 1;
 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
 				    name, name_len, -1);
 	if (IS_ERR(di)) {
@@ -2515,6 +2523,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	key.type = (u8)-1;
 
 search_again:
+	path->leave_spinning = 1;
 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 	if (ret < 0)
 		goto error;
@@ -2661,6 +2670,7 @@ delete:
 			break;
 		}
 		if (found_extent) {
+			btrfs_set_path_blocking(path);
 			ret = btrfs_free_extent(trans, root, extent_start,
 						extent_num_bytes,
 						leaf->start, root_owner,
@@ -3466,6 +3476,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	sizes[0] = sizeof(struct btrfs_inode_item);
 	sizes[1] = name_len + sizeof(*ref);
 
+	path->leave_spinning = 1;
 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
 	if (ret != 0)
 		goto fail;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 6d8db2f5c38d..a5310c0f41e2 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -96,11 +96,12 @@ int btrfs_try_spin_lock(struct extent_buffer *eb)
 {
 	int i;
 
-	spin_nested(eb);
-	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-		return 1;
-	spin_unlock(&eb->lock);
-
+	if (btrfs_spin_on_block(eb)) {
+		spin_nested(eb);
+		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+			return 1;
+		spin_unlock(&eb->lock);
+	}
 	/* spin for a bit on the BLOCKING flag */
 	for (i = 0; i < 2; i++) {
 		cpu_relax();
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9c462fbd60fa..a93934fc93bd 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -203,7 +203,6 @@ static int process_one_buffer(struct btrfs_root *log,
 		mutex_lock(&log->fs_info->pinned_mutex);
 		btrfs_update_pinned_extents(log->fs_info->extent_root,
 					    eb->start, eb->len, 1);
-		mutex_unlock(&log->fs_info->pinned_mutex);
 	}
 
 	if (btrfs_buffer_uptodate(eb, gen)) {