author    Chris Mason <chris.mason@oracle.com>  2009-03-13 11:00:37 -0400
committer Chris Mason <chris.mason@oracle.com>  2009-03-24 16:14:28 -0400
commit    b9473439d3e84d9fc1a0a83faca69cc1b7566341
tree      bef8321b80589026b617d61d0fabaf545d459269
parent    89573b9c516b24af8a3b9958dd5afca8fa874e3d
Btrfs: leave btree locks spinning more often
btrfs_mark_buffer_dirty() would set dirty bits in the extent_io tree for the buffers it was dirtying. This could require a kmalloc and was not atomic, so anyone who called btrfs_mark_buffer_dirty() had to set any btree locks they were holding to blocking first.

This commit changes dirty tracking for extent buffers to just use a flag in the extent buffer. Now that we have one and only one extent buffer per page, this can be done safely without losing dirty bits along the way.

It also introduces a path->leave_spinning flag that callers of btrfs_search_slot() can use to indicate they will properly deal with a path returned where all the locks are spinning instead of blocking. Many of the btree search callers now expect spinning paths, resulting in better btree concurrency overall.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
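As a minimal sketch of the new dirty tracking (illustrative only, not code from this commit; it assumes the EXTENT_BUFFER_DIRTY bit and the per-buffer bflags word used for this purpose):

	/*
	 * Marking an extent buffer dirty becomes an atomic bit set on
	 * the buffer itself.  Unlike the old extent_io-tree based
	 * tracking, it needs no allocation and cannot sleep, so it is
	 * safe while holding a spinning btree lock.
	 */
	static int mark_eb_dirty(struct extent_buffer *eb)
	{
		/* returns nonzero if the buffer was already dirty */
		return test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
	}

The helper name mark_eb_dirty is made up for illustration; the real change lives in set_extent_buffer_dirty() in extent_io.c, outside this ctree.c diff.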
Diffstat (limited to 'fs/btrfs/ctree.c')
-rw-r--r--  fs/btrfs/ctree.c  19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 3764248bdc05..8686a3d2ab3a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1684,7 +1684,8 @@ done:
 	 * we don't really know what they plan on doing with the path
 	 * from here on, so for now just mark it as blocking
 	 */
-	btrfs_set_path_blocking(p);
+	if (!p->leave_spinning)
+		btrfs_set_path_blocking(p);
 	return ret;
 }
 
@@ -3032,26 +3033,27 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
 		return -EAGAIN;
 	}
 
+	btrfs_set_path_blocking(path);
 	ret = split_leaf(trans, root, &orig_key, path,
 			 sizeof(struct btrfs_item), 1);
 	path->keep_locks = 0;
 	BUG_ON(ret);
 
+	btrfs_unlock_up_safe(path, 1);
+	leaf = path->nodes[0];
+	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
+
+split:
 	/*
 	 * make sure any changes to the path from split_leaf leave it
 	 * in a blocking state
 	 */
 	btrfs_set_path_blocking(path);
 
-	leaf = path->nodes[0];
-	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
-
-split:
 	item = btrfs_item_nr(leaf, path->slots[0]);
 	orig_offset = btrfs_item_offset(leaf, item);
 	item_size = btrfs_item_size(leaf, item);
 
-
 	buf = kmalloc(item_size, GFP_NOFS);
 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
 			    path->slots[0]), item_size);
@@ -3545,7 +3547,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
 	}
 
 	btrfs_set_header_nritems(leaf, nritems + nr);
-	btrfs_mark_buffer_dirty(leaf);
 
 	ret = 0;
 	if (slot == 0) {
@@ -3553,6 +3554,8 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
 		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
 	}
+	btrfs_unlock_up_safe(path, 1);
+	btrfs_mark_buffer_dirty(leaf);
 
 	if (btrfs_leaf_free_space(root, leaf) < 0) {
 		btrfs_print_leaf(root, leaf);
@@ -3596,7 +3599,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 			    total_data, total_size, nr);
 
 out:
-	btrfs_unlock_up_safe(path, 1);
 	return ret;
 }
 
@@ -3792,6 +3794,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			slot = path->slots[1];
 			extent_buffer_get(leaf);
 
+			btrfs_set_path_blocking(path);
 			wret = push_leaf_left(trans, root, path, 1, 1);
 			if (wret < 0 && wret != -ENOSPC)
 				ret = wret;
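
The path->leave_spinning contract described in the commit message, seen from a caller's side: a hypothetical helper might look like the sketch below. The function name, key setup, and item update are made up; btrfs_alloc_path(), btrfs_search_slot(), btrfs_mark_buffer_dirty() and btrfs_free_path() are the real interfaces.

	/* hypothetical caller sketch, assuming trans/root/key are set up */
	static int update_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
	{
		struct btrfs_path *path;
		struct extent_buffer *leaf;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		/*
		 * Ask btrfs_search_slot() to return with the btree locks
		 * still spinning.  The update below neither sleeps nor
		 * allocates, so the path never needs to go blocking.
		 */
		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, key, path, 0, 1);
		if (ret == 0) {
			leaf = path->nodes[0];
			/* short, non-sleeping change to the item here */
			btrfs_mark_buffer_dirty(leaf);
		}
		btrfs_free_path(path);
		return ret;
	}

Skipping the switch to blocking locks on such paths is what buys the extra btree concurrency the commit message refers to.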