| author | Chris Mason <chris.mason@oracle.com> | 2009-03-13 11:00:37 -0400 |
| --- | --- | --- |
| committer | Chris Mason <chris.mason@oracle.com> | 2009-03-24 16:14:28 -0400 |
| commit | b9473439d3e84d9fc1a0a83faca69cc1b7566341 | |
| tree | bef8321b80589026b617d61d0fabaf545d459269 /fs/btrfs/ctree.h | |
| parent | 89573b9c516b24af8a3b9958dd5afca8fa874e3d | |
Btrfs: leave btree locks spinning more often
btrfs_mark_buffer_dirty would set dirty bits in the extent_io tree
for the buffers it was dirtying. That could require a kmalloc and
was not atomic, so anyone who called btrfs_mark_buffer_dirty had to
switch any btree locks they were holding to blocking first.
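A minimal sketch of that old calling pattern, for illustration only (not lifted from the tree):

```c
/*
 * Old pattern (sketch): setting dirty bits in the extent_io tree
 * could allocate and sleep, so any spinning btree locks on the
 * path had to be switched to blocking first.
 */
btrfs_set_path_blocking(path);
btrfs_mark_buffer_dirty(path->nodes[0]);
```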
This commit changes dirty tracking for extent buffers to just use a flag
in the extent buffer. Now that we have one and only one extent buffer
per page, this can be safely done without losing dirty bits along the way.
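A sketch of flag-based dirty tracking; the flag and field names here are assumptions based on the description above, not quoted from the patch:

```c
#include <linux/bitops.h>

/*
 * Sketch only: mark an extent buffer dirty with one atomic bit op.
 * No allocation is needed, so this is safe while holding a spinning
 * btree lock. EXTENT_BUFFER_DIRTY and eb->bflags are assumed names.
 */
static int mark_eb_dirty(struct extent_buffer *eb)
{
	/* returns 0 if the buffer was already dirty */
	return !test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
}
```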
This also introduces a path->leave_spinning flag that callers of
btrfs_search_slot can use to indicate they will properly deal with a
path returned where all the locks are spinning instead of blocking.
Many of the btree search callers now expect spinning paths,
resulting in better btree concurrency overall.
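For callers that opt in, the flow might look like this illustrative fragment; trans, root and key are assumed to be set up by the caller:

```c
struct btrfs_path *path;
int ret;

path = btrfs_alloc_path();
if (!path)
	return -ENOMEM;

/* tell search_slot we can cope with spinning locks on return */
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
/* ... use the path; locks are spinning, so don't sleep here ... */
btrfs_free_path(path);
```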
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/ctree.h')
-rw-r--r-- fs/btrfs/ctree.h | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
```diff
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 08d9f8d15538..4ddce91cf3f9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,15 +401,16 @@ struct btrfs_path {
 	int locks[BTRFS_MAX_LEVEL];
 	int reada;
 	/* keep some upper locks as we walk down */
-	int keep_locks;
-	int skip_locking;
 	int lowest_level;
 
 	/*
 	 * set by btrfs_split_item, tells search_slot to keep all locks
 	 * and to force calls to keep space in the nodes
 	 */
-	int search_for_split;
+	unsigned int search_for_split:1;
+	unsigned int keep_locks:1;
+	unsigned int skip_locking:1;
+	unsigned int leave_spinning:1;
 };
 
 /*
@@ -779,6 +780,11 @@ struct btrfs_fs_info {
 	atomic_t throttle_gen;
 
 	u64 total_pinned;
+
+	/* protected by the delalloc lock, used to keep from writing
+	 * metadata until there is a nice batch
+	 */
+	u64 dirty_metadata_bytes;
 	struct list_head dirty_cowonly_roots;
 
 	struct btrfs_fs_devices *fs_devices;
```
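The new dirty_metadata_bytes counter accumulates dirtied metadata so writeback can wait for a reasonable batch. A sketch of that accounting, assuming updates happen under the delalloc lock as the comment in the hunk above states; the threshold and writeback helper are hypothetical names:

```c
/*
 * Sketch: batch metadata writeback. DIRTY_METADATA_THRESH and
 * kick_metadata_writeback() are hypothetical, for illustration.
 */
u64 dirty;

spin_lock(&fs_info->delalloc_lock);
fs_info->dirty_metadata_bytes += eb->len;
dirty = fs_info->dirty_metadata_bytes;
spin_unlock(&fs_info->delalloc_lock);

if (dirty > DIRTY_METADATA_THRESH)
	kick_metadata_writeback(fs_info);
```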