author	Chris Mason <chris.mason@oracle.com>	2008-06-25 16:01:30 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:03 -0400
commit	925baeddc5b0764a53f2214a1253251bab0e0324 (patch)
tree	0e069bf9cc1c4ecd17c812fd1fb81bf807909ee6 /fs/btrfs/extent_io.c
parent	1cc127b5d1b71453091859301de4a7dd6ee96fa8 (diff)
Btrfs: Start btree concurrency work.
The allocation trees and the chunk trees are serialized via their own
dedicated mutexes.  This means allocation location is still not very
fine grained.

The main FS btree is protected by locks on each block in the btree.
Locks are taken top / down, and as processing finishes on a given level
of the tree, the lock is released after locking the lower level.

The end result of a search is now a path where only the lowest level is
locked.  Releasing or freeing the path drops any locks held.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
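The top / down locking described above is classic hand-over-hand ("lock
coupling").  The sketch below is only an illustration of that ordering, not
the btrfs code: struct demo_node, struct demo_path, pick_child() and the
pthread mutexes are hypothetical stand-ins for the extent buffers,
struct btrfs_path, the key lookup and the per-block lock used by the patch.

/*
 * Illustrative sketch, assuming a simple in-memory btree: per-block locks
 * taken top / down, and a path that remembers which blocks are still
 * locked so that releasing it drops every lock.  NOT btrfs code.
 */
#include <pthread.h>
#include <string.h>

#define DEMO_MAX_LEVEL 8

struct demo_node {
	pthread_mutex_t lock;			/* one lock per btree block */
	int level;				/* 0 == leaf */
	int nr_children;
	struct demo_node *children[16];
};

struct demo_path {
	struct demo_node *nodes[DEMO_MAX_LEVEL];
	int locked[DEMO_MAX_LEVEL];		/* do we still hold nodes[i]->lock? */
};

/* Hypothetical helper: pick the child covering 'key' (a key binary search in real code). */
static struct demo_node *pick_child(struct demo_node *node, unsigned long key)
{
	return node->children[key % node->nr_children];
}

/*
 * Descend from the root to a leaf.  Each child is locked *before* the
 * parent lock is released, so no writer can slip in between two levels.
 * When the walk finishes, only the lowest level remains locked.
 */
static void demo_search(struct demo_node *root, unsigned long key,
			struct demo_path *path)
{
	struct demo_node *cur = root;

	memset(path, 0, sizeof(*path));
	pthread_mutex_lock(&cur->lock);
	while (1) {
		path->nodes[cur->level] = cur;
		path->locked[cur->level] = 1;
		if (cur->level == 0)
			break;				/* leaf reached, still locked */

		struct demo_node *child = pick_child(cur, key);

		pthread_mutex_lock(&child->lock);	/* lock the lower level first... */
		pthread_mutex_unlock(&cur->lock);	/* ...then drop the parent */
		path->locked[cur->level] = 0;
		cur = child;
	}
}

/* Releasing the path drops any locks still held, as the changelog describes. */
static void demo_release_path(struct demo_path *path)
{
	for (int i = 0; i < DEMO_MAX_LEVEL; i++) {
		if (path->locked[i]) {
			pthread_mutex_unlock(&path->nodes[i]->lock);
			path->locked[i] = 0;
		}
	}
}

In the patch itself the allocation and chunk trees keep their coarser
dedicated mutexes; only the main FS btree gets per-block locking of this
shape.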
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	8
1 file changed, 0 insertions, 8 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 17c508a941eb..bd15cdcaba95 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2889,7 +2889,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		lock_page(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
 		else
@@ -2907,7 +2906,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			end = start + PAGE_CACHE_SIZE - 1;
 			if (test_range_bit(tree, start, end,
 					   EXTENT_DIRTY, 0)) {
-				unlock_page(page);
 				continue;
 			}
 		}
@@ -2919,7 +2917,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 					  PAGECACHE_TAG_DIRTY);
 		}
 		read_unlock_irq(&page->mapping->tree_lock);
-		unlock_page(page);
 	}
 	return 0;
 }
@@ -2948,17 +2945,12 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * on us if the page isn't already dirty.
 		 */
 		if (i == 0) {
-			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
-			lock_page(page);
 			set_page_extent_mapped(page);
-			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
-		if (i == 0)
-			unlock_page(page);
 	}
 	return set_extent_dirty(tree, eb->start,
 				eb->start + eb->len - 1, GFP_NOFS);