author		Chris Mason <chris.mason@oracle.com>	2008-07-22 11:18:08 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:05 -0400
commit		a61e6f29dc7c9d56a776a518eed92bbc61848263
tree		6856ad3e5822eacfc2ac94693b6023ef81567fd2 /fs/btrfs/extent_io.c
parent		6af118ce51b52ceda357c671550c79628b9c4a65
Btrfs: Use a mutex in the extent buffer for tree block locking
This replaces the use of the page cache lock bit for locking, which wasn't
suitable for block size < page size and couldn't be used recursively.

The mutexes alone don't fix either problem, but they are the first step.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
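As a hedged sketch of what the new per-buffer mutex enables (the helper names
below are illustrative assumptions, not part of this commit): locking moves
from the page to the extent buffer, so each tree block carries its own
sleepable lock rather than borrowing the lock bit of the page it lives in.

/*
 * Hedged sketch only -- these helper names are illustrative and are
 * not part of this commit.  Embedding a mutex in struct extent_buffer
 * makes tree block locking per-buffer rather than per-page; per the
 * message above, this is the first step toward block size < page size
 * and recursive locking, neither of which the mutex alone provides.
 */
#include <linux/mutex.h>
#include "extent_io.h"	/* struct extent_buffer, with the new ->mutex */

static inline void sketch_tree_lock(struct extent_buffer *eb)
{
	mutex_lock(&eb->mutex);		/* may sleep until the block is free */
}

static inline void sketch_tree_unlock(struct extent_buffer *eb)
{
	mutex_unlock(&eb->mutex);
}

A caller would then bracket access to a tree block with
sketch_tree_lock(eb)/sketch_tree_unlock(eb) instead of taking
lock_page()/unlock_page() on the page backing the block.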
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 32bb4ed3723d..7380449cb5b3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2690,6 +2690,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	eb->start = start;
 	eb->len = len;
+	mutex_init(&eb->mutex);
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&eb->leak_list, &buffers);
 	spin_unlock_irqrestore(&leak_lock, flags);
@@ -2837,6 +2838,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+		lock_page(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
 		else
@@ -2854,6 +2856,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			end = start + PAGE_CACHE_SIZE - 1;
 			if (test_range_bit(tree, start, end,
 					   EXTENT_DIRTY, 0)) {
+				unlock_page(page);
 				continue;
 			}
 		}
@@ -2865,6 +2868,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 					 PAGECACHE_TAG_DIRTY);
 		}
 		read_unlock_irq(&page->mapping->tree_lock);
+		unlock_page(page);
 	}
 	return 0;
 }
@@ -2893,12 +2897,17 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * on us if the page isn't already dirty.
 		 */
 		if (i == 0) {
+			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
+			lock_page(page);
 			set_page_extent_mapped(page);
+			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+		if (i == 0)
+			unlock_page(page);
 	}
 	return set_extent_dirty(tree, eb->start,
 				eb->start + eb->len - 1, GFP_NOFS);