author     Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-05-16 11:00:02 -0400
committer  Jan Schmidt <list.btrfs@jan-o-sch.net>  2012-05-26 06:17:54 -0400
commit     815a51c74ad14864d0a8fff5eea983819c18feae (patch)
tree       be0835a915143fe9fa63a1c6f2a40dbfdc0c8646 /fs/btrfs/extent_io.c
parent     64947ec0d16dd20d6542b58cf82c8d5f9678cabf (diff)
Btrfs: dummy extent buffers for tree mod log
The tree modification log needs two ways to create dummy extent buffers: once by allocating a fresh one (to rebuild an old root) and once by cloning an existing one (to make private rewind modifications to it).

Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
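The two helpers added here, btrfs_clone_extent_buffer() and alloc_dummy_extent_buffer(), are consumed by the tree mod log rewind code introduced later in this series. The sketch below is not part of the patch; it only illustrates the intended call pattern. The example_* wrapper names and the use of root->nodesize as the buffer length are assumptions for illustration; only the extent-buffer calls themselves come from the interfaces shown in the diff.

/*
 * Illustrative sketch only (not part of this commit): hypothetical callers
 * showing how the tree mod log is expected to use the new helpers.
 */
#include "ctree.h"
#include "extent_io.h"

/* Rebuild an old root from scratch: a fresh, unmapped dummy buffer. */
static struct extent_buffer *example_rebuild_old_root(struct btrfs_root *root,
                                                      u64 logical)
{
        /* using root->nodesize as the buffer length is an assumption here */
        return alloc_dummy_extent_buffer(logical, root->nodesize);
}

/* Private rewind: clone a live buffer so modifications stay local to us. */
static struct extent_buffer *example_private_rewind(struct extent_buffer *src)
{
        struct extent_buffer *clone;

        clone = btrfs_clone_extent_buffer(src);
        if (!clone)
                return NULL;
        /* rewind modifications are applied to 'clone', never to 'src' */
        return clone;
}

Either kind of dummy buffer carries EXTENT_BUFFER_DUMMY in eb->bflags and is released with a plain free_extent_buffer(); the changes to release_extent_buffer() and btrfs_release_extent_buffer_page() in the diff below make that release path skip the radix tree and page->mapping handling for such buffers.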
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c  80
1 file changed, 73 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2fb52c26c677..3daed70a401a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3930,6 +3930,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
         eb->start = start;
         eb->len = len;
         eb->tree = tree;
+        eb->bflags = 0;
         rwlock_init(&eb->lock);
         atomic_set(&eb->write_locks, 0);
         atomic_set(&eb->read_locks, 0);
@@ -3967,6 +3968,60 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
         return eb;
 }
 
+struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
+{
+        unsigned long i;
+        struct page *p;
+        struct extent_buffer *new;
+        unsigned long num_pages = num_extent_pages(src->start, src->len);
+
+        new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
+        if (new == NULL)
+                return NULL;
+
+        for (i = 0; i < num_pages; i++) {
+                p = alloc_page(GFP_ATOMIC);
+                BUG_ON(!p);
+                attach_extent_buffer_page(new, p);
+                WARN_ON(PageDirty(p));
+                SetPageUptodate(p);
+                new->pages[i] = p;
+        }
+
+        copy_extent_buffer(new, src, 0, 0, src->len);
+        set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
+        set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+
+        return new;
+}
+
+struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+{
+        struct extent_buffer *eb;
+        unsigned long num_pages = num_extent_pages(0, len);
+        unsigned long i;
+
+        eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
+        if (!eb)
+                return NULL;
+
+        for (i = 0; i < num_pages; i++) {
+                eb->pages[i] = alloc_page(GFP_ATOMIC);
+                if (!eb->pages[i])
+                        goto err;
+        }
+        set_extent_buffer_uptodate(eb);
+        btrfs_set_header_nritems(eb, 0);
+        set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+
+        return eb;
+err:
+        for (i--; i > 0; i--)
+                __free_page(eb->pages[i]);
+        __free_extent_buffer(eb);
+        return NULL;
+}
+
 static int extent_buffer_under_io(struct extent_buffer *eb)
 {
         return (atomic_read(&eb->io_pages) ||
@@ -3982,6 +4037,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
 {
         unsigned long index;
         struct page *page;
+        int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
 
         BUG_ON(extent_buffer_under_io(eb));
 
@@ -3992,7 +4048,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
         do {
                 index--;
                 page = extent_buffer_page(eb, index);
-                if (page) {
+                if (page && mapped) {
                         spin_lock(&page->mapping->private_lock);
                         /*
                          * We do this since we'll remove the pages after we've
@@ -4017,6 +4073,8 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
                         }
                         spin_unlock(&page->mapping->private_lock);
 
+                }
+                if (page) {
                         /* One for when we alloced the page */
                         page_cache_release(page);
                 }
@@ -4235,14 +4293,18 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 {
         WARN_ON(atomic_read(&eb->refs) == 0);
         if (atomic_dec_and_test(&eb->refs)) {
-                struct extent_io_tree *tree = eb->tree;
+                if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
+                        spin_unlock(&eb->refs_lock);
+                } else {
+                        struct extent_io_tree *tree = eb->tree;
 
-                spin_unlock(&eb->refs_lock);
+                        spin_unlock(&eb->refs_lock);
 
-                spin_lock(&tree->buffer_lock);
-                radix_tree_delete(&tree->buffer,
-                                  eb->start >> PAGE_CACHE_SHIFT);
-                spin_unlock(&tree->buffer_lock);
+                        spin_lock(&tree->buffer_lock);
+                        radix_tree_delete(&tree->buffer,
+                                          eb->start >> PAGE_CACHE_SHIFT);
+                        spin_unlock(&tree->buffer_lock);
+                }
 
                 /* Should be safe to release our pages at this point */
                 btrfs_release_extent_buffer_page(eb, 0);
@@ -4260,6 +4322,10 @@ void free_extent_buffer(struct extent_buffer *eb)
 
         spin_lock(&eb->refs_lock);
         if (atomic_read(&eb->refs) == 2 &&
+            test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+                atomic_dec(&eb->refs);
+
+        if (atomic_read(&eb->refs) == 2 &&
             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
             !extent_buffer_under_io(eb) &&
             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))