path: root/fs/btrfs
author		Chris Mason <chris.mason@oracle.com>	2008-01-24 16:13:08 -0500
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:03:59 -0400
commit		d1310b2e0cd98eb1348553e69b73827b436dca7b (patch)
tree		f55e9c380df0f3a7e9fb792b2308b52b632b90aa /fs/btrfs
parent		5f56406aabdf5444d040c5955effc665b1d0dbaf (diff)
Btrfs: Split the extent_map code into two parts
There is now extent_map for mapping file offsets to disk, and extent_io for state tracking, IO submission and extent_buffers.

The new extent_map code shifts from [start, end] pairs to [start, len], and pushes the locking out into the caller. This allows a few performance optimizations and is easier to use.

A number of extent_map usage bugs were fixed, mostly related to failing to remove extent_map entries when changing the file.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
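The two changes called out above, caller-held locking and [start, len] extents, are visible in the btree_get_extent() hunk in disk-io.c below. The following is only a rough sketch of the new extent_map calling convention, modeled on that hunk; cache_whole_file_mapping() is an illustrative name, not a function added by this patch:

/*
 * Sketch of the new extent_map convention: the caller takes
 * em_tree->lock around lookup/insert, and a mapping is described by
 * [start, len] rather than [start, end].  Modeled on btree_get_extent()
 * as changed by this patch; the function name here is made up.
 */
static struct extent_map *cache_whole_file_mapping(struct inode *inode,
						   u64 start, u64 len)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	spin_lock(&em_tree->lock);	/* locking is now up to the caller */
	em = lookup_extent_mapping(em_tree, start, len);
	spin_unlock(&em_tree->lock);
	if (em)
		return em;

	em = alloc_extent_map(GFP_NOFS);
	if (!em || IS_ERR(em))
		return NULL;
	em->start = 0;
	em->len = i_size_read(inode);	/* a length, not an end offset */
	em->block_start = 0;
	em->bdev = inode->i_sb->s_bdev;

	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	if (ret == -EEXIST) {		/* raced with another insert */
		free_extent_map(em);
		em = NULL;
	}
	return em;
}

Because the lock is held only around the tree operations themselves, callers are free to batch lookups or drop the lock before doing IO, which is where the performance headroom mentioned above comes from.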
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/Makefile       |    3
-rw-r--r--  fs/btrfs/btrfs_inode.h  |    2
-rw-r--r--  fs/btrfs/ctree.h        |   15
-rw-r--r--  fs/btrfs/disk-io.c      |  123
-rw-r--r--  fs/btrfs/extent-tree.c  |   43
-rw-r--r--  fs/btrfs/extent_io.c    | 3089
-rw-r--r--  fs/btrfs/extent_io.h    |  193
-rw-r--r--  fs/btrfs/extent_map.c   | 3119
-rw-r--r--  fs/btrfs/extent_map.h   |  214
-rw-r--r--  fs/btrfs/file.c         |   29
-rw-r--r--  fs/btrfs/inode.c        |  175
-rw-r--r--  fs/btrfs/super.c        |   10
-rw-r--r--  fs/btrfs/transaction.c  |    8
-rw-r--r--  fs/btrfs/transaction.h  |    2
14 files changed, 3612 insertions, 3413 deletions
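The state-tracking half of the split lives in the new extent_io.c below: per-range bits (locked, dirty, delalloc, writeback and so on) are kept in a struct extent_io_tree hanging off each inode. A hedged sketch of how a caller drives that API, modeled on the relocate_inode_pages() hunk in extent-tree.c; mark_page_delalloc() is a made-up wrapper, not part of the patch:

/*
 * Illustrative use of the extent_io state API as exercised by the
 * relocate_inode_pages() hunk in this patch.  The wrapper name is
 * invented for this sketch.
 */
static void mark_page_delalloc(struct inode *inode, struct page *page)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

	/* lock the byte range, flag it delalloc, then release it */
	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
	set_extent_delalloc(io_tree, page_start, page_end, GFP_NOFS);
	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
	set_page_dirty(page);
}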
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 72cc3e16741f..9c61609f3714 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -5,7 +5,8 @@ obj-m := btrfs.o
 btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
	  hash.o file-item.o inode-item.o inode-map.o disk-io.o \
	  transaction.o bit-radix.o inode.o file.o tree-defrag.o \
-	  extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o
+	  extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
+	  extent_io.o
 
 ifeq ($(CONFIG_FS_POSIX_ACL),y)
 btrfs-y += acl.o
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 9fc99883c70a..9a0647dc5a0b 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -20,6 +20,7 @@
 #define __BTRFS_I__
 
 #include "extent_map.h"
+#include "extent_io.h"
 
 /* in memory btrfs inode */
 struct btrfs_inode {
@@ -27,6 +28,7 @@ struct btrfs_inode {
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_key location;
 	struct extent_map_tree extent_tree;
+	struct extent_io_tree io_tree;
 	struct inode vfs_inode;
 
 	u64 ordered_trans;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9f2975b55c9..6c65473e0fe3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -27,6 +27,7 @@
 #include <linux/completion.h>
 #include <asm/kmap_types.h>
 #include "bit-radix.h"
+#include "extent_io.h"
 #include "extent_map.h"
 
 struct btrfs_trans_handle;
@@ -314,11 +315,11 @@ struct btrfs_fs_info {
 	struct btrfs_root *tree_root;
 	struct radix_tree_root fs_roots_radix;
 
-	struct extent_map_tree free_space_cache;
-	struct extent_map_tree block_group_cache;
-	struct extent_map_tree pinned_extents;
-	struct extent_map_tree pending_del;
-	struct extent_map_tree extent_ins;
+	struct extent_io_tree free_space_cache;
+	struct extent_io_tree block_group_cache;
+	struct extent_io_tree pinned_extents;
+	struct extent_io_tree pending_del;
+	struct extent_io_tree extent_ins;
 
 	u64 generation;
 	u64 last_trans_committed;
@@ -956,7 +957,7 @@ u32 btrfs_count_snapshots_in_path(struct btrfs_root *root,
 			      u64 first_extent);
 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root);
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy);
+int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
 						 btrfs_fs_info *info,
 						 u64 bytenr);
@@ -1001,7 +1002,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
 		      u64 owner_objectid, u64 owner_offset, int pin);
 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
-			       struct extent_map_tree *unpin);
+			       struct extent_io_tree *unpin);
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root,
 			 u64 bytenr, u64 num_bytes,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5d1f9bca2712..4c4ebea0b2a9 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -43,14 +43,14 @@ static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
 }
 #endif
 
-static struct extent_map_ops btree_extent_map_ops;
+static struct extent_io_ops btree_extent_io_ops;
 
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize)
 {
 	struct inode *btree_inode = root->fs_info->btree_inode;
 	struct extent_buffer *eb;
-	eb = find_extent_buffer(&BTRFS_I(btree_inode)->extent_tree,
+	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
 				bytenr, blocksize, GFP_NOFS);
 	return eb;
 }
@@ -61,13 +61,13 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 	struct inode *btree_inode = root->fs_info->btree_inode;
 	struct extent_buffer *eb;
 
-	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->extent_tree,
+	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
 				 bytenr, blocksize, NULL, GFP_NOFS);
 	return eb;
 }
 
 struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
-				    size_t page_offset, u64 start, u64 end,
+				    size_t page_offset, u64 start, u64 len,
 				    int create)
 {
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -75,7 +75,9 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
 	int ret;
 
 again:
-	em = lookup_extent_mapping(em_tree, start, end);
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, start, len);
+	spin_unlock(&em_tree->lock);
 	if (em) {
 		goto out;
 	}
@@ -85,11 +87,14 @@ again:
 		goto out;
 	}
 	em->start = 0;
-	em->end = (i_size_read(inode) & ~((u64)PAGE_CACHE_SIZE -1)) - 1;
+	em->len = i_size_read(inode);
 	em->block_start = 0;
-	em->block_end = em->end;
 	em->bdev = inode->i_sb->s_bdev;
+
+	spin_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em);
+	spin_unlock(&em_tree->lock);
+
 	if (ret == -EEXIST) {
 		free_extent_map(em);
 		em = NULL;
@@ -175,13 +180,13 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 
 int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 {
-	struct extent_map_tree *tree;
+	struct extent_io_tree *tree;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 found_start;
 	int found_level;
 	unsigned long len;
 	struct extent_buffer *eb;
-	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 	if (page->private == EXTENT_PAGE_PRIVATE)
 		goto out;
@@ -230,16 +235,16 @@ static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
 
 static int btree_writepage(struct page *page, struct writeback_control *wbc)
 {
-	struct extent_map_tree *tree;
-	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	return extent_write_full_page(tree, page, btree_get_extent, wbc);
 }
 
 static int btree_writepages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
-	struct extent_map_tree *tree;
-	tree = &BTRFS_I(mapping->host)->extent_tree;
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(mapping->host)->io_tree;
 	if (wbc->sync_mode == WB_SYNC_NONE) {
 		u64 num_dirty;
 		u64 start = 0;
@@ -264,18 +269,20 @@ static int btree_writepages(struct address_space *mapping,
 
 int btree_readpage(struct file *file, struct page *page)
 {
-	struct extent_map_tree *tree;
-	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	return extent_read_full_page(tree, page, btree_get_extent);
 }
 
 static int btree_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
-	struct extent_map_tree *tree;
+	struct extent_io_tree *tree;
+	struct extent_map_tree *map;
 	int ret;
 
-	tree = &BTRFS_I(page->mapping->host)->extent_tree;
-	ret = try_release_extent_mapping(tree, page);
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	map = &BTRFS_I(page->mapping->host)->extent_tree;
+	ret = try_release_extent_mapping(map, tree, page);
 	if (ret == 1) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
@@ -286,8 +293,8 @@ static int btree_releasepage(struct page *page, gfp_t unused_gfp_flags)
 
 static void btree_invalidatepage(struct page *page, unsigned long offset)
 {
-	struct extent_map_tree *tree;
-	tree = &BTRFS_I(page->mapping->host)->extent_tree;
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	extent_invalidatepage(tree, page, offset);
 	btree_releasepage(page, GFP_NOFS);
 }
@@ -331,7 +338,7 @@ int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
 	if (!buf)
 		return 0;
-	read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
+	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
 				 buf, 0, 0);
 	free_extent_buffer(buf);
 	return ret;
@@ -342,40 +349,39 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 {
 	struct extent_buffer *buf = NULL;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	struct extent_map_tree *extent_tree;
+	struct extent_io_tree *io_tree;
 	u64 end;
 	int ret;
 
-	extent_tree = &BTRFS_I(btree_inode)->extent_tree;
+	io_tree = &BTRFS_I(btree_inode)->io_tree;
 
 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
 	if (!buf)
 		return NULL;
-	read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
-				 buf, 0, 1);
+	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, 1);
 
 	if (buf->flags & EXTENT_CSUM)
 		return buf;
 
 	end = buf->start + PAGE_CACHE_SIZE - 1;
-	if (test_range_bit(extent_tree, buf->start, end, EXTENT_CSUM, 1)) {
+	if (test_range_bit(io_tree, buf->start, end, EXTENT_CSUM, 1)) {
 		buf->flags |= EXTENT_CSUM;
 		return buf;
 	}
 
-	lock_extent(extent_tree, buf->start, end, GFP_NOFS);
+	lock_extent(io_tree, buf->start, end, GFP_NOFS);
 
-	if (test_range_bit(extent_tree, buf->start, end, EXTENT_CSUM, 1)) {
+	if (test_range_bit(io_tree, buf->start, end, EXTENT_CSUM, 1)) {
 		buf->flags |= EXTENT_CSUM;
 		goto out_unlock;
 	}
 
 	ret = csum_tree_block(root, buf, 1);
-	set_extent_bits(extent_tree, buf->start, end, EXTENT_CSUM, GFP_NOFS);
+	set_extent_bits(io_tree, buf->start, end, EXTENT_CSUM, GFP_NOFS);
 	buf->flags |= EXTENT_CSUM;
 
 out_unlock:
-	unlock_extent(extent_tree, buf->start, end, GFP_NOFS);
+	unlock_extent(io_tree, buf->start, end, GFP_NOFS);
 	return buf;
 }
 
@@ -385,7 +391,7 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	struct inode *btree_inode = root->fs_info->btree_inode;
 	if (btrfs_header_generation(buf) ==
 	    root->fs_info->running_transaction->transid)
-		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree,
+		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
 					  buf);
 	return 0;
 }
@@ -394,7 +400,7 @@ int wait_on_tree_block_writeback(struct btrfs_root *root,
 				 struct extent_buffer *buf)
 {
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->extent_tree,
+	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
 					buf);
 	return 0;
 }
@@ -659,20 +665,23 @@ struct btrfs_root *open_ctree(struct super_block *sb)
 	fs_info->btree_inode->i_nlink = 1;
 	fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
 	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
-	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
+	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
 			     fs_info->btree_inode->i_mapping,
 			     GFP_NOFS);
-	BTRFS_I(fs_info->btree_inode)->extent_tree.ops = &btree_extent_map_ops;
+	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
+			     GFP_NOFS);
+
+	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
 
-	extent_map_tree_init(&fs_info->free_space_cache,
+	extent_io_tree_init(&fs_info->free_space_cache,
 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
-	extent_map_tree_init(&fs_info->block_group_cache,
+	extent_io_tree_init(&fs_info->block_group_cache,
 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
-	extent_map_tree_init(&fs_info->pinned_extents,
+	extent_io_tree_init(&fs_info->pinned_extents,
 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
-	extent_map_tree_init(&fs_info->pending_del,
+	extent_io_tree_init(&fs_info->pending_del,
 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
-	extent_map_tree_init(&fs_info->extent_ins,
+	extent_io_tree_init(&fs_info->extent_ins,
 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
 	fs_info->do_barriers = 1;
 	fs_info->closing = 0;
@@ -787,7 +796,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
 
 	if (!btrfs_test_opt(root, NOBARRIER))
 		blkdev_issue_flush(sb->s_bdev, NULL);
-	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree, super);
+	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, super);
 	ret = sync_page_range_nolock(btree_inode, btree_inode->i_mapping,
 				     super->start, super->len);
 	if (!btrfs_test_opt(root, NOBARRIER))
@@ -864,12 +873,12 @@ int close_ctree(struct btrfs_root *root)
 
 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
-	extent_map_tree_empty_lru(&fs_info->free_space_cache);
-	extent_map_tree_empty_lru(&fs_info->block_group_cache);
-	extent_map_tree_empty_lru(&fs_info->pinned_extents);
-	extent_map_tree_empty_lru(&fs_info->pending_del);
-	extent_map_tree_empty_lru(&fs_info->extent_ins);
-	extent_map_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+	extent_io_tree_empty_lru(&fs_info->free_space_cache);
+	extent_io_tree_empty_lru(&fs_info->block_group_cache);
+	extent_io_tree_empty_lru(&fs_info->pinned_extents);
+	extent_io_tree_empty_lru(&fs_info->pending_del);
+	extent_io_tree_empty_lru(&fs_info->extent_ins);
+	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
 
 	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
 
@@ -892,13 +901,13 @@ int close_ctree(struct btrfs_root *root)
 int btrfs_buffer_uptodate(struct extent_buffer *buf)
 {
 	struct inode *btree_inode = buf->first_page->mapping->host;
-	return extent_buffer_uptodate(&BTRFS_I(btree_inode)->extent_tree, buf);
+	return extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
 }
 
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
 {
 	struct inode *btree_inode = buf->first_page->mapping->host;
-	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->extent_tree,
+	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
 					  buf);
 }
 
@@ -914,7 +923,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 		       transid, root->fs_info->generation);
 		WARN_ON(1);
 	}
-	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree, buf);
+	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
 }
 
 void btrfs_throttle(struct btrfs_root *root)
@@ -941,7 +950,7 @@ void btrfs_set_buffer_defrag(struct extent_buffer *buf)
 {
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	set_extent_bits(&BTRFS_I(btree_inode)->extent_tree, buf->start,
+	set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
 			buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
 }
 
@@ -949,7 +958,7 @@ void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
 {
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	set_extent_bits(&BTRFS_I(btree_inode)->extent_tree, buf->start,
+	set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
 			buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
 			GFP_NOFS);
 }
@@ -958,7 +967,7 @@ int btrfs_buffer_defrag(struct extent_buffer *buf)
 {
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	return test_range_bit(&BTRFS_I(btree_inode)->extent_tree,
+	return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
 		     buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
 }
 
@@ -966,7 +975,7 @@ int btrfs_buffer_defrag_done(struct extent_buffer *buf)
 {
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	return test_range_bit(&BTRFS_I(btree_inode)->extent_tree,
+	return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
 		     buf->start, buf->start + buf->len - 1,
 		     EXTENT_DEFRAG_DONE, 0);
 }
@@ -975,7 +984,7 @@ int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
 {
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	return clear_extent_bits(&BTRFS_I(btree_inode)->extent_tree,
+	return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
 		     buf->start, buf->start + buf->len - 1,
 		     EXTENT_DEFRAG_DONE, GFP_NOFS);
 }
@@ -984,7 +993,7 @@ int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
 {
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	return clear_extent_bits(&BTRFS_I(btree_inode)->extent_tree,
+	return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
 		     buf->start, buf->start + buf->len - 1,
 		     EXTENT_DEFRAG, GFP_NOFS);
 }
@@ -993,10 +1002,10 @@ int btrfs_read_buffer(struct extent_buffer *buf)
 {
 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
 	struct inode *btree_inode = root->fs_info->btree_inode;
-	return read_extent_buffer_pages(&BTRFS_I(btree_inode)->extent_tree,
+	return read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
 					buf, 0, 1);
 }
 
-static struct extent_map_ops btree_extent_map_ops = {
+static struct extent_io_ops btree_extent_io_ops = {
 	.writepage_io_hook = btree_writepage_io_hook,
 };
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b69a46691a96..1cf125ab7822 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -63,7 +63,7 @@ static int cache_block_group(struct btrfs_root *root,
 	int ret;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
-	struct extent_map_tree *free_space_cache;
+	struct extent_io_tree *free_space_cache;
 	int slot;
 	u64 last = 0;
 	u64 hole_size;
@@ -158,7 +158,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
 							 btrfs_fs_info *info,
 							 u64 bytenr)
 {
-	struct extent_map_tree *block_group_cache;
+	struct extent_io_tree *block_group_cache;
 	struct btrfs_block_group_cache *block_group = NULL;
 	u64 ptr;
 	u64 start;
@@ -281,7 +281,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
 						 int data, int owner)
 {
 	struct btrfs_block_group_cache *cache;
-	struct extent_map_tree *block_group_cache;
+	struct extent_io_tree *block_group_cache;
 	struct btrfs_block_group_cache *found_group = NULL;
 	struct btrfs_fs_info *info = root->fs_info;
 	u64 used;
@@ -951,7 +951,7 @@ fail:
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root)
 {
-	struct extent_map_tree *block_group_cache;
+	struct extent_io_tree *block_group_cache;
 	struct btrfs_block_group_cache *cache;
 	int ret;
 	int err = 0;
@@ -1107,12 +1107,12 @@ static int update_pinned_extents(struct btrfs_root *root,
 	return 0;
 }
 
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy)
+int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
 {
 	u64 last = 0;
 	u64 start;
 	u64 end;
-	struct extent_map_tree *pinned_extents = &root->fs_info->pinned_extents;
+	struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
 	int ret;
 
 	while(1) {
@@ -1128,12 +1128,12 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy)
 
 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
-			       struct extent_map_tree *unpin)
+			       struct extent_io_tree *unpin)
 {
 	u64 start;
 	u64 end;
 	int ret;
-	struct extent_map_tree *free_space_cache;
+	struct extent_io_tree *free_space_cache;
 	free_space_cache = &root->fs_info->free_space_cache;
 
 	while(1) {
@@ -1329,8 +1329,8 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct
 	int err = 0;
 	u64 start;
 	u64 end;
-	struct extent_map_tree *pending_del;
-	struct extent_map_tree *pinned_extents;
+	struct extent_io_tree *pending_del;
+	struct extent_io_tree *pinned_extents;
 
 	pending_del = &extent_root->fs_info->pending_del;
 	pinned_extents = &extent_root->fs_info->pinned_extents;
@@ -1802,7 +1802,7 @@ struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 
 	set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
 			 buf->start + buf->len - 1, GFP_NOFS);
-	set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
+	set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->io_tree,
 			buf->start, buf->start + buf->len - 1,
 			EXTENT_CSUM, GFP_NOFS);
 	buf->flags |= EXTENT_CSUM;
@@ -2166,7 +2166,7 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
 	unsigned long i;
 	struct page *page;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct file_ra_state *ra;
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
@@ -2195,15 +2195,14 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
 		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-		lock_extent(em_tree, page_start, page_end, GFP_NOFS);
+		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
 
 		delalloc_start = page_start;
-		existing_delalloc =
-			count_range_bits(&BTRFS_I(inode)->extent_tree,
-					 &delalloc_start, page_end,
-					 PAGE_CACHE_SIZE, EXTENT_DELALLOC);
+		existing_delalloc = count_range_bits(io_tree,
+					     &delalloc_start, page_end,
+					     PAGE_CACHE_SIZE, EXTENT_DELALLOC);
 
-		set_extent_delalloc(em_tree, page_start,
+		set_extent_delalloc(io_tree, page_start,
 				    page_end, GFP_NOFS);
 
 		spin_lock(&root->fs_info->delalloc_lock);
@@ -2211,7 +2210,7 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
 				     existing_delalloc;
 		spin_unlock(&root->fs_info->delalloc_lock);
 
-		unlock_extent(em_tree, page_start, page_end, GFP_NOFS);
+		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
 		set_page_dirty(page);
 		unlock_page(page);
 		page_cache_release(page);
@@ -2379,7 +2378,7 @@ int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 new_size)
 	u64 cur_byte;
 	u64 total_found;
 	struct btrfs_fs_info *info = root->fs_info;
-	struct extent_map_tree *block_group_cache;
+	struct extent_io_tree *block_group_cache;
 	struct btrfs_key key;
 	struct btrfs_key found_key;
 	struct extent_buffer *leaf;
@@ -2561,7 +2560,7 @@ int btrfs_grow_extent_tree(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_block_group_item *item;
 	struct btrfs_fs_info *info = root->fs_info;
-	struct extent_map_tree *block_group_cache;
+	struct extent_io_tree *block_group_cache;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
 	int ret;
@@ -2645,7 +2644,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 	int bit;
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_fs_info *info = root->fs_info;
-	struct extent_map_tree *block_group_cache;
+	struct extent_io_tree *block_group_cache;
 	struct btrfs_key key;
 	struct btrfs_key found_key;
 	struct extent_buffer *leaf;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
new file mode 100644
index 000000000000..15cc158a0498
--- /dev/null
+++ b/fs/btrfs/extent_io.c
@@ -0,0 +1,3089 @@
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/gfp.h>
6#include <linux/pagemap.h>
7#include <linux/page-flags.h>
8#include <linux/module.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/version.h>
13#include <linux/writeback.h>
14#include <linux/pagevec.h>
15#include "extent_io.h"
16#include "extent_map.h"
17
18/* temporary define until extent_map moves out of btrfs */
19struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
20 unsigned long extra_flags,
21 void (*ctor)(void *, struct kmem_cache *,
22 unsigned long));
23
24static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache;
26
27static LIST_HEAD(buffers);
28static LIST_HEAD(states);
29
30static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
31#define BUFFER_LRU_MAX 64
32
33struct tree_entry {
34 u64 start;
35 u64 end;
36 int in_tree;
37 struct rb_node rb_node;
38};
39
40struct extent_page_data {
41 struct bio *bio;
42 struct extent_io_tree *tree;
43 get_extent_t *get_extent;
44};
45
46int __init extent_io_init(void)
47{
48 extent_state_cache = btrfs_cache_create("extent_state",
49 sizeof(struct extent_state), 0,
50 NULL);
51 if (!extent_state_cache)
52 return -ENOMEM;
53
54 extent_buffer_cache = btrfs_cache_create("extent_buffers",
55 sizeof(struct extent_buffer), 0,
56 NULL);
57 if (!extent_buffer_cache)
58 goto free_state_cache;
59 return 0;
60
61free_state_cache:
62 kmem_cache_destroy(extent_state_cache);
63 return -ENOMEM;
64}
65
66void extent_io_exit(void)
67{
68 struct extent_state *state;
69
70 while (!list_empty(&states)) {
71 state = list_entry(states.next, struct extent_state, list);
72 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
73 list_del(&state->list);
74 kmem_cache_free(extent_state_cache, state);
75
76 }
77
78 if (extent_state_cache)
79 kmem_cache_destroy(extent_state_cache);
80 if (extent_buffer_cache)
81 kmem_cache_destroy(extent_buffer_cache);
82}
83
84void extent_io_tree_init(struct extent_io_tree *tree,
85 struct address_space *mapping, gfp_t mask)
86{
87 tree->state.rb_node = NULL;
88 tree->ops = NULL;
89 tree->dirty_bytes = 0;
90 rwlock_init(&tree->lock);
91 spin_lock_init(&tree->lru_lock);
92 tree->mapping = mapping;
93 INIT_LIST_HEAD(&tree->buffer_lru);
94 tree->lru_size = 0;
95}
96EXPORT_SYMBOL(extent_io_tree_init);
97
98void extent_io_tree_empty_lru(struct extent_io_tree *tree)
99{
100 struct extent_buffer *eb;
101 while(!list_empty(&tree->buffer_lru)) {
102 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
103 lru);
104 list_del_init(&eb->lru);
105 free_extent_buffer(eb);
106 }
107}
108EXPORT_SYMBOL(extent_io_tree_empty_lru);
109
110struct extent_state *alloc_extent_state(gfp_t mask)
111{
112 struct extent_state *state;
113 unsigned long flags;
114
115 state = kmem_cache_alloc(extent_state_cache, mask);
116 if (!state || IS_ERR(state))
117 return state;
118 state->state = 0;
119 state->in_tree = 0;
120 state->private = 0;
121
122 spin_lock_irqsave(&state_lock, flags);
123 list_add(&state->list, &states);
124 spin_unlock_irqrestore(&state_lock, flags);
125
126 atomic_set(&state->refs, 1);
127 init_waitqueue_head(&state->wq);
128 return state;
129}
130EXPORT_SYMBOL(alloc_extent_state);
131
132void free_extent_state(struct extent_state *state)
133{
134 unsigned long flags;
135 if (!state)
136 return;
137 if (atomic_dec_and_test(&state->refs)) {
138 WARN_ON(state->in_tree);
139 spin_lock_irqsave(&state_lock, flags);
140 list_del(&state->list);
141 spin_unlock_irqrestore(&state_lock, flags);
142 kmem_cache_free(extent_state_cache, state);
143 }
144}
145EXPORT_SYMBOL(free_extent_state);
146
147static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
148 struct rb_node *node)
149{
150 struct rb_node ** p = &root->rb_node;
151 struct rb_node * parent = NULL;
152 struct tree_entry *entry;
153
154 while(*p) {
155 parent = *p;
156 entry = rb_entry(parent, struct tree_entry, rb_node);
157
158 if (offset < entry->start)
159 p = &(*p)->rb_left;
160 else if (offset > entry->end)
161 p = &(*p)->rb_right;
162 else
163 return parent;
164 }
165
166 entry = rb_entry(node, struct tree_entry, rb_node);
167 entry->in_tree = 1;
168 rb_link_node(node, parent, p);
169 rb_insert_color(node, root);
170 return NULL;
171}
172
173static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
174 struct rb_node **prev_ret,
175 struct rb_node **next_ret)
176{
177 struct rb_node * n = root->rb_node;
178 struct rb_node *prev = NULL;
179 struct rb_node *orig_prev = NULL;
180 struct tree_entry *entry;
181 struct tree_entry *prev_entry = NULL;
182
183 while(n) {
184 entry = rb_entry(n, struct tree_entry, rb_node);
185 prev = n;
186 prev_entry = entry;
187
188 if (offset < entry->start)
189 n = n->rb_left;
190 else if (offset > entry->end)
191 n = n->rb_right;
192 else
193 return n;
194 }
195
196 if (prev_ret) {
197 orig_prev = prev;
198 while(prev && offset > prev_entry->end) {
199 prev = rb_next(prev);
200 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
201 }
202 *prev_ret = prev;
203 prev = orig_prev;
204 }
205
206 if (next_ret) {
207 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
208 while(prev && offset < prev_entry->start) {
209 prev = rb_prev(prev);
210 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
211 }
212 *next_ret = prev;
213 }
214 return NULL;
215}
216
217static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
218{
219 struct rb_node *prev;
220 struct rb_node *ret;
221 ret = __tree_search(root, offset, &prev, NULL);
222 if (!ret)
223 return prev;
224 return ret;
225}
226
227/*
228 * utility function to look for merge candidates inside a given range.
229 * Any extents with matching state are merged together into a single
230 * extent in the tree. Extents with EXTENT_IO in their state field
231 * are not merged because the end_io handlers need to be able to do
232 * operations on them without sleeping (or doing allocations/splits).
233 *
234 * This should be called with the tree lock held.
235 */
236static int merge_state(struct extent_io_tree *tree,
237 struct extent_state *state)
238{
239 struct extent_state *other;
240 struct rb_node *other_node;
241
242 if (state->state & EXTENT_IOBITS)
243 return 0;
244
245 other_node = rb_prev(&state->rb_node);
246 if (other_node) {
247 other = rb_entry(other_node, struct extent_state, rb_node);
248 if (other->end == state->start - 1 &&
249 other->state == state->state) {
250 state->start = other->start;
251 other->in_tree = 0;
252 rb_erase(&other->rb_node, &tree->state);
253 free_extent_state(other);
254 }
255 }
256 other_node = rb_next(&state->rb_node);
257 if (other_node) {
258 other = rb_entry(other_node, struct extent_state, rb_node);
259 if (other->start == state->end + 1 &&
260 other->state == state->state) {
261 other->start = state->start;
262 state->in_tree = 0;
263 rb_erase(&state->rb_node, &tree->state);
264 free_extent_state(state);
265 }
266 }
267 return 0;
268}
269
270/*
271 * insert an extent_state struct into the tree. 'bits' are set on the
272 * struct before it is inserted.
273 *
274 * This may return -EEXIST if the extent is already there, in which case the
275 * state struct is freed.
276 *
277 * The tree lock is not taken internally. This is a utility function and
278 * probably isn't what you want to call (see set/clear_extent_bit).
279 */
280static int insert_state(struct extent_io_tree *tree,
281 struct extent_state *state, u64 start, u64 end,
282 int bits)
283{
284 struct rb_node *node;
285
286 if (end < start) {
287 printk("end < start %Lu %Lu\n", end, start);
288 WARN_ON(1);
289 }
290 if (bits & EXTENT_DIRTY)
291 tree->dirty_bytes += end - start + 1;
292 state->state |= bits;
293 state->start = start;
294 state->end = end;
295 node = tree_insert(&tree->state, end, &state->rb_node);
296 if (node) {
297 struct extent_state *found;
298 found = rb_entry(node, struct extent_state, rb_node);
299 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
300 free_extent_state(state);
301 return -EEXIST;
302 }
303 merge_state(tree, state);
304 return 0;
305}
306
307/*
308 * split a given extent state struct in two, inserting the preallocated
309 * struct 'prealloc' as the newly created second half. 'split' indicates an
310 * offset inside 'orig' where it should be split.
311 *
312 * Before calling,
313 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
314 * are two extent state structs in the tree:
315 * prealloc: [orig->start, split - 1]
316 * orig: [ split, orig->end ]
317 *
318 * The tree locks are not taken by this function. They need to be held
319 * by the caller.
320 */
321static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
322 struct extent_state *prealloc, u64 split)
323{
324 struct rb_node *node;
325 prealloc->start = orig->start;
326 prealloc->end = split - 1;
327 prealloc->state = orig->state;
328 orig->start = split;
329
330 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
331 if (node) {
332 struct extent_state *found;
333 found = rb_entry(node, struct extent_state, rb_node);
334 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
335 free_extent_state(prealloc);
336 return -EEXIST;
337 }
338 return 0;
339}
340
341/*
342 * utility function to clear some bits in an extent state struct.
343 * it will optionally wake up any one waiting on this state (wake == 1), or
344 * forcibly remove the state from the tree (delete == 1).
345 *
346 * If no bits are set on the state struct after clearing things, the
347 * struct is freed and removed from the tree
348 */
349static int clear_state_bit(struct extent_io_tree *tree,
350 struct extent_state *state, int bits, int wake,
351 int delete)
352{
353 int ret = state->state & bits;
354
355 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
356 u64 range = state->end - state->start + 1;
357 WARN_ON(range > tree->dirty_bytes);
358 tree->dirty_bytes -= range;
359 }
360 state->state &= ~bits;
361 if (wake)
362 wake_up(&state->wq);
363 if (delete || state->state == 0) {
364 if (state->in_tree) {
365 rb_erase(&state->rb_node, &tree->state);
366 state->in_tree = 0;
367 free_extent_state(state);
368 } else {
369 WARN_ON(1);
370 }
371 } else {
372 merge_state(tree, state);
373 }
374 return ret;
375}
376
377/*
378 * clear some bits on a range in the tree. This may require splitting
379 * or inserting elements in the tree, so the gfp mask is used to
380 * indicate which allocations or sleeping are allowed.
381 *
382 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
383 * the given range from the tree regardless of state (ie for truncate).
384 *
385 * the range [start, end] is inclusive.
386 *
387 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
388 * bits were already set, or zero if none of the bits were already set.
389 */
390int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
391 int bits, int wake, int delete, gfp_t mask)
392{
393 struct extent_state *state;
394 struct extent_state *prealloc = NULL;
395 struct rb_node *node;
396 unsigned long flags;
397 int err;
398 int set = 0;
399
400again:
401 if (!prealloc && (mask & __GFP_WAIT)) {
402 prealloc = alloc_extent_state(mask);
403 if (!prealloc)
404 return -ENOMEM;
405 }
406
407 write_lock_irqsave(&tree->lock, flags);
408 /*
409 * this search will find the extents that end after
410 * our range starts
411 */
412 node = tree_search(&tree->state, start);
413 if (!node)
414 goto out;
415 state = rb_entry(node, struct extent_state, rb_node);
416 if (state->start > end)
417 goto out;
418 WARN_ON(state->end < start);
419
420 /*
421 * | ---- desired range ---- |
422 * | state | or
423 * | ------------- state -------------- |
424 *
425 * We need to split the extent we found, and may flip
426 * bits on second half.
427 *
428 * If the extent we found extends past our range, we
429 * just split and search again. It'll get split again
430 * the next time though.
431 *
432 * If the extent we found is inside our range, we clear
433 * the desired bit on it.
434 */
435
436 if (state->start < start) {
437 err = split_state(tree, state, prealloc, start);
438 BUG_ON(err == -EEXIST);
439 prealloc = NULL;
440 if (err)
441 goto out;
442 if (state->end <= end) {
443 start = state->end + 1;
444 set |= clear_state_bit(tree, state, bits,
445 wake, delete);
446 } else {
447 start = state->start;
448 }
449 goto search_again;
450 }
451 /*
452 * | ---- desired range ---- |
453 * | state |
454 * We need to split the extent, and clear the bit
455 * on the first half
456 */
457 if (state->start <= end && state->end > end) {
458 err = split_state(tree, state, prealloc, end + 1);
459 BUG_ON(err == -EEXIST);
460
461 if (wake)
462 wake_up(&state->wq);
463 set |= clear_state_bit(tree, prealloc, bits,
464 wake, delete);
465 prealloc = NULL;
466 goto out;
467 }
468
469 start = state->end + 1;
470 set |= clear_state_bit(tree, state, bits, wake, delete);
471 goto search_again;
472
473out:
474 write_unlock_irqrestore(&tree->lock, flags);
475 if (prealloc)
476 free_extent_state(prealloc);
477
478 return set;
479
480search_again:
481 if (start > end)
482 goto out;
483 write_unlock_irqrestore(&tree->lock, flags);
484 if (mask & __GFP_WAIT)
485 cond_resched();
486 goto again;
487}
488EXPORT_SYMBOL(clear_extent_bit);
489
490static int wait_on_state(struct extent_io_tree *tree,
491 struct extent_state *state)
492{
493 DEFINE_WAIT(wait);
494 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
495 read_unlock_irq(&tree->lock);
496 schedule();
497 read_lock_irq(&tree->lock);
498 finish_wait(&state->wq, &wait);
499 return 0;
500}
501
502/*
503 * waits for one or more bits to clear on a range in the state tree.
504 * The range [start, end] is inclusive.
505 * The tree lock is taken by this function
506 */
507int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
508{
509 struct extent_state *state;
510 struct rb_node *node;
511
512 read_lock_irq(&tree->lock);
513again:
514 while (1) {
515 /*
516 * this search will find all the extents that end after
517 * our range starts
518 */
519 node = tree_search(&tree->state, start);
520 if (!node)
521 break;
522
523 state = rb_entry(node, struct extent_state, rb_node);
524
525 if (state->start > end)
526 goto out;
527
528 if (state->state & bits) {
529 start = state->start;
530 atomic_inc(&state->refs);
531 wait_on_state(tree, state);
532 free_extent_state(state);
533 goto again;
534 }
535 start = state->end + 1;
536
537 if (start > end)
538 break;
539
540 if (need_resched()) {
541 read_unlock_irq(&tree->lock);
542 cond_resched();
543 read_lock_irq(&tree->lock);
544 }
545 }
546out:
547 read_unlock_irq(&tree->lock);
548 return 0;
549}
550EXPORT_SYMBOL(wait_extent_bit);
551
552static void set_state_bits(struct extent_io_tree *tree,
553 struct extent_state *state,
554 int bits)
555{
556 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
557 u64 range = state->end - state->start + 1;
558 tree->dirty_bytes += range;
559 }
560 state->state |= bits;
561}
562
563/*
564 * set some bits on a range in the tree. This may require allocations
565 * or sleeping, so the gfp mask is used to indicate what is allowed.
566 *
567 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
568 * range already has the desired bits set. The start of the existing
569 * range is returned in failed_start in this case.
570 *
571 * [start, end] is inclusive
572 * This takes the tree lock.
573 */
574int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
575 int exclusive, u64 *failed_start, gfp_t mask)
576{
577 struct extent_state *state;
578 struct extent_state *prealloc = NULL;
579 struct rb_node *node;
580 unsigned long flags;
581 int err = 0;
582 int set;
583 u64 last_start;
584 u64 last_end;
585again:
586 if (!prealloc && (mask & __GFP_WAIT)) {
587 prealloc = alloc_extent_state(mask);
588 if (!prealloc)
589 return -ENOMEM;
590 }
591
592 write_lock_irqsave(&tree->lock, flags);
593 /*
594 * this search will find all the extents that end after
595 * our range starts.
596 */
597 node = tree_search(&tree->state, start);
598 if (!node) {
599 err = insert_state(tree, prealloc, start, end, bits);
600 prealloc = NULL;
601 BUG_ON(err == -EEXIST);
602 goto out;
603 }
604
605 state = rb_entry(node, struct extent_state, rb_node);
606 last_start = state->start;
607 last_end = state->end;
608
609 /*
610 * | ---- desired range ---- |
611 * | state |
612 *
613 * Just lock what we found and keep going
614 */
615 if (state->start == start && state->end <= end) {
616 set = state->state & bits;
617 if (set && exclusive) {
618 *failed_start = state->start;
619 err = -EEXIST;
620 goto out;
621 }
622 set_state_bits(tree, state, bits);
623 start = state->end + 1;
624 merge_state(tree, state);
625 goto search_again;
626 }
627
628 /*
629 * | ---- desired range ---- |
630 * | state |
631 * or
632 * | ------------- state -------------- |
633 *
634 * We need to split the extent we found, and may flip bits on
635 * second half.
636 *
637 * If the extent we found extends past our
638 * range, we just split and search again. It'll get split
639 * again the next time though.
640 *
641 * If the extent we found is inside our range, we set the
642 * desired bit on it.
643 */
644 if (state->start < start) {
645 set = state->state & bits;
646 if (exclusive && set) {
647 *failed_start = start;
648 err = -EEXIST;
649 goto out;
650 }
651 err = split_state(tree, state, prealloc, start);
652 BUG_ON(err == -EEXIST);
653 prealloc = NULL;
654 if (err)
655 goto out;
656 if (state->end <= end) {
657 set_state_bits(tree, state, bits);
658 start = state->end + 1;
659 merge_state(tree, state);
660 } else {
661 start = state->start;
662 }
663 goto search_again;
664 }
665 /*
666 * | ---- desired range ---- |
667 * | state | or | state |
668 *
669 * There's a hole, we need to insert something in it and
670 * ignore the extent we found.
671 */
672 if (state->start > start) {
673 u64 this_end;
674 if (end < last_start)
675 this_end = end;
676 else
677 this_end = last_start -1;
678 err = insert_state(tree, prealloc, start, this_end,
679 bits);
680 prealloc = NULL;
681 BUG_ON(err == -EEXIST);
682 if (err)
683 goto out;
684 start = this_end + 1;
685 goto search_again;
686 }
687 /*
688 * | ---- desired range ---- |
689 * | state |
690 * We need to split the extent, and set the bit
691 * on the first half
692 */
693 if (state->start <= end && state->end > end) {
694 set = state->state & bits;
695 if (exclusive && set) {
696 *failed_start = start;
697 err = -EEXIST;
698 goto out;
699 }
700 err = split_state(tree, state, prealloc, end + 1);
701 BUG_ON(err == -EEXIST);
702
703 set_state_bits(tree, prealloc, bits);
704 merge_state(tree, prealloc);
705 prealloc = NULL;
706 goto out;
707 }
708
709 goto search_again;
710
711out:
712 write_unlock_irqrestore(&tree->lock, flags);
713 if (prealloc)
714 free_extent_state(prealloc);
715
716 return err;
717
718search_again:
719 if (start > end)
720 goto out;
721 write_unlock_irqrestore(&tree->lock, flags);
722 if (mask & __GFP_WAIT)
723 cond_resched();
724 goto again;
725}
726EXPORT_SYMBOL(set_extent_bit);
727
728/* wrappers around set/clear extent bit */
729int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
730 gfp_t mask)
731{
732 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
733 mask);
734}
735EXPORT_SYMBOL(set_extent_dirty);
736
737int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
738 int bits, gfp_t mask)
739{
740 return set_extent_bit(tree, start, end, bits, 0, NULL,
741 mask);
742}
743EXPORT_SYMBOL(set_extent_bits);
744
745int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
746 int bits, gfp_t mask)
747{
748 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
749}
750EXPORT_SYMBOL(clear_extent_bits);
751
752int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
753 gfp_t mask)
754{
755 return set_extent_bit(tree, start, end,
756 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
757 mask);
758}
759EXPORT_SYMBOL(set_extent_delalloc);
760
761int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
762 gfp_t mask)
763{
764 return clear_extent_bit(tree, start, end,
765 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
766}
767EXPORT_SYMBOL(clear_extent_dirty);
768
769int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
770 gfp_t mask)
771{
772 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
773 mask);
774}
775EXPORT_SYMBOL(set_extent_new);
776
777int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
778 gfp_t mask)
779{
780 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
781}
782EXPORT_SYMBOL(clear_extent_new);
783
784int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
785 gfp_t mask)
786{
787 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
788 mask);
789}
790EXPORT_SYMBOL(set_extent_uptodate);
791
792int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
793 gfp_t mask)
794{
795 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
796}
797EXPORT_SYMBOL(clear_extent_uptodate);
798
799int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
800 gfp_t mask)
801{
802 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
803 0, NULL, mask);
804}
805EXPORT_SYMBOL(set_extent_writeback);
806
807int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
808 gfp_t mask)
809{
810 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
811}
812EXPORT_SYMBOL(clear_extent_writeback);
813
814int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
815{
816 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
817}
818EXPORT_SYMBOL(wait_on_extent_writeback);
819
820/*
821 * locks a range in ascending order, waiting for any locked regions
822 * it hits on the way. [start,end] are inclusive, and this will sleep.
823 */
824int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
825{
826 int err;
827 u64 failed_start;
828 while (1) {
829 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
830 &failed_start, mask);
831 if (err == -EEXIST && (mask & __GFP_WAIT)) {
832 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
833 start = failed_start;
834 } else {
835 break;
836 }
837 WARN_ON(start > end);
838 }
839 return err;
840}
841EXPORT_SYMBOL(lock_extent);
842
843int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
844 gfp_t mask)
845{
846 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
847}
848EXPORT_SYMBOL(unlock_extent);
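
/*
 * A minimal usage sketch of the lock_extent()/unlock_extent() pair above;
 * example_lock_unlock_range() is only an illustration.  The points it shows:
 * [start, end] is inclusive, and lock_extent() may sleep while waiting for
 * conflicting locked ranges.
 */
static inline void example_lock_unlock_range(struct extent_io_tree *tree,
					     u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;	/* end is inclusive */

	lock_extent(tree, start, end, GFP_NOFS);
	/* ... read or modify the pages covering [start, end] here ... */
	unlock_extent(tree, start, end, GFP_NOFS);
}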
849
850/*
851 * helper function to set both the pages and the extents in the tree dirty
852 */
853int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
854{
855 unsigned long index = start >> PAGE_CACHE_SHIFT;
856 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
857 struct page *page;
858
859 while (index <= end_index) {
860 page = find_get_page(tree->mapping, index);
861 BUG_ON(!page);
862 __set_page_dirty_nobuffers(page);
863 page_cache_release(page);
864 index++;
865 }
866 set_extent_dirty(tree, start, end, GFP_NOFS);
867 return 0;
868}
869EXPORT_SYMBOL(set_range_dirty);
870
871/*
872 * helper function to set both pages and extents in the tree writeback
873 */
874int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
875{
876 unsigned long index = start >> PAGE_CACHE_SHIFT;
877 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
878 struct page *page;
879
880 while (index <= end_index) {
881 page = find_get_page(tree->mapping, index);
882 BUG_ON(!page);
883 set_page_writeback(page);
884 page_cache_release(page);
885 index++;
886 }
887 set_extent_writeback(tree, start, end, GFP_NOFS);
888 return 0;
889}
890EXPORT_SYMBOL(set_range_writeback);
891
892int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
893 u64 *start_ret, u64 *end_ret, int bits)
894{
895 struct rb_node *node;
896 struct extent_state *state;
897 int ret = 1;
898
899 read_lock_irq(&tree->lock);
900 /*
901 * this search will find all the extents that end after
902 * our range starts.
903 */
904 node = tree_search(&tree->state, start);
905 if (!node || IS_ERR(node)) {
906 goto out;
907 }
908
909 while(1) {
910 state = rb_entry(node, struct extent_state, rb_node);
911 if (state->end >= start && (state->state & bits)) {
912 *start_ret = state->start;
913 *end_ret = state->end;
914 ret = 0;
915 break;
916 }
917 node = rb_next(node);
918 if (!node)
919 break;
920 }
921out:
922 read_unlock_irq(&tree->lock);
923 return ret;
924}
925EXPORT_SYMBOL(find_first_extent_bit);
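
/*
 * A small sketch of walking every range with a given bit set by calling
 * find_first_extent_bit() repeatedly and advancing past the range it
 * returns; example_count_dirty_ranges() is only an illustration.
 */
static inline u64 example_count_dirty_ranges(struct extent_io_tree *tree)
{
	u64 found_start;
	u64 found_end;
	u64 start = 0;
	u64 nr = 0;

	/* returns 0 when a matching range was found, 1 otherwise */
	while (!find_first_extent_bit(tree, start, &found_start,
				      &found_end, EXTENT_DIRTY)) {
		nr++;
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;	/* continue after this range */
	}
	return nr;
}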
926
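/*
 * find a run of contiguous EXTENT_DELALLOC records starting at or spanning
 * *start, extend it backwards to the true beginning of the run, mark each
 * accepted record EXTENT_LOCKED and return the number of records found.
 * Records already locked by someone else are waited on.  *start and *end
 * are set to cover the locked run, bounded roughly by max_bytes.
 */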
927u64 find_lock_delalloc_range(struct extent_io_tree *tree,
928 u64 *start, u64 *end, u64 max_bytes)
929{
930 struct rb_node *node;
931 struct extent_state *state;
932 u64 cur_start = *start;
933 u64 found = 0;
934 u64 total_bytes = 0;
935
936 write_lock_irq(&tree->lock);
937 /*
938 * this search will find all the extents that end after
939 * our range starts.
940 */
941search_again:
942 node = tree_search(&tree->state, cur_start);
943 if (!node || IS_ERR(node)) {
944 *end = (u64)-1;
945 goto out;
946 }
947
948 while(1) {
949 state = rb_entry(node, struct extent_state, rb_node);
950 if (found && state->start != cur_start) {
951 goto out;
952 }
953 if (!(state->state & EXTENT_DELALLOC)) {
954 if (!found)
955 *end = state->end;
956 goto out;
957 }
958 if (!found) {
959 struct extent_state *prev_state;
960 struct rb_node *prev_node = node;
961 while(1) {
962 prev_node = rb_prev(prev_node);
963 if (!prev_node)
964 break;
965 prev_state = rb_entry(prev_node,
966 struct extent_state,
967 rb_node);
968 if (!(prev_state->state & EXTENT_DELALLOC))
969 break;
970 state = prev_state;
971 node = prev_node;
972 }
973 }
974 if (state->state & EXTENT_LOCKED) {
975 DEFINE_WAIT(wait);
976 atomic_inc(&state->refs);
977 prepare_to_wait(&state->wq, &wait,
978 TASK_UNINTERRUPTIBLE);
979 write_unlock_irq(&tree->lock);
980 schedule();
981 write_lock_irq(&tree->lock);
982 finish_wait(&state->wq, &wait);
983 free_extent_state(state);
984 goto search_again;
985 }
986 state->state |= EXTENT_LOCKED;
987 if (!found)
988 *start = state->start;
989 found++;
990 *end = state->end;
991 cur_start = state->end + 1;
992 node = rb_next(node);
993 if (!node)
994 break;
995 total_bytes += state->end - state->start + 1;
996 if (total_bytes >= max_bytes)
997 break;
998 }
999out:
1000 write_unlock_irq(&tree->lock);
1001 return found;
1002}
1003
1004u64 count_range_bits(struct extent_io_tree *tree,
1005 u64 *start, u64 search_end, u64 max_bytes,
1006 unsigned long bits)
1007{
1008 struct rb_node *node;
1009 struct extent_state *state;
1010 u64 cur_start = *start;
1011 u64 total_bytes = 0;
1012 int found = 0;
1013
1014 if (search_end <= cur_start) {
1015 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1016 WARN_ON(1);
1017 return 0;
1018 }
1019
1020 write_lock_irq(&tree->lock);
1021 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1022 total_bytes = tree->dirty_bytes;
1023 goto out;
1024 }
1025 /*
1026 * this search will find all the extents that end after
1027 * our range starts.
1028 */
1029 node = tree_search(&tree->state, cur_start);
1030 if (!node || IS_ERR(node)) {
1031 goto out;
1032 }
1033
1034 while(1) {
1035 state = rb_entry(node, struct extent_state, rb_node);
1036 if (state->start > search_end)
1037 break;
1038 if (state->end >= cur_start && (state->state & bits)) {
1039 total_bytes += min(search_end, state->end) + 1 -
1040 max(cur_start, state->start);
1041 if (total_bytes >= max_bytes)
1042 break;
1043 if (!found) {
1044 *start = state->start;
1045 found = 1;
1046 }
1047 }
1048 node = rb_next(node);
1049 if (!node)
1050 break;
1051 }
1052out:
1053 write_unlock_irq(&tree->lock);
1054 return total_bytes;
1055}
1056/*
1057 * helper function to lock both pages and extents in the tree.
1058 * pages must be locked first.
1059 */
1060int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1061{
1062 unsigned long index = start >> PAGE_CACHE_SHIFT;
1063 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1064 struct page *page;
1065 int err;
1066
1067 while (index <= end_index) {
1068 page = grab_cache_page(tree->mapping, index);
1069 if (!page) {
1070 err = -ENOMEM;
1071 goto failed;
1072 }
1073 if (IS_ERR(page)) {
1074 err = PTR_ERR(page);
1075 goto failed;
1076 }
1077 index++;
1078 }
1079 lock_extent(tree, start, end, GFP_NOFS);
1080 return 0;
1081
1082failed:
1083 /*
1084 * we failed above in getting the page at 'index', so we undo here
1085 * up to but not including the page at 'index'
1086 */
1087 end_index = index;
1088 index = start >> PAGE_CACHE_SHIFT;
1089 while (index < end_index) {
1090 page = find_get_page(tree->mapping, index);
1091 unlock_page(page);
1092 page_cache_release(page);
1093 index++;
1094 }
1095 return err;
1096}
1097EXPORT_SYMBOL(lock_range);
1098
1099/*
1100 * helper function to unlock both pages and extents in the tree.
1101 */
1102int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1103{
1104 unsigned long index = start >> PAGE_CACHE_SHIFT;
1105 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1106 struct page *page;
1107
1108 while (index <= end_index) {
1109 page = find_get_page(tree->mapping, index);
1110 unlock_page(page);
1111 page_cache_release(page);
1112 index++;
1113 }
1114 unlock_extent(tree, start, end, GFP_NOFS);
1115 return 0;
1116}
1117EXPORT_SYMBOL(unlock_range);
1118
1119int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1120{
1121 struct rb_node *node;
1122 struct extent_state *state;
1123 int ret = 0;
1124
1125 write_lock_irq(&tree->lock);
1126 /*
1127 * this search will find all the extents that end after
1128 * our range starts.
1129 */
1130 node = tree_search(&tree->state, start);
1131 if (!node || IS_ERR(node)) {
1132 ret = -ENOENT;
1133 goto out;
1134 }
1135 state = rb_entry(node, struct extent_state, rb_node);
1136 if (state->start != start) {
1137 ret = -ENOENT;
1138 goto out;
1139 }
1140 state->private = private;
1141out:
1142 write_unlock_irq(&tree->lock);
1143 return ret;
1144}
1145
1146int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1147{
1148 struct rb_node *node;
1149 struct extent_state *state;
1150 int ret = 0;
1151
1152 read_lock_irq(&tree->lock);
1153 /*
1154 * this search will find all the extents that end after
1155 * our range starts.
1156 */
1157 node = tree_search(&tree->state, start);
1158 if (!node || IS_ERR(node)) {
1159 ret = -ENOENT;
1160 goto out;
1161 }
1162 state = rb_entry(node, struct extent_state, rb_node);
1163 if (state->start != start) {
1164 ret = -ENOENT;
1165 goto out;
1166 }
1167 *private = state->private;
1168out:
1169 read_unlock_irq(&tree->lock);
1170 return ret;
1171}
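
/*
 * A sketch of the private field: it carries one u64 of caller data for the
 * state record that starts exactly at 'start' (for example a checksum or a
 * cookie looked up again at end_io time).  Both calls return -ENOENT if no
 * record starts there.  example_stash_cookie() is only an illustration.
 */
static inline int example_stash_cookie(struct extent_io_tree *tree,
				       u64 start, u64 cookie)
{
	u64 readback;
	int ret;

	ret = set_state_private(tree, start, cookie);
	if (ret)
		return ret;
	ret = get_state_private(tree, start, &readback);
	WARN_ON(!ret && readback != cookie);
	return ret;
}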
1172
1173/*
1174 * searches a range in the state tree for a given mask.
1175 * If 'filled' == 1, this returns 1 only if every extent in the range
1176 * has the bits set. Otherwise, 1 is returned if any bit in the
1177 * range is found set.
1178 */
1179int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1180 int bits, int filled)
1181{
1182 struct extent_state *state = NULL;
1183 struct rb_node *node;
1184 int bitset = 0;
1185 unsigned long flags;
1186
1187 read_lock_irqsave(&tree->lock, flags);
1188 node = tree_search(&tree->state, start);
1189 while (node && start <= end) {
1190 state = rb_entry(node, struct extent_state, rb_node);
1191
1192 if (filled && state->start > start) {
1193 bitset = 0;
1194 break;
1195 }
1196
1197 if (state->start > end)
1198 break;
1199
1200 if (state->state & bits) {
1201 bitset = 1;
1202 if (!filled)
1203 break;
1204 } else if (filled) {
1205 bitset = 0;
1206 break;
1207 }
1208 start = state->end + 1;
1209 if (start > end)
1210 break;
1211 node = rb_next(node);
1212 if (!node) {
1213 if (filled)
1214 bitset = 0;
1215 break;
1216 }
1217 }
1218 read_unlock_irqrestore(&tree->lock, flags);
1219 return bitset;
1220}
1221EXPORT_SYMBOL(test_range_bit);
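
/*
 * Two sketch wrappers showing the difference between the 'filled' modes
 * described above: with filled == 1 the whole range [start, end] must be
 * covered by states carrying the bit, with filled == 0 a single byte with
 * the bit set is enough.  The example_* helpers are only illustrations.
 */
static inline int example_range_fully_uptodate(struct extent_io_tree *tree,
					       u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
}

static inline int example_range_any_locked(struct extent_io_tree *tree,
					   u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_LOCKED, 0);
}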
1222
1223/*
1224 * helper function to set a given page up to date if all the
1225 * extents in the tree for that page are up to date
1226 */
1227static int check_page_uptodate(struct extent_io_tree *tree,
1228 struct page *page)
1229{
1230 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1231 u64 end = start + PAGE_CACHE_SIZE - 1;
1232 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1233 SetPageUptodate(page);
1234 return 0;
1235}
1236
1237/*
1238 * helper function to unlock a page if all the extents in the tree
1239 * for that page are unlocked
1240 */
1241static int check_page_locked(struct extent_io_tree *tree,
1242 struct page *page)
1243{
1244 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1245 u64 end = start + PAGE_CACHE_SIZE - 1;
1246 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1247 unlock_page(page);
1248 return 0;
1249}
1250
1251/*
1252 * helper function to end page writeback if all the extents
1253 * in the tree for that page are done with writeback
1254 */
1255static int check_page_writeback(struct extent_io_tree *tree,
1256 struct page *page)
1257{
1258 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1259 u64 end = start + PAGE_CACHE_SIZE - 1;
1260 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1261 end_page_writeback(page);
1262 return 0;
1263}
1264
1265/* lots and lots of room for performance fixes in the end_bio funcs */
1266
1267/*
1268 * after a writepage IO is done, we need to:
1269 * clear the uptodate bits on error
1270 * clear the writeback bits in the extent tree for this IO
1271 * end_page_writeback if the page has no more pending IO
1272 *
1273 * Scheduling is not allowed, so the extent state tree is expected
1274 * to have one and only one object corresponding to this IO.
1275 */
1276#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1277static void end_bio_extent_writepage(struct bio *bio, int err)
1278#else
1279static int end_bio_extent_writepage(struct bio *bio,
1280 unsigned int bytes_done, int err)
1281#endif
1282{
1283 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1284 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1285 struct extent_io_tree *tree = bio->bi_private;
1286 u64 start;
1287 u64 end;
1288 int whole_page;
1289
1290#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1291 if (bio->bi_size)
1292 return 1;
1293#endif
1294
1295 do {
1296 struct page *page = bvec->bv_page;
1297 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1298 bvec->bv_offset;
1299 end = start + bvec->bv_len - 1;
1300
1301 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1302 whole_page = 1;
1303 else
1304 whole_page = 0;
1305
1306 if (--bvec >= bio->bi_io_vec)
1307 prefetchw(&bvec->bv_page->flags);
1308
1309 if (!uptodate) {
1310 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1311 ClearPageUptodate(page);
1312 SetPageError(page);
1313 }
1314 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1315
1316 if (whole_page)
1317 end_page_writeback(page);
1318 else
1319 check_page_writeback(tree, page);
1320 if (tree->ops && tree->ops->writepage_end_io_hook)
1321 tree->ops->writepage_end_io_hook(page, start, end);
1322 } while (bvec >= bio->bi_io_vec);
1323
1324 bio_put(bio);
1325#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1326 return 0;
1327#endif
1328}
1329
1330/*
1331 * after a readpage IO is done, we need to:
1332 * clear the uptodate bits on error
1333 * set the uptodate bits if things worked
1334 * set the page up to date if all extents in the tree are uptodate
1335 * clear the lock bit in the extent tree
1336 * unlock the page if there are no other extents locked for it
1337 *
1338 * Scheduling is not allowed, so the extent state tree is expected
1339 * to have one and only one object corresponding to this IO.
1340 */
1341#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1342static void end_bio_extent_readpage(struct bio *bio, int err)
1343#else
1344static int end_bio_extent_readpage(struct bio *bio,
1345 unsigned int bytes_done, int err)
1346#endif
1347{
1348 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1349 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1350 struct extent_io_tree *tree = bio->bi_private;
1351 u64 start;
1352 u64 end;
1353 int whole_page;
1354 int ret;
1355
1356#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1357 if (bio->bi_size)
1358 return 1;
1359#endif
1360
1361 do {
1362 struct page *page = bvec->bv_page;
1363 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1364 bvec->bv_offset;
1365 end = start + bvec->bv_len - 1;
1366
1367 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1368 whole_page = 1;
1369 else
1370 whole_page = 0;
1371
1372 if (--bvec >= bio->bi_io_vec)
1373 prefetchw(&bvec->bv_page->flags);
1374
1375 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1376 ret = tree->ops->readpage_end_io_hook(page, start, end);
1377 if (ret)
1378 uptodate = 0;
1379 }
1380 if (uptodate) {
1381 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1382 if (whole_page)
1383 SetPageUptodate(page);
1384 else
1385 check_page_uptodate(tree, page);
1386 } else {
1387 ClearPageUptodate(page);
1388 SetPageError(page);
1389 }
1390
1391 unlock_extent(tree, start, end, GFP_ATOMIC);
1392
1393 if (whole_page)
1394 unlock_page(page);
1395 else
1396 check_page_locked(tree, page);
1397 } while (bvec >= bio->bi_io_vec);
1398
1399 bio_put(bio);
1400#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1401 return 0;
1402#endif
1403}
1404
1405/*
1406 * IO done from prepare_write is pretty simple, we just unlock
1407 * the structs in the extent tree when done, and set the uptodate bits
1408 * as appropriate.
1409 */
1410#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1411static void end_bio_extent_preparewrite(struct bio *bio, int err)
1412#else
1413static int end_bio_extent_preparewrite(struct bio *bio,
1414 unsigned int bytes_done, int err)
1415#endif
1416{
1417 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1418 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1419 struct extent_io_tree *tree = bio->bi_private;
1420 u64 start;
1421 u64 end;
1422
1423#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1424 if (bio->bi_size)
1425 return 1;
1426#endif
1427
1428 do {
1429 struct page *page = bvec->bv_page;
1430 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1431 bvec->bv_offset;
1432 end = start + bvec->bv_len - 1;
1433
1434 if (--bvec >= bio->bi_io_vec)
1435 prefetchw(&bvec->bv_page->flags);
1436
1437 if (uptodate) {
1438 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1439 } else {
1440 ClearPageUptodate(page);
1441 SetPageError(page);
1442 }
1443
1444 unlock_extent(tree, start, end, GFP_ATOMIC);
1445
1446 } while (bvec >= bio->bi_io_vec);
1447
1448 bio_put(bio);
1449#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1450 return 0;
1451#endif
1452}
1453
1454static struct bio *
1455extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1456 gfp_t gfp_flags)
1457{
1458 struct bio *bio;
1459
1460 bio = bio_alloc(gfp_flags, nr_vecs);
1461
1462 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1463 while (!bio && (nr_vecs /= 2))
1464 bio = bio_alloc(gfp_flags, nr_vecs);
1465 }
1466
1467 if (bio) {
1468 bio->bi_bdev = bdev;
1469 bio->bi_sector = first_sector;
1470 }
1471 return bio;
1472}
1473
1474static int submit_one_bio(int rw, struct bio *bio)
1475{
1476 u64 maxsector;
1477 int ret = 0;
1478
1479 bio_get(bio);
1480
1481 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1482 if (maxsector < bio->bi_sector) {
1483 printk("sector too large max %Lu got %llu\n", maxsector,
1484 (unsigned long long)bio->bi_sector);
1485 WARN_ON(1);
1486 }
1487
1488 submit_bio(rw, bio);
1489 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1490 ret = -EOPNOTSUPP;
1491 bio_put(bio);
1492 return ret;
1493}
1494
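/*
 * add a page to the bio being built in *bio_ret.  If the page is not
 * contiguous with that bio, or does not fit in it, the old bio is
 * submitted and a fresh one is started.  With bio_ret == NULL the bio
 * for this page is submitted immediately.
 */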
1495static int submit_extent_page(int rw, struct extent_io_tree *tree,
1496 struct page *page, sector_t sector,
1497 size_t size, unsigned long offset,
1498 struct block_device *bdev,
1499 struct bio **bio_ret,
1500 unsigned long max_pages,
1501 bio_end_io_t end_io_func)
1502{
1503 int ret = 0;
1504 struct bio *bio;
1505 int nr;
1506
1507 if (bio_ret && *bio_ret) {
1508 bio = *bio_ret;
1509 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1510 bio_add_page(bio, page, size, offset) < size) {
1511 ret = submit_one_bio(rw, bio);
1512 bio = NULL;
1513 } else {
1514 return 0;
1515 }
1516 }
1517 nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1518 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1519 if (!bio) {
1520 printk("failed to allocate bio nr %d\n", nr);
1521 }
1522 bio_add_page(bio, page, size, offset);
1523 bio->bi_end_io = end_io_func;
1524 bio->bi_private = tree;
1525 if (bio_ret) {
1526 *bio_ret = bio;
1527 } else {
1528 ret = submit_one_bio(rw, bio);
1529 }
1530
1531 return ret;
1532}
1533
1534void set_page_extent_mapped(struct page *page)
1535{
1536 if (!PagePrivate(page)) {
1537 SetPagePrivate(page);
1538 WARN_ON(!page->mapping->a_ops->invalidatepage);
1539 set_page_private(page, EXTENT_PAGE_PRIVATE);
1540 page_cache_get(page);
1541 }
1542}
1543
1544void set_page_extent_head(struct page *page, unsigned long len)
1545{
1546 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1547}
1548
1549/*
1550 * basic readpage implementation. Locked extent state structs are inserted
1551 * into the tree and are removed when the IO is done (by the end_io
1552 * handlers)
1553 */
1554static int __extent_read_full_page(struct extent_io_tree *tree,
1555 struct page *page,
1556 get_extent_t *get_extent,
1557 struct bio **bio)
1558{
1559 struct inode *inode = page->mapping->host;
1560 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1561 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1562 u64 end;
1563 u64 cur = start;
1564 u64 extent_offset;
1565 u64 last_byte = i_size_read(inode);
1566 u64 block_start;
1567 u64 cur_end;
1568 sector_t sector;
1569 struct extent_map *em;
1570 struct block_device *bdev;
1571 int ret;
1572 int nr = 0;
1573 size_t page_offset = 0;
1574 size_t iosize;
1575 size_t blocksize = inode->i_sb->s_blocksize;
1576
1577 set_page_extent_mapped(page);
1578
1579 end = page_end;
1580 lock_extent(tree, start, end, GFP_NOFS);
1581
1582 while (cur <= end) {
1583 if (cur >= last_byte) {
1584 char *userpage;
1585 iosize = PAGE_CACHE_SIZE - page_offset;
1586 userpage = kmap_atomic(page, KM_USER0);
1587 memset(userpage + page_offset, 0, iosize);
1588 flush_dcache_page(page);
1589 kunmap_atomic(userpage, KM_USER0);
1590 set_extent_uptodate(tree, cur, cur + iosize - 1,
1591 GFP_NOFS);
1592 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1593 break;
1594 }
1595 em = get_extent(inode, page, page_offset, cur,
1596 end - cur + 1, 0);
1597 if (IS_ERR(em) || !em) {
1598 SetPageError(page);
1599 unlock_extent(tree, cur, end, GFP_NOFS);
1600 break;
1601 }
1602
1603 extent_offset = cur - em->start;
1604 BUG_ON(extent_map_end(em) <= cur);
1605 BUG_ON(end < cur);
1606
1607 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1608 cur_end = min(extent_map_end(em) - 1, end);
1609 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1610 sector = (em->block_start + extent_offset) >> 9;
1611 bdev = em->bdev;
1612 block_start = em->block_start;
1613 free_extent_map(em);
1614 em = NULL;
1615
1616 /* we've found a hole, just zero and go on */
1617 if (block_start == EXTENT_MAP_HOLE) {
1618 char *userpage;
1619 userpage = kmap_atomic(page, KM_USER0);
1620 memset(userpage + page_offset, 0, iosize);
1621 flush_dcache_page(page);
1622 kunmap_atomic(userpage, KM_USER0);
1623
1624 set_extent_uptodate(tree, cur, cur + iosize - 1,
1625 GFP_NOFS);
1626 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1627 cur = cur + iosize;
1628 page_offset += iosize;
1629 continue;
1630 }
1631 /* the get_extent function already copied into the page */
1632 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1633 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1634 cur = cur + iosize;
1635 page_offset += iosize;
1636 continue;
1637 }
1638
1639 ret = 0;
1640 if (tree->ops && tree->ops->readpage_io_hook) {
1641 ret = tree->ops->readpage_io_hook(page, cur,
1642 cur + iosize - 1);
1643 }
1644 if (!ret) {
1645 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1646 nr -= page->index;
1647 ret = submit_extent_page(READ, tree, page,
1648 sector, iosize, page_offset,
1649 bdev, bio, nr,
1650 end_bio_extent_readpage);
1651 }
1652 if (ret)
1653 SetPageError(page);
1654 cur = cur + iosize;
1655 page_offset += iosize;
1656 nr++;
1657 }
1658 if (!nr) {
1659 if (!PageError(page))
1660 SetPageUptodate(page);
1661 unlock_page(page);
1662 }
1663 return 0;
1664}
1665
1666int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1667 get_extent_t *get_extent)
1668{
1669 struct bio *bio = NULL;
1670 int ret;
1671
1672 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1673 if (bio)
1674 submit_one_bio(READ, bio);
1675 return ret;
1676}
1677EXPORT_SYMBOL(extent_read_full_page);
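
/*
 * A sketch of how a filesystem ->readpage address_space operation is
 * expected to wrap this helper.  example_io_tree() and example_get_extent()
 * are hypothetical stand-ins for the caller's own way of finding its
 * extent_io_tree and its get_extent_t callback, so this is illustration
 * only.
 */
#if 0	/* illustration only */
static int example_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree = example_io_tree(page->mapping->host);

	return extent_read_full_page(tree, page, example_get_extent);
}
#endif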
1678
1679/*
1680 * the writepage semantics are similar to regular writepage. extent
1681 * records are inserted to lock ranges in the tree, and as dirty areas
1682 * are found, they are marked writeback. Then the lock bits are removed
1683 * and the end_io handler clears the writeback ranges
1684 */
1685static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1686 void *data)
1687{
1688 struct inode *inode = page->mapping->host;
1689 struct extent_page_data *epd = data;
1690 struct extent_io_tree *tree = epd->tree;
1691 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1692 u64 delalloc_start;
1693 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1694 u64 end;
1695 u64 cur = start;
1696 u64 extent_offset;
1697 u64 last_byte = i_size_read(inode);
1698 u64 block_start;
1699 u64 iosize;
1700 sector_t sector;
1701 struct extent_map *em;
1702 struct block_device *bdev;
1703 int ret;
1704 int nr = 0;
1705 size_t page_offset = 0;
1706 size_t blocksize;
1707 loff_t i_size = i_size_read(inode);
1708 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1709 u64 nr_delalloc;
1710 u64 delalloc_end;
1711
1712 WARN_ON(!PageLocked(page));
1713 if (page->index > end_index) {
1714 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1715 unlock_page(page);
1716 return 0;
1717 }
1718
1719 if (page->index == end_index) {
1720 char *userpage;
1721
1722 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1723
1724 userpage = kmap_atomic(page, KM_USER0);
1725 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1726 flush_dcache_page(page);
1727 kunmap_atomic(userpage, KM_USER0);
1728 }
1729
1730 set_page_extent_mapped(page);
1731
1732 delalloc_start = start;
1733 delalloc_end = 0;
1734 while(delalloc_end < page_end) {
1735 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1736 &delalloc_end,
1737 128 * 1024 * 1024);
1738 if (nr_delalloc == 0) {
1739 delalloc_start = delalloc_end + 1;
1740 continue;
1741 }
1742 tree->ops->fill_delalloc(inode, delalloc_start,
1743 delalloc_end);
1744 clear_extent_bit(tree, delalloc_start,
1745 delalloc_end,
1746 EXTENT_LOCKED | EXTENT_DELALLOC,
1747 1, 0, GFP_NOFS);
1748 delalloc_start = delalloc_end + 1;
1749 }
1750 lock_extent(tree, start, page_end, GFP_NOFS);
1751
1752 end = page_end;
1753 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1754 printk("found delalloc bits after lock_extent\n");
1755 }
1756
1757 if (last_byte <= start) {
1758 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1759 goto done;
1760 }
1761
1762 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1763 blocksize = inode->i_sb->s_blocksize;
1764
1765 while (cur <= end) {
1766 if (cur >= last_byte) {
1767 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1768 break;
1769 }
1770 em = epd->get_extent(inode, page, page_offset, cur,
1771 end - cur + 1, 1);
1772 if (IS_ERR(em) || !em) {
1773 SetPageError(page);
1774 break;
1775 }
1776
1777 extent_offset = cur - em->start;
1778 BUG_ON(extent_map_end(em) <= cur);
1779 BUG_ON(end < cur);
1780 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1781 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1782 sector = (em->block_start + extent_offset) >> 9;
1783 bdev = em->bdev;
1784 block_start = em->block_start;
1785 free_extent_map(em);
1786 em = NULL;
1787
1788 if (block_start == EXTENT_MAP_HOLE ||
1789 block_start == EXTENT_MAP_INLINE) {
1790 clear_extent_dirty(tree, cur,
1791 cur + iosize - 1, GFP_NOFS);
1792 cur = cur + iosize;
1793 page_offset += iosize;
1794 continue;
1795 }
1796
1797 /* leave this out until we have a page_mkwrite call */
1798 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1799 EXTENT_DIRTY, 0)) {
1800 cur = cur + iosize;
1801 page_offset += iosize;
1802 continue;
1803 }
1804 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1805 if (tree->ops && tree->ops->writepage_io_hook) {
1806 ret = tree->ops->writepage_io_hook(page, cur,
1807 cur + iosize - 1);
1808 } else {
1809 ret = 0;
1810 }
1811 if (ret)
1812 SetPageError(page);
1813 else {
1814 unsigned long max_nr = end_index + 1;
1815 set_range_writeback(tree, cur, cur + iosize - 1);
1816 if (!PageWriteback(page)) {
1817 printk("warning page %lu not writeback, "
1818 "cur %llu end %llu\n", page->index,
1819 (unsigned long long)cur,
1820 (unsigned long long)end);
1821 }
1822
1823 ret = submit_extent_page(WRITE, tree, page, sector,
1824 iosize, page_offset, bdev,
1825 &epd->bio, max_nr,
1826 end_bio_extent_writepage);
1827 if (ret)
1828 SetPageError(page);
1829 }
1830 cur = cur + iosize;
1831 page_offset += iosize;
1832 nr++;
1833 }
1834done:
1835 if (nr == 0) {
1836 /* make sure the mapping tag for page dirty gets cleared */
1837 set_page_writeback(page);
1838 end_page_writeback(page);
1839 }
1840 unlock_extent(tree, start, page_end, GFP_NOFS);
1841 unlock_page(page);
1842 return 0;
1843}
1844
1845#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1846
1847/* Taken directly from 2.6.23 for 2.6.18 back port */
1848typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
1849 void *data);
1850
1851/**
1852 * write_cache_pages - walk the list of dirty pages of the given address space
1853 * and write all of them.
1854 * @mapping: address space structure to write
1855 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1856 * @writepage: function called for each page
1857 * @data: data passed to writepage function
1858 *
1859 * If a page is already under I/O, write_cache_pages() skips it, even
1860 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
1861 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
1862 * and msync() need to guarantee that all the data which was dirty at the time
1863 * the call was made get new I/O started against them. If wbc->sync_mode is
1864 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1865 * existing IO to complete.
1866 */
1867static int write_cache_pages(struct address_space *mapping,
1868 struct writeback_control *wbc, writepage_t writepage,
1869 void *data)
1870{
1871 struct backing_dev_info *bdi = mapping->backing_dev_info;
1872 int ret = 0;
1873 int done = 0;
1874 struct pagevec pvec;
1875 int nr_pages;
1876 pgoff_t index;
1877 pgoff_t end; /* Inclusive */
1878 int scanned = 0;
1879 int range_whole = 0;
1880
1881 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1882 wbc->encountered_congestion = 1;
1883 return 0;
1884 }
1885
1886 pagevec_init(&pvec, 0);
1887 if (wbc->range_cyclic) {
1888 index = mapping->writeback_index; /* Start from prev offset */
1889 end = -1;
1890 } else {
1891 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1892 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1893 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1894 range_whole = 1;
1895 scanned = 1;
1896 }
1897retry:
1898 while (!done && (index <= end) &&
1899 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1900 PAGECACHE_TAG_DIRTY,
1901 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
1902 unsigned i;
1903
1904 scanned = 1;
1905 for (i = 0; i < nr_pages; i++) {
1906 struct page *page = pvec.pages[i];
1907
1908 /*
1909 * At this point we hold neither mapping->tree_lock nor
1910 * lock on the page itself: the page may be truncated or
1911 * invalidated (changing page->mapping to NULL), or even
1912 * swizzled back from swapper_space to tmpfs file
1913 * mapping
1914 */
1915 lock_page(page);
1916
1917 if (unlikely(page->mapping != mapping)) {
1918 unlock_page(page);
1919 continue;
1920 }
1921
1922 if (!wbc->range_cyclic && page->index > end) {
1923 done = 1;
1924 unlock_page(page);
1925 continue;
1926 }
1927
1928 if (wbc->sync_mode != WB_SYNC_NONE)
1929 wait_on_page_writeback(page);
1930
1931 if (PageWriteback(page) ||
1932 !clear_page_dirty_for_io(page)) {
1933 unlock_page(page);
1934 continue;
1935 }
1936
1937 ret = (*writepage)(page, wbc, data);
1938
1939 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
1940 unlock_page(page);
1941 ret = 0;
1942 }
1943 if (ret || (--(wbc->nr_to_write) <= 0))
1944 done = 1;
1945 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1946 wbc->encountered_congestion = 1;
1947 done = 1;
1948 }
1949 }
1950 pagevec_release(&pvec);
1951 cond_resched();
1952 }
1953 if (!scanned && !done) {
1954 /*
1955 * We hit the last page and there is more work to be done: wrap
1956 * back to the start of the file
1957 */
1958 scanned = 1;
1959 index = 0;
1960 goto retry;
1961 }
1962 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1963 mapping->writeback_index = index;
1964 return ret;
1965}
1966#endif
1967
1968int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
1969 get_extent_t *get_extent,
1970 struct writeback_control *wbc)
1971{
1972 int ret;
1973 struct address_space *mapping = page->mapping;
1974 struct extent_page_data epd = {
1975 .bio = NULL,
1976 .tree = tree,
1977 .get_extent = get_extent,
1978 };
1979 struct writeback_control wbc_writepages = {
1980 .bdi = wbc->bdi,
1981 .sync_mode = WB_SYNC_NONE,
1982 .older_than_this = NULL,
1983 .nr_to_write = 64,
1984 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
1985 .range_end = (loff_t)-1,
1986 };
1987
1988
1989 ret = __extent_writepage(page, wbc, &epd);
1990
1991 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
1992 if (epd.bio) {
1993 submit_one_bio(WRITE, epd.bio);
1994 }
1995 return ret;
1996}
1997EXPORT_SYMBOL(extent_write_full_page);
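
/*
 * The matching ->writepage sketch.  As in the readpage sketch above,
 * example_io_tree() and example_get_extent() are hypothetical stand-ins
 * for the filesystem's own hooks, so this is illustration only.
 */
#if 0	/* illustration only */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree = example_io_tree(page->mapping->host);

	return extent_write_full_page(tree, page, example_get_extent, wbc);
}
#endif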
1998
1999
2000int extent_writepages(struct extent_io_tree *tree,
2001 struct address_space *mapping,
2002 get_extent_t *get_extent,
2003 struct writeback_control *wbc)
2004{
2005 int ret = 0;
2006 struct extent_page_data epd = {
2007 .bio = NULL,
2008 .tree = tree,
2009 .get_extent = get_extent,
2010 };
2011
2012 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2013 if (epd.bio) {
2014 submit_one_bio(WRITE, epd.bio);
2015 }
2016 return ret;
2017}
2018EXPORT_SYMBOL(extent_writepages);
2019
2020int extent_readpages(struct extent_io_tree *tree,
2021 struct address_space *mapping,
2022 struct list_head *pages, unsigned nr_pages,
2023 get_extent_t get_extent)
2024{
2025 struct bio *bio = NULL;
2026 unsigned page_idx;
2027 struct pagevec pvec;
2028
2029 pagevec_init(&pvec, 0);
2030 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2031 struct page *page = list_entry(pages->prev, struct page, lru);
2032
2033 prefetchw(&page->flags);
2034 list_del(&page->lru);
2035 /*
2036 * what we want to do here is call add_to_page_cache_lru,
2037 * but that isn't exported, so we reproduce it here
2038 */
2039 if (!add_to_page_cache(page, mapping,
2040 page->index, GFP_KERNEL)) {
2041
2042 /* open coding of lru_cache_add, also not exported */
2043 page_cache_get(page);
2044 if (!pagevec_add(&pvec, page))
2045 __pagevec_lru_add(&pvec);
2046 __extent_read_full_page(tree, page, get_extent, &bio);
2047 }
2048 page_cache_release(page);
2049 }
2050 if (pagevec_count(&pvec))
2051 __pagevec_lru_add(&pvec);
2052 BUG_ON(!list_empty(pages));
2053 if (bio)
2054 submit_one_bio(READ, bio);
2055 return 0;
2056}
2057EXPORT_SYMBOL(extent_readpages);
2058
2059/*
2060 * basic invalidatepage code, this waits on any locked or writeback
2061 * ranges corresponding to the page, and then deletes any extent state
2062 * records from the tree
2063 */
2064int extent_invalidatepage(struct extent_io_tree *tree,
2065 struct page *page, unsigned long offset)
2066{
2067 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2068 u64 end = start + PAGE_CACHE_SIZE - 1;
2069 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2070
2071 start += (offset + blocksize -1) & ~(blocksize - 1);
2072 if (start > end)
2073 return 0;
2074
2075 lock_extent(tree, start, end, GFP_NOFS);
2076 wait_on_extent_writeback(tree, start, end);
2077 clear_extent_bit(tree, start, end,
2078 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2079 1, 1, GFP_NOFS);
2080 return 0;
2081}
2082EXPORT_SYMBOL(extent_invalidatepage);
2083
2084/*
2085 * simple commit_write call, the page is marked dirty and the inode
2086 * size is updated if the write extends past the current i_size
2087 */
2088int extent_commit_write(struct extent_io_tree *tree,
2089 struct inode *inode, struct page *page,
2090 unsigned from, unsigned to)
2091{
2092 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2093
2094 set_page_extent_mapped(page);
2095 set_page_dirty(page);
2096
2097 if (pos > inode->i_size) {
2098 i_size_write(inode, pos);
2099 mark_inode_dirty(inode);
2100 }
2101 return 0;
2102}
2103EXPORT_SYMBOL(extent_commit_write);
2104
2105int extent_prepare_write(struct extent_io_tree *tree,
2106 struct inode *inode, struct page *page,
2107 unsigned from, unsigned to, get_extent_t *get_extent)
2108{
2109 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2110 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2111 u64 block_start;
2112 u64 orig_block_start;
2113 u64 block_end;
2114 u64 cur_end;
2115 struct extent_map *em;
2116 unsigned blocksize = 1 << inode->i_blkbits;
2117 size_t page_offset = 0;
2118 size_t block_off_start;
2119 size_t block_off_end;
2120 int err = 0;
2121 int iocount = 0;
2122 int ret = 0;
2123 int isnew;
2124
2125 set_page_extent_mapped(page);
2126
2127 block_start = (page_start + from) & ~((u64)blocksize - 1);
2128 block_end = (page_start + to - 1) | (blocksize - 1);
2129 orig_block_start = block_start;
2130
2131 lock_extent(tree, page_start, page_end, GFP_NOFS);
2132 while(block_start <= block_end) {
2133 em = get_extent(inode, page, page_offset, block_start,
2134 block_end - block_start + 1, 1);
2135 if (IS_ERR(em) || !em) {
2136 goto err;
2137 }
2138 cur_end = min(block_end, extent_map_end(em) - 1);
2139 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2140 block_off_end = block_off_start + blocksize;
2141 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2142
2143 if (!PageUptodate(page) && isnew &&
2144 (block_off_end > to || block_off_start < from)) {
2145 void *kaddr;
2146
2147 kaddr = kmap_atomic(page, KM_USER0);
2148 if (block_off_end > to)
2149 memset(kaddr + to, 0, block_off_end - to);
2150 if (block_off_start < from)
2151 memset(kaddr + block_off_start, 0,
2152 from - block_off_start);
2153 flush_dcache_page(page);
2154 kunmap_atomic(kaddr, KM_USER0);
2155 }
2156 if ((em->block_start != EXTENT_MAP_HOLE &&
2157 em->block_start != EXTENT_MAP_INLINE) &&
2158 !isnew && !PageUptodate(page) &&
2159 (block_off_end > to || block_off_start < from) &&
2160 !test_range_bit(tree, block_start, cur_end,
2161 EXTENT_UPTODATE, 1)) {
2162 u64 sector;
2163 u64 extent_offset = block_start - em->start;
2164 size_t iosize;
2165 sector = (em->block_start + extent_offset) >> 9;
2166 iosize = (cur_end - block_start + blocksize) &
2167 ~((u64)blocksize - 1);
2168 /*
2169 * we've already got the extent locked, but we
2170 * need to split the state such that our end_bio
2171 * handler can clear the lock.
2172 */
2173 set_extent_bit(tree, block_start,
2174 block_start + iosize - 1,
2175 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2176 ret = submit_extent_page(READ, tree, page,
2177 sector, iosize, page_offset, em->bdev,
2178 NULL, 1,
2179 end_bio_extent_preparewrite);
2180 iocount++;
2181 block_start = block_start + iosize;
2182 } else {
2183 set_extent_uptodate(tree, block_start, cur_end,
2184 GFP_NOFS);
2185 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2186 block_start = cur_end + 1;
2187 }
2188 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2189 free_extent_map(em);
2190 }
2191 if (iocount) {
2192 wait_extent_bit(tree, orig_block_start,
2193 block_end, EXTENT_LOCKED);
2194 }
2195 check_page_uptodate(tree, page);
2196err:
2197 /* FIXME, zero out newly allocated blocks on error */
2198 return err;
2199}
2200EXPORT_SYMBOL(extent_prepare_write);
2201
2202/*
2203 * a helper for releasepage. As long as there are no locked extents
2204 * in the range corresponding to the page, both state records and extent
2205 * map records are removed
2206 */
2207int try_release_extent_mapping(struct extent_map_tree *map,
2208 struct extent_io_tree *tree, struct page *page)
2209{
2210 struct extent_map *em;
2211 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2212 u64 end = start + PAGE_CACHE_SIZE - 1;
2213 u64 orig_start = start;
2214 int ret = 1;
2215
2216 while (start <= end) {
2217 spin_lock(&map->lock);
2218 em = lookup_extent_mapping(map, start, end);
2219 if (!em || IS_ERR(em)) {
2220 spin_unlock(&map->lock);
2221 break;
2222 }
2223 if (!test_range_bit(tree, em->start, extent_map_end(em) - 1,
2224 EXTENT_LOCKED, 0)) {
2225 remove_extent_mapping(map, em);
2226 /* once for the rb tree */
2227 free_extent_map(em);
2228 }
2229 start = extent_map_end(em);
2230 spin_unlock(&map->lock);
2231
2232 /* once for us */
2233 free_extent_map(em);
2234 }
2235 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2236 ret = 0;
2237 else
2238 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2239 1, 1, GFP_NOFS);
2240 return ret;
2241}
2242EXPORT_SYMBOL(try_release_extent_mapping);
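
/*
 * A ->releasepage style sketch.  try_release_extent_mapping() returns 1
 * when the range had no locked extents and the page's state and mapping
 * records were dropped, which is the sense a ->releasepage implementation
 * wants to return.  example_map_tree() and example_io_tree() are
 * hypothetical stand-ins for the caller's per-inode trees, so this is
 * illustration only.
 */
#if 0	/* illustration only */
static int example_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;

	return try_release_extent_mapping(example_map_tree(inode),
					  example_io_tree(inode), page);
}
#endif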
2243
2244sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2245 get_extent_t *get_extent)
2246{
2247 struct inode *inode = mapping->host;
2248 u64 start = iblock << inode->i_blkbits;
2249 sector_t sector = 0;
2250 struct extent_map *em;
2251
2252 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2253 if (!em || IS_ERR(em))
2254 return 0;
2255
2256 if (em->block_start == EXTENT_MAP_INLINE ||
2257 em->block_start == EXTENT_MAP_HOLE)
2258 goto out;
2259
2260 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2261printk("bmap finds %Lu %Lu block %Lu\n", em->start, em->len, em->block_start);
2262out:
2263 free_extent_map(em);
2264 return sector;
2265}
2266
2267static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
2268{
2269 if (list_empty(&eb->lru)) {
2270 extent_buffer_get(eb);
2271 list_add(&eb->lru, &tree->buffer_lru);
2272 tree->lru_size++;
2273 if (tree->lru_size >= BUFFER_LRU_MAX) {
2274 struct extent_buffer *rm;
2275 rm = list_entry(tree->buffer_lru.prev,
2276 struct extent_buffer, lru);
2277 tree->lru_size--;
2278 list_del_init(&rm->lru);
2279 free_extent_buffer(rm);
2280 }
2281 } else
2282 list_move(&eb->lru, &tree->buffer_lru);
2283 return 0;
2284}
2285static struct extent_buffer *find_lru(struct extent_io_tree *tree,
2286 u64 start, unsigned long len)
2287{
2288 struct list_head *lru = &tree->buffer_lru;
2289 struct list_head *cur = lru->next;
2290 struct extent_buffer *eb;
2291
2292 if (list_empty(lru))
2293 return NULL;
2294
2295 do {
2296 eb = list_entry(cur, struct extent_buffer, lru);
2297 if (eb->start == start && eb->len == len) {
2298 extent_buffer_get(eb);
2299 return eb;
2300 }
2301 cur = cur->next;
2302 } while (cur != lru);
2303 return NULL;
2304}
2305
2306static inline unsigned long num_extent_pages(u64 start, u64 len)
2307{
2308 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2309 (start >> PAGE_CACHE_SHIFT);
2310}
2311
2312static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2313 unsigned long i)
2314{
2315 struct page *p;
2316 struct address_space *mapping;
2317
2318 if (i == 0)
2319 return eb->first_page;
2320 i += eb->start >> PAGE_CACHE_SHIFT;
2321 mapping = eb->first_page->mapping;
2322 read_lock_irq(&mapping->tree_lock);
2323 p = radix_tree_lookup(&mapping->page_tree, i);
2324 read_unlock_irq(&mapping->tree_lock);
2325 return p;
2326}
2327
2328static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2329 u64 start,
2330 unsigned long len,
2331 gfp_t mask)
2332{
2333 struct extent_buffer *eb = NULL;
2334
2335 spin_lock(&tree->lru_lock);
2336 eb = find_lru(tree, start, len);
2337 spin_unlock(&tree->lru_lock);
2338 if (eb) {
2339 return eb;
2340 }
2341
2342 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2343 INIT_LIST_HEAD(&eb->lru);
2344 eb->start = start;
2345 eb->len = len;
2346 atomic_set(&eb->refs, 1);
2347
2348 return eb;
2349}
2350
2351static void __free_extent_buffer(struct extent_buffer *eb)
2352{
2353 kmem_cache_free(extent_buffer_cache, eb);
2354}
2355
2356struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2357 u64 start, unsigned long len,
2358 struct page *page0,
2359 gfp_t mask)
2360{
2361 unsigned long num_pages = num_extent_pages(start, len);
2362 unsigned long i;
2363 unsigned long index = start >> PAGE_CACHE_SHIFT;
2364 struct extent_buffer *eb;
2365 struct page *p;
2366 struct address_space *mapping = tree->mapping;
2367 int uptodate = 1;
2368
2369 eb = __alloc_extent_buffer(tree, start, len, mask);
2370 if (!eb || IS_ERR(eb))
2371 return NULL;
2372
2373 if (eb->flags & EXTENT_BUFFER_FILLED)
2374 goto lru_add;
2375
2376 if (page0) {
2377 eb->first_page = page0;
2378 i = 1;
2379 index++;
2380 page_cache_get(page0);
2381 mark_page_accessed(page0);
2382 set_page_extent_mapped(page0);
2383 WARN_ON(!PageUptodate(page0));
2384 set_page_extent_head(page0, len);
2385 } else {
2386 i = 0;
2387 }
2388 for (; i < num_pages; i++, index++) {
2389 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2390 if (!p) {
2391 WARN_ON(1);
2392 goto fail;
2393 }
2394 set_page_extent_mapped(p);
2395 mark_page_accessed(p);
2396 if (i == 0) {
2397 eb->first_page = p;
2398 set_page_extent_head(p, len);
2399 } else {
2400 set_page_private(p, EXTENT_PAGE_PRIVATE);
2401 }
2402 if (!PageUptodate(p))
2403 uptodate = 0;
2404 unlock_page(p);
2405 }
2406 if (uptodate)
2407 eb->flags |= EXTENT_UPTODATE;
2408 eb->flags |= EXTENT_BUFFER_FILLED;
2409
2410lru_add:
2411 spin_lock(&tree->lru_lock);
2412 add_lru(tree, eb);
2413 spin_unlock(&tree->lru_lock);
2414 return eb;
2415
2416fail:
2417 spin_lock(&tree->lru_lock);
2418 list_del_init(&eb->lru);
2419 spin_unlock(&tree->lru_lock);
2420 if (!atomic_dec_and_test(&eb->refs))
2421 return NULL;
2422 for (index = 1; index < i; index++) {
2423 page_cache_release(extent_buffer_page(eb, index));
2424 }
2425 if (i > 0)
2426 page_cache_release(extent_buffer_page(eb, 0));
2427 __free_extent_buffer(eb);
2428 return NULL;
2429}
2430EXPORT_SYMBOL(alloc_extent_buffer);
2431
2432struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2433 u64 start, unsigned long len,
2434 gfp_t mask)
2435{
2436 unsigned long num_pages = num_extent_pages(start, len);
2437 unsigned long i;
2438 unsigned long index = start >> PAGE_CACHE_SHIFT;
2439 struct extent_buffer *eb;
2440 struct page *p;
2441 struct address_space *mapping = tree->mapping;
2442 int uptodate = 1;
2443
2444 eb = __alloc_extent_buffer(tree, start, len, mask);
2445 if (!eb || IS_ERR(eb))
2446 return NULL;
2447
2448 if (eb->flags & EXTENT_BUFFER_FILLED)
2449 goto lru_add;
2450
2451 for (i = 0; i < num_pages; i++, index++) {
2452 p = find_lock_page(mapping, index);
2453 if (!p) {
2454 goto fail;
2455 }
2456 set_page_extent_mapped(p);
2457 mark_page_accessed(p);
2458
2459 if (i == 0) {
2460 eb->first_page = p;
2461 set_page_extent_head(p, len);
2462 } else {
2463 set_page_private(p, EXTENT_PAGE_PRIVATE);
2464 }
2465
2466 if (!PageUptodate(p))
2467 uptodate = 0;
2468 unlock_page(p);
2469 }
2470 if (uptodate)
2471 eb->flags |= EXTENT_UPTODATE;
2472 eb->flags |= EXTENT_BUFFER_FILLED;
2473
2474lru_add:
2475 spin_lock(&tree->lru_lock);
2476 add_lru(tree, eb);
2477 spin_unlock(&tree->lru_lock);
2478 return eb;
2479fail:
2480 spin_lock(&tree->lru_lock);
2481 list_del_init(&eb->lru);
2482 spin_unlock(&tree->lru_lock);
2483 if (!atomic_dec_and_test(&eb->refs))
2484 return NULL;
2485 for (index = 1; index < i; index++) {
2486 page_cache_release(extent_buffer_page(eb, index));
2487 }
2488 if (i > 0)
2489 page_cache_release(extent_buffer_page(eb, 0));
2490 __free_extent_buffer(eb);
2491 return NULL;
2492}
2493EXPORT_SYMBOL(find_extent_buffer);
2494
2495void free_extent_buffer(struct extent_buffer *eb)
2496{
2497 unsigned long i;
2498 unsigned long num_pages;
2499
2500 if (!eb)
2501 return;
2502
2503 if (!atomic_dec_and_test(&eb->refs))
2504 return;
2505
2506 WARN_ON(!list_empty(&eb->lru));
2507 num_pages = num_extent_pages(eb->start, eb->len);
2508
2509 for (i = 1; i < num_pages; i++) {
2510 page_cache_release(extent_buffer_page(eb, i));
2511 }
2512 page_cache_release(extent_buffer_page(eb, 0));
2513 __free_extent_buffer(eb);
2514}
2515EXPORT_SYMBOL(free_extent_buffer);
2516
2517int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2518 struct extent_buffer *eb)
2519{
2520 int set;
2521 unsigned long i;
2522 unsigned long num_pages;
2523 struct page *page;
2524
2525 u64 start = eb->start;
2526 u64 end = start + eb->len - 1;
2527
2528 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2529 num_pages = num_extent_pages(eb->start, eb->len);
2530
2531 for (i = 0; i < num_pages; i++) {
2532 page = extent_buffer_page(eb, i);
2533 lock_page(page);
2534 if (i == 0)
2535 set_page_extent_head(page, eb->len);
2536 else
2537 set_page_private(page, EXTENT_PAGE_PRIVATE);
2538
2539 /*
2540 * if we're on the last page or the first page and the
2541 * block isn't aligned on a page boundary, do extra checks
2542 * to make sure we don't clean a page that is partially dirty
2543 */
2544 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2545 ((i == num_pages - 1) &&
2546 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2547 start = (u64)page->index << PAGE_CACHE_SHIFT;
2548 end = start + PAGE_CACHE_SIZE - 1;
2549 if (test_range_bit(tree, start, end,
2550 EXTENT_DIRTY, 0)) {
2551 unlock_page(page);
2552 continue;
2553 }
2554 }
2555 clear_page_dirty_for_io(page);
2556 write_lock_irq(&page->mapping->tree_lock);
2557 if (!PageDirty(page)) {
2558 radix_tree_tag_clear(&page->mapping->page_tree,
2559 page_index(page),
2560 PAGECACHE_TAG_DIRTY);
2561 }
2562 write_unlock_irq(&page->mapping->tree_lock);
2563 unlock_page(page);
2564 }
2565 return 0;
2566}
2567EXPORT_SYMBOL(clear_extent_buffer_dirty);
2568
2569int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2570 struct extent_buffer *eb)
2571{
2572 return wait_on_extent_writeback(tree, eb->start,
2573 eb->start + eb->len - 1);
2574}
2575EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2576
2577int set_extent_buffer_dirty(struct extent_io_tree *tree,
2578 struct extent_buffer *eb)
2579{
2580 unsigned long i;
2581 unsigned long num_pages;
2582
2583 num_pages = num_extent_pages(eb->start, eb->len);
2584 for (i = 0; i < num_pages; i++) {
2585 struct page *page = extent_buffer_page(eb, i);
2586 /* writepage may need to do something special for the
2587 * first page, so we have to make sure page->private is
2588 * properly set. releasepage may drop page->private
2589 * on us if the page isn't already dirty.
2590 */
2591 if (i == 0) {
2592 lock_page(page);
2593 set_page_extent_head(page, eb->len);
2594 } else if (PagePrivate(page) &&
2595 page->private != EXTENT_PAGE_PRIVATE) {
2596 lock_page(page);
2597 set_page_extent_mapped(page);
2598 unlock_page(page);
2599 }
2600 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2601 if (i == 0)
2602 unlock_page(page);
2603 }
2604 return set_extent_dirty(tree, eb->start,
2605 eb->start + eb->len - 1, GFP_NOFS);
2606}
2607EXPORT_SYMBOL(set_extent_buffer_dirty);
2608
2609int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2610 struct extent_buffer *eb)
2611{
2612 unsigned long i;
2613 struct page *page;
2614 unsigned long num_pages;
2615
2616 num_pages = num_extent_pages(eb->start, eb->len);
2617
2618 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2619 GFP_NOFS);
2620 for (i = 0; i < num_pages; i++) {
2621 page = extent_buffer_page(eb, i);
2622 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2623 ((i == num_pages - 1) &&
2624 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2625 check_page_uptodate(tree, page);
2626 continue;
2627 }
2628 SetPageUptodate(page);
2629 }
2630 return 0;
2631}
2632EXPORT_SYMBOL(set_extent_buffer_uptodate);
2633
2634int extent_buffer_uptodate(struct extent_io_tree *tree,
2635 struct extent_buffer *eb)
2636{
2637 if (eb->flags & EXTENT_UPTODATE)
2638 return 1;
2639 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2640 EXTENT_UPTODATE, 1);
2641}
2642EXPORT_SYMBOL(extent_buffer_uptodate);
2643
2644int read_extent_buffer_pages(struct extent_io_tree *tree,
2645 struct extent_buffer *eb,
2646 u64 start,
2647 int wait)
2648{
2649 unsigned long i;
2650 unsigned long start_i;
2651 struct page *page;
2652 int err;
2653 int ret = 0;
2654 unsigned long num_pages;
2655
2656 if (eb->flags & EXTENT_UPTODATE)
2657 return 0;
2658
2659 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2660 EXTENT_UPTODATE, 1)) {
2661 return 0;
2662 }
2663
2664 if (start) {
2665 WARN_ON(start < eb->start);
2666 start_i = (start >> PAGE_CACHE_SHIFT) -
2667 (eb->start >> PAGE_CACHE_SHIFT);
2668 } else {
2669 start_i = 0;
2670 }
2671
2672 num_pages = num_extent_pages(eb->start, eb->len);
2673 for (i = start_i; i < num_pages; i++) {
2674 page = extent_buffer_page(eb, i);
2675 if (PageUptodate(page)) {
2676 continue;
2677 }
2678 if (!wait) {
2679 if (TestSetPageLocked(page)) {
2680 continue;
2681 }
2682 } else {
2683 lock_page(page);
2684 }
2685 if (!PageUptodate(page)) {
2686 err = page->mapping->a_ops->readpage(NULL, page);
2687 if (err) {
2688 ret = err;
2689 }
2690 } else {
2691 unlock_page(page);
2692 }
2693 }
2694
2695 if (ret || !wait) {
2696 return ret;
2697 }
2698
2699 for (i = start_i; i < num_pages; i++) {
2700 page = extent_buffer_page(eb, i);
2701 wait_on_page_locked(page);
2702 if (!PageUptodate(page)) {
2703 ret = -EIO;
2704 }
2705 }
2706 if (!ret)
2707 eb->flags |= EXTENT_UPTODATE;
2708 return ret;
2709}
2710EXPORT_SYMBOL(read_extent_buffer_pages);
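
/*
 * A sketch of the typical extent buffer read path: alloc_extent_buffer()
 * finds or creates the pages, read_extent_buffer_pages() with wait == 1
 * issues and waits for the reads, read_extent_buffer() copies bytes out,
 * and free_extent_buffer() drops the reference taken at allocation time.
 * example_read_metadata() is only an illustration.
 */
static inline int example_read_metadata(struct extent_io_tree *tree,
					u64 start, unsigned long len,
					void *dst)
{
	struct extent_buffer *eb;
	int ret;

	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
	if (!eb)
		return -ENOMEM;
	ret = read_extent_buffer_pages(tree, eb, 0, 1);
	if (!ret)
		read_extent_buffer(eb, dst, 0, len);
	free_extent_buffer(eb);
	return ret;
}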
2711
2712void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2713 unsigned long start,
2714 unsigned long len)
2715{
2716 size_t cur;
2717 size_t offset;
2718 struct page *page;
2719 char *kaddr;
2720 char *dst = (char *)dstv;
2721 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2722 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2723 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2724
2725 WARN_ON(start > eb->len);
2726 WARN_ON(start + len > eb->start + eb->len);
2727
2728 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2729
2730 while(len > 0) {
2731 page = extent_buffer_page(eb, i);
2732 if (!PageUptodate(page)) {
2733 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2734 WARN_ON(1);
2735 }
2736 WARN_ON(!PageUptodate(page));
2737
2738 cur = min(len, (PAGE_CACHE_SIZE - offset));
2739 kaddr = kmap_atomic(page, KM_USER1);
2740 memcpy(dst, kaddr + offset, cur);
2741 kunmap_atomic(kaddr, KM_USER1);
2742
2743 dst += cur;
2744 len -= cur;
2745 offset = 0;
2746 i++;
2747 }
2748}
2749EXPORT_SYMBOL(read_extent_buffer);
2750
2751int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2752 unsigned long min_len, char **token, char **map,
2753 unsigned long *map_start,
2754 unsigned long *map_len, int km)
2755{
2756 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2757 char *kaddr;
2758 struct page *p;
2759 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2760 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2761 unsigned long end_i = (start_offset + start + min_len - 1) >>
2762 PAGE_CACHE_SHIFT;
2763
2764 if (i != end_i)
2765 return -EINVAL;
2766
2767 if (i == 0) {
2768 offset = start_offset;
2769 *map_start = 0;
2770 } else {
2771 offset = 0;
2772 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2773 }
2774 if (start + min_len > eb->len) {
2775printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2776 WARN_ON(1);
2777 }
2778
2779 p = extent_buffer_page(eb, i);
2780 WARN_ON(!PageUptodate(p));
2781 kaddr = kmap_atomic(p, km);
2782 *token = kaddr;
2783 *map = kaddr + offset;
2784 *map_len = PAGE_CACHE_SIZE - offset;
2785 return 0;
2786}
2787EXPORT_SYMBOL(map_private_extent_buffer);
2788
2789int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2790 unsigned long min_len,
2791 char **token, char **map,
2792 unsigned long *map_start,
2793 unsigned long *map_len, int km)
2794{
2795 int err;
2796 int save = 0;
2797 if (eb->map_token) {
2798 unmap_extent_buffer(eb, eb->map_token, km);
2799 eb->map_token = NULL;
2800 save = 1;
2801 }
2802 err = map_private_extent_buffer(eb, start, min_len, token, map,
2803 map_start, map_len, km);
2804 if (!err && save) {
2805 eb->map_token = *token;
2806 eb->kaddr = *map;
2807 eb->map_start = *map_start;
2808 eb->map_len = *map_len;
2809 }
2810 return err;
2811}
2812EXPORT_SYMBOL(map_extent_buffer);
2813
2814void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2815{
2816 kunmap_atomic(token, km);
2817}
2818EXPORT_SYMBOL(unmap_extent_buffer);
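
/*
 * A sketch of mapping a small, page-contained region of an extent buffer.
 * map_private_extent_buffer() fails with -EINVAL if the requested region
 * crosses a page boundary, so callers keep min_len small; the returned
 * token must be handed back to unmap_extent_buffer() with the same KM_*
 * slot, and *map points at the byte at eb offset *map_start.
 * example_peek_u64() is only an illustration.
 */
static inline int example_peek_u64(struct extent_buffer *eb,
				   unsigned long offset, u64 *val)
{
	unsigned long map_start;
	unsigned long map_len;
	char *token;
	char *kaddr;
	int err;

	err = map_private_extent_buffer(eb, offset, sizeof(u64), &token,
					&kaddr, &map_start, &map_len,
					KM_USER0);
	if (err)
		return err;
	*val = le64_to_cpup((__le64 *)(kaddr + offset - map_start));
	unmap_extent_buffer(eb, token, KM_USER0);
	return 0;
}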
2819
2820int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2821 unsigned long start,
2822 unsigned long len)
2823{
2824 size_t cur;
2825 size_t offset;
2826 struct page *page;
2827 char *kaddr;
2828 char *ptr = (char *)ptrv;
2829 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2830 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2831 int ret = 0;
2832
2833 WARN_ON(start > eb->len);
2834 WARN_ON(start + len > eb->start + eb->len);
2835
2836 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2837
2838 while(len > 0) {
2839 page = extent_buffer_page(eb, i);
2840 WARN_ON(!PageUptodate(page));
2841
2842 cur = min(len, (PAGE_CACHE_SIZE - offset));
2843
2844 kaddr = kmap_atomic(page, KM_USER0);
2845 ret = memcmp(ptr, kaddr + offset, cur);
2846 kunmap_atomic(kaddr, KM_USER0);
2847 if (ret)
2848 break;
2849
2850 ptr += cur;
2851 len -= cur;
2852 offset = 0;
2853 i++;
2854 }
2855 return ret;
2856}
2857EXPORT_SYMBOL(memcmp_extent_buffer);
2858
2859void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2860 unsigned long start, unsigned long len)
2861{
2862 size_t cur;
2863 size_t offset;
2864 struct page *page;
2865 char *kaddr;
2866 char *src = (char *)srcv;
2867 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2868 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2869
2870 WARN_ON(start > eb->len);
2871 WARN_ON(start + len > eb->start + eb->len);
2872
2873 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2874
2875 while(len > 0) {
2876 page = extent_buffer_page(eb, i);
2877 WARN_ON(!PageUptodate(page));
2878
2879 cur = min(len, PAGE_CACHE_SIZE - offset);
2880 kaddr = kmap_atomic(page, KM_USER1);
2881 memcpy(kaddr + offset, src, cur);
2882 kunmap_atomic(kaddr, KM_USER1);
2883
2884 src += cur;
2885 len -= cur;
2886 offset = 0;
2887 i++;
2888 }
2889}
2890EXPORT_SYMBOL(write_extent_buffer);
2891
2892void memset_extent_buffer(struct extent_buffer *eb, char c,
2893 unsigned long start, unsigned long len)
2894{
2895 size_t cur;
2896 size_t offset;
2897 struct page *page;
2898 char *kaddr;
2899 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2900 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2901
2902 WARN_ON(start > eb->len);
2903 WARN_ON(start + len > eb->start + eb->len);
2904
2905 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2906
2907 while(len > 0) {
2908 page = extent_buffer_page(eb, i);
2909 WARN_ON(!PageUptodate(page));
2910
2911 cur = min(len, PAGE_CACHE_SIZE - offset);
2912 kaddr = kmap_atomic(page, KM_USER0);
2913 memset(kaddr + offset, c, cur);
2914 kunmap_atomic(kaddr, KM_USER0);
2915
2916 len -= cur;
2917 offset = 0;
2918 i++;
2919 }
2920}
2921EXPORT_SYMBOL(memset_extent_buffer);
2922
2923void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2924 unsigned long dst_offset, unsigned long src_offset,
2925 unsigned long len)
2926{
2927 u64 dst_len = dst->len;
2928 size_t cur;
2929 size_t offset;
2930 struct page *page;
2931 char *kaddr;
2932 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2933 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2934
2935 WARN_ON(src->len != dst_len);
2936
2937 offset = (start_offset + dst_offset) &
2938 ((unsigned long)PAGE_CACHE_SIZE - 1);
2939
2940 while(len > 0) {
2941 page = extent_buffer_page(dst, i);
2942 WARN_ON(!PageUptodate(page));
2943
2944 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2945
2946 kaddr = kmap_atomic(page, KM_USER0);
2947 read_extent_buffer(src, kaddr + offset, src_offset, cur);
2948 kunmap_atomic(kaddr, KM_USER0);
2949
2950 src_offset += cur;
2951 len -= cur;
2952 offset = 0;
2953 i++;
2954 }
2955}
2956EXPORT_SYMBOL(copy_extent_buffer);
2957
2958static void move_pages(struct page *dst_page, struct page *src_page,
2959 unsigned long dst_off, unsigned long src_off,
2960 unsigned long len)
2961{
2962 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2963 if (dst_page == src_page) {
2964 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2965 } else {
2966 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
2967 char *p = dst_kaddr + dst_off + len;
2968 char *s = src_kaddr + src_off + len;
2969
2970 while (len--)
2971 *--p = *--s;
2972
2973 kunmap_atomic(src_kaddr, KM_USER1);
2974 }
2975 kunmap_atomic(dst_kaddr, KM_USER0);
2976}
2977
2978static void copy_pages(struct page *dst_page, struct page *src_page,
2979 unsigned long dst_off, unsigned long src_off,
2980 unsigned long len)
2981{
2982 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
2983 char *src_kaddr;
2984
2985 if (dst_page != src_page)
2986 src_kaddr = kmap_atomic(src_page, KM_USER1);
2987 else
2988 src_kaddr = dst_kaddr;
2989
2990 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
2991 kunmap_atomic(dst_kaddr, KM_USER0);
2992 if (dst_page != src_page)
2993 kunmap_atomic(src_kaddr, KM_USER1);
2994}
2995
2996void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2997 unsigned long src_offset, unsigned long len)
2998{
2999 size_t cur;
3000 size_t dst_off_in_page;
3001 size_t src_off_in_page;
3002 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3003 unsigned long dst_i;
3004 unsigned long src_i;
3005
3006 if (src_offset + len > dst->len) {
3007 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3008 src_offset, len, dst->len);
3009 BUG_ON(1);
3010 }
3011 if (dst_offset + len > dst->len) {
3012 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3013 dst_offset, len, dst->len);
3014 BUG_ON(1);
3015 }
3016
3017 while(len > 0) {
3018 dst_off_in_page = (start_offset + dst_offset) &
3019 ((unsigned long)PAGE_CACHE_SIZE - 1);
3020 src_off_in_page = (start_offset + src_offset) &
3021 ((unsigned long)PAGE_CACHE_SIZE - 1);
3022
3023 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3024 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3025
3026 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3027 src_off_in_page));
3028 cur = min_t(unsigned long, cur,
3029 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3030
3031 copy_pages(extent_buffer_page(dst, dst_i),
3032 extent_buffer_page(dst, src_i),
3033 dst_off_in_page, src_off_in_page, cur);
3034
3035 src_offset += cur;
3036 dst_offset += cur;
3037 len -= cur;
3038 }
3039}
3040EXPORT_SYMBOL(memcpy_extent_buffer);
3041
3042void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3043 unsigned long src_offset, unsigned long len)
3044{
3045 size_t cur;
3046 size_t dst_off_in_page;
3047 size_t src_off_in_page;
3048 unsigned long dst_end = dst_offset + len - 1;
3049 unsigned long src_end = src_offset + len - 1;
3050 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3051 unsigned long dst_i;
3052 unsigned long src_i;
3053
3054 if (src_offset + len > dst->len) {
3055 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3056 src_offset, len, dst->len);
3057 BUG_ON(1);
3058 }
3059 if (dst_offset + len > dst->len) {
3060 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3061 dst_offset, len, dst->len);
3062 BUG_ON(1);
3063 }
3064 if (dst_offset < src_offset) {
3065 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3066 return;
3067 }
3068 while(len > 0) {
3069 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3070 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3071
3072 dst_off_in_page = (start_offset + dst_end) &
3073 ((unsigned long)PAGE_CACHE_SIZE - 1);
3074 src_off_in_page = (start_offset + src_end) &
3075 ((unsigned long)PAGE_CACHE_SIZE - 1);
3076
3077 cur = min_t(unsigned long, len, src_off_in_page + 1);
3078 cur = min(cur, dst_off_in_page + 1);
3079 move_pages(extent_buffer_page(dst, dst_i),
3080 extent_buffer_page(dst, src_i),
3081 dst_off_in_page - cur + 1,
3082 src_off_in_page - cur + 1, cur);
3083
3084 dst_end -= cur;
3085 src_end -= cur;
3086 len -= cur;
3087 }
3088}
3089EXPORT_SYMBOL(memmove_extent_buffer);
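/* Why memmove_extent_buffer() walks backwards, as a small sketch with
 * invented offsets: when the destination overlaps the source at a higher
 * offset, a forward copy would clobber source bytes before they are read,
 * so the tail of the range has to move first. */
static void example_shift_items_up(struct extent_buffer *eb)
{
	/* src [200, 300) and dst [220, 320) overlap within the same buffer */
	memmove_extent_buffer(eb, 220, 200, 100);
}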
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
new file mode 100644
index 000000000000..06be1fe84b29
--- /dev/null
+++ b/fs/btrfs/extent_io.h
@@ -0,0 +1,193 @@
1#ifndef __EXTENTIO__
2#define __EXTENTIO__
3
4#include <linux/rbtree.h>
5
6/* bits for the extent state */
7#define EXTENT_DIRTY 1
8#define EXTENT_WRITEBACK (1 << 1)
9#define EXTENT_UPTODATE (1 << 2)
10#define EXTENT_LOCKED (1 << 3)
11#define EXTENT_NEW (1 << 4)
12#define EXTENT_DELALLOC (1 << 5)
13#define EXTENT_DEFRAG (1 << 6)
14#define EXTENT_DEFRAG_DONE (1 << 7)
15#define EXTENT_BUFFER_FILLED (1 << 8)
16#define EXTENT_CSUM (1 << 9)
17#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
18
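/* The bits above are or-ed together into extent_state->state; a minimal
 * sketch over an arbitrary 4K range, using the generic set/clear helpers
 * declared further down in this header. */
static void example_set_and_clear_bits(struct extent_io_tree *tree)
{
	set_extent_bits(tree, 0, 4095, EXTENT_DIRTY | EXTENT_DELALLOC,
			GFP_NOFS);
	clear_extent_bits(tree, 0, 4095, EXTENT_DELALLOC, GFP_NOFS);
}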
19/*
20 * page->private values. Every page that is controlled by the extent
21 * map has page->private set to one.
22 */
23#define EXTENT_PAGE_PRIVATE 1
24#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
25
26struct extent_io_ops {
27 int (*fill_delalloc)(struct inode *inode, u64 start, u64 end);
28 int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
29 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
30 int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end);
31 void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end);
32};
33
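/* A hedged sketch of wiring up the optional hooks; the handler below is
 * hypothetical, and which hooks may be left NULL depends on the checks the
 * callers in extent_io.c make before invoking them. */
static int example_readpage_end_io(struct page *page, u64 start, u64 end)
{
	/* e.g. verify checksums for [start, end] before the page is unlocked */
	return 0;
}

static struct extent_io_ops example_extent_io_ops = {
	.readpage_end_io_hook = example_readpage_end_io,
};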
34struct extent_io_tree {
35 struct rb_root state;
36 struct address_space *mapping;
37 u64 dirty_bytes;
38 rwlock_t lock;
39 struct extent_io_ops *ops;
40 spinlock_t lru_lock;
41 struct list_head buffer_lru;
42 int lru_size;
43};
44
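/* As a minimal sketch, an extent_io_tree normally lives per inode, is
 * initialized against that inode's mapping, and is then driven through the
 * range helpers declared below (the 4K range is arbitrary). */
static void example_io_tree_use(struct extent_io_tree *tree,
				struct inode *inode)
{
	extent_io_tree_init(tree, inode->i_mapping, GFP_NOFS);

	lock_extent(tree, 0, 4095, GFP_NOFS);
	/* ... inspect or change the state of [0, 4095] ... */
	unlock_extent(tree, 0, 4095, GFP_NOFS);
}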
45struct extent_state {
46 u64 start;
47 u64 end; /* inclusive */
48 int in_tree;
49 struct rb_node rb_node;
50 wait_queue_head_t wq;
51 atomic_t refs;
52 unsigned long state;
53
54 /* for use by the FS */
55 u64 private;
56
57 struct list_head list;
58};
59
60struct extent_buffer {
61 u64 start;
62 unsigned long len;
63 char *map_token;
64 char *kaddr;
65 unsigned long map_start;
66 unsigned long map_len;
67 struct page *first_page;
68 struct list_head lru;
69 atomic_t refs;
70 int flags;
71};
72
73struct extent_map_tree;
74
75typedef struct extent_map *(get_extent_t)(struct inode *inode,
76 struct page *page,
77 size_t page_offset,
78 u64 start, u64 len,
79 int create);
80
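/* A hedged sketch of a get_extent_t callback; the name and the single-hole
 * answer are invented, a real filesystem looks up its own metadata here.
 * Returning EXTENT_MAP_HOLE (from extent_map.h) tells the IO code that no
 * disk blocks back this range. */
static struct extent_map *example_get_extent(struct inode *inode,
					     struct page *page,
					     size_t page_offset,
					     u64 start, u64 len, int create)
{
	struct extent_map *em = alloc_extent_map(GFP_NOFS);

	if (!em)
		return ERR_PTR(-ENOMEM);
	em->start = 0;
	em->len = (u64)-1;
	em->block_start = EXTENT_MAP_HOLE;
	em->bdev = NULL;
	return em;
}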
81void extent_io_tree_init(struct extent_io_tree *tree,
82 struct address_space *mapping, gfp_t mask);
83void extent_io_tree_empty_lru(struct extent_io_tree *tree);
84int try_release_extent_mapping(struct extent_map_tree *map,
85 struct extent_io_tree *tree, struct page *page);
86int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
87int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
88int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
89 get_extent_t *get_extent);
90int __init extent_io_init(void);
91void extent_io_exit(void);
92
93u64 count_range_bits(struct extent_io_tree *tree,
94 u64 *start, u64 search_end,
95 u64 max_bytes, unsigned long bits);
96
97int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
98 int bits, int filled);
99int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
100 int bits, gfp_t mask);
101int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
102 int bits, gfp_t mask);
103int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
104 gfp_t mask);
105int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
106 gfp_t mask);
107int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
108 gfp_t mask);
109int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
110 gfp_t mask);
111int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
112 gfp_t mask);
113int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
114 u64 *start_ret, u64 *end_ret, int bits);
115int extent_invalidatepage(struct extent_io_tree *tree,
116 struct page *page, unsigned long offset);
117int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
118 get_extent_t *get_extent,
119 struct writeback_control *wbc);
120int extent_writepages(struct extent_io_tree *tree,
121 struct address_space *mapping,
122 get_extent_t *get_extent,
123 struct writeback_control *wbc);
124int extent_readpages(struct extent_io_tree *tree,
125 struct address_space *mapping,
126 struct list_head *pages, unsigned nr_pages,
127 get_extent_t get_extent);
128int extent_prepare_write(struct extent_io_tree *tree,
129 struct inode *inode, struct page *page,
130 unsigned from, unsigned to, get_extent_t *get_extent);
131int extent_commit_write(struct extent_io_tree *tree,
132 struct inode *inode, struct page *page,
133 unsigned from, unsigned to);
134sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
135 get_extent_t *get_extent);
136int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
137int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
138int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
139void set_page_extent_mapped(struct page *page);
140
141struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
142 u64 start, unsigned long len,
143 struct page *page0,
144 gfp_t mask);
145struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
146 u64 start, unsigned long len,
147 gfp_t mask);
148void free_extent_buffer(struct extent_buffer *eb);
149int read_extent_buffer_pages(struct extent_io_tree *tree,
150 struct extent_buffer *eb, u64 start, int wait);
151
152static inline void extent_buffer_get(struct extent_buffer *eb)
153{
154 atomic_inc(&eb->refs);
155}
156
157int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
158 unsigned long start,
159 unsigned long len);
160void read_extent_buffer(struct extent_buffer *eb, void *dst,
161 unsigned long start,
162 unsigned long len);
163void write_extent_buffer(struct extent_buffer *eb, const void *src,
164 unsigned long start, unsigned long len);
165void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
166 unsigned long dst_offset, unsigned long src_offset,
167 unsigned long len);
168void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
169 unsigned long src_offset, unsigned long len);
170void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
171 unsigned long src_offset, unsigned long len);
172void memset_extent_buffer(struct extent_buffer *eb, char c,
173 unsigned long start, unsigned long len);
174int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
175 struct extent_buffer *eb);
176int clear_extent_buffer_dirty(struct extent_io_tree *tree,
177 struct extent_buffer *eb);
178int set_extent_buffer_dirty(struct extent_io_tree *tree,
179 struct extent_buffer *eb);
180int set_extent_buffer_uptodate(struct extent_io_tree *tree,
181 struct extent_buffer *eb);
182int extent_buffer_uptodate(struct extent_io_tree *tree,
183 struct extent_buffer *eb);
184int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
185 unsigned long min_len, char **token, char **map,
186 unsigned long *map_start,
187 unsigned long *map_len, int km);
188int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
189 unsigned long min_len, char **token, char **map,
190 unsigned long *map_start,
191 unsigned long *map_len, int km);
192void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
193#endif
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 010a287fbd71..268ad8facf6e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1,17 +1,10 @@
1#include <linux/bitops.h> 1#include <linux/err.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/gfp.h> 2#include <linux/gfp.h>
6#include <linux/pagemap.h> 3#include <linux/slab.h>
7#include <linux/page-flags.h>
8#include <linux/module.h> 4#include <linux/module.h>
9#include <linux/spinlock.h> 5#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/version.h> 6#include <linux/version.h>
13#include <linux/writeback.h> 7#include <linux/hardirq.h>
14#include <linux/pagevec.h>
15#include "extent_map.h" 8#include "extent_map.h"
16 9
17/* temporary define until extent_map moves out of btrfs */ 10/* temporary define until extent_map moves out of btrfs */
@@ -21,27 +14,6 @@ struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
21 unsigned long)); 14 unsigned long));
22 15
23static struct kmem_cache *extent_map_cache; 16static struct kmem_cache *extent_map_cache;
24static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache;
26
27static LIST_HEAD(buffers);
28static LIST_HEAD(states);
29
30static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
31#define BUFFER_LRU_MAX 64
32
33struct tree_entry {
34 u64 start;
35 u64 end;
36 int in_tree;
37 struct rb_node rb_node;
38};
39
40struct extent_page_data {
41 struct bio *bio;
42 struct extent_map_tree *tree;
43 get_extent_t *get_extent;
44};
45 17
46int __init extent_map_init(void) 18int __init extent_map_init(void)
47{ 19{
@@ -50,72 +22,23 @@ int __init extent_map_init(void)
50 NULL); 22 NULL);
51 if (!extent_map_cache) 23 if (!extent_map_cache)
52 return -ENOMEM; 24 return -ENOMEM;
53 extent_state_cache = btrfs_cache_create("extent_state",
54 sizeof(struct extent_state), 0,
55 NULL);
56 if (!extent_state_cache)
57 goto free_map_cache;
58 extent_buffer_cache = btrfs_cache_create("extent_buffers",
59 sizeof(struct extent_buffer), 0,
60 NULL);
61 if (!extent_buffer_cache)
62 goto free_state_cache;
63 return 0; 25 return 0;
64
65free_state_cache:
66 kmem_cache_destroy(extent_state_cache);
67free_map_cache:
68 kmem_cache_destroy(extent_map_cache);
69 return -ENOMEM;
70} 26}
71 27
72void extent_map_exit(void) 28void extent_map_exit(void)
73{ 29{
74 struct extent_state *state;
75
76 while (!list_empty(&states)) {
77 state = list_entry(states.next, struct extent_state, list);
78 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
79 list_del(&state->list);
80 kmem_cache_free(extent_state_cache, state);
81
82 }
83
84 if (extent_map_cache) 30 if (extent_map_cache)
85 kmem_cache_destroy(extent_map_cache); 31 kmem_cache_destroy(extent_map_cache);
86 if (extent_state_cache)
87 kmem_cache_destroy(extent_state_cache);
88 if (extent_buffer_cache)
89 kmem_cache_destroy(extent_buffer_cache);
90} 32}
91 33
92void extent_map_tree_init(struct extent_map_tree *tree, 34void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
93 struct address_space *mapping, gfp_t mask)
94{ 35{
95 tree->map.rb_node = NULL; 36 tree->map.rb_node = NULL;
96 tree->state.rb_node = NULL; 37 tree->last = NULL;
97 tree->ops = NULL; 38 spin_lock_init(&tree->lock);
98 tree->dirty_bytes = 0;
99 rwlock_init(&tree->lock);
100 spin_lock_init(&tree->lru_lock);
101 tree->mapping = mapping;
102 INIT_LIST_HEAD(&tree->buffer_lru);
103 tree->lru_size = 0;
104} 39}
105EXPORT_SYMBOL(extent_map_tree_init); 40EXPORT_SYMBOL(extent_map_tree_init);
106 41
107void extent_map_tree_empty_lru(struct extent_map_tree *tree)
108{
109 struct extent_buffer *eb;
110 while(!list_empty(&tree->buffer_lru)) {
111 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
112 lru);
113 list_del_init(&eb->lru);
114 free_extent_buffer(eb);
115 }
116}
117EXPORT_SYMBOL(extent_map_tree_empty_lru);
118
119struct extent_map *alloc_extent_map(gfp_t mask) 42struct extent_map *alloc_extent_map(gfp_t mask)
120{ 43{
121 struct extent_map *em; 44 struct extent_map *em;
@@ -123,6 +46,7 @@ struct extent_map *alloc_extent_map(gfp_t mask)
123 if (!em || IS_ERR(em)) 46 if (!em || IS_ERR(em))
124 return em; 47 return em;
125 em->in_tree = 0; 48 em->in_tree = 0;
49 em->flags = 0;
126 atomic_set(&em->refs, 1); 50 atomic_set(&em->refs, 1);
127 return em; 51 return em;
128} 52}
@@ -132,6 +56,7 @@ void free_extent_map(struct extent_map *em)
132{ 56{
133 if (!em) 57 if (!em)
134 return; 58 return;
59 WARN_ON(atomic_read(&em->refs) == 0);
135 if (atomic_dec_and_test(&em->refs)) { 60 if (atomic_dec_and_test(&em->refs)) {
136 WARN_ON(em->in_tree); 61 WARN_ON(em->in_tree);
137 kmem_cache_free(extent_map_cache, em); 62 kmem_cache_free(extent_map_cache, em);
@@ -139,64 +64,28 @@ void free_extent_map(struct extent_map *em)
139} 64}
140EXPORT_SYMBOL(free_extent_map); 65EXPORT_SYMBOL(free_extent_map);
141 66
142
143struct extent_state *alloc_extent_state(gfp_t mask)
144{
145 struct extent_state *state;
146 unsigned long flags;
147
148 state = kmem_cache_alloc(extent_state_cache, mask);
149 if (!state || IS_ERR(state))
150 return state;
151 state->state = 0;
152 state->in_tree = 0;
153 state->private = 0;
154
155 spin_lock_irqsave(&state_lock, flags);
156 list_add(&state->list, &states);
157 spin_unlock_irqrestore(&state_lock, flags);
158
159 atomic_set(&state->refs, 1);
160 init_waitqueue_head(&state->wq);
161 return state;
162}
163EXPORT_SYMBOL(alloc_extent_state);
164
165void free_extent_state(struct extent_state *state)
166{
167 unsigned long flags;
168 if (!state)
169 return;
170 if (atomic_dec_and_test(&state->refs)) {
171 WARN_ON(state->in_tree);
172 spin_lock_irqsave(&state_lock, flags);
173 list_del(&state->list);
174 spin_unlock_irqrestore(&state_lock, flags);
175 kmem_cache_free(extent_state_cache, state);
176 }
177}
178EXPORT_SYMBOL(free_extent_state);
179
180static struct rb_node *tree_insert(struct rb_root *root, u64 offset, 67static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
181 struct rb_node *node) 68 struct rb_node *node)
182{ 69{
183 struct rb_node ** p = &root->rb_node; 70 struct rb_node ** p = &root->rb_node;
184 struct rb_node * parent = NULL; 71 struct rb_node * parent = NULL;
185 struct tree_entry *entry; 72 struct extent_map *entry;
186 73
187 while(*p) { 74 while(*p) {
188 parent = *p; 75 parent = *p;
189 entry = rb_entry(parent, struct tree_entry, rb_node); 76 entry = rb_entry(parent, struct extent_map, rb_node);
77
78 WARN_ON(!entry->in_tree);
190 79
191 if (offset < entry->start) 80 if (offset < entry->start)
192 p = &(*p)->rb_left; 81 p = &(*p)->rb_left;
193 else if (offset > entry->end) 82 else if (offset >= extent_map_end(entry))
194 p = &(*p)->rb_right; 83 p = &(*p)->rb_right;
195 else 84 else
196 return parent; 85 return parent;
197 } 86 }
198 87
199 entry = rb_entry(node, struct tree_entry, rb_node); 88 entry = rb_entry(node, struct extent_map, rb_node);
200 entry->in_tree = 1; 89 entry->in_tree = 1;
201 rb_link_node(node, parent, p); 90 rb_link_node(node, parent, p);
202 rb_insert_color(node, root); 91 rb_insert_color(node, root);
@@ -210,17 +99,19 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
210 struct rb_node * n = root->rb_node; 99 struct rb_node * n = root->rb_node;
211 struct rb_node *prev = NULL; 100 struct rb_node *prev = NULL;
212 struct rb_node *orig_prev = NULL; 101 struct rb_node *orig_prev = NULL;
213 struct tree_entry *entry; 102 struct extent_map *entry;
214 struct tree_entry *prev_entry = NULL; 103 struct extent_map *prev_entry = NULL;
215 104
216 while(n) { 105 while(n) {
217 entry = rb_entry(n, struct tree_entry, rb_node); 106 entry = rb_entry(n, struct extent_map, rb_node);
218 prev = n; 107 prev = n;
219 prev_entry = entry; 108 prev_entry = entry;
220 109
110 WARN_ON(!entry->in_tree);
111
221 if (offset < entry->start) 112 if (offset < entry->start)
222 n = n->rb_left; 113 n = n->rb_left;
223 else if (offset > entry->end) 114 else if (offset >= extent_map_end(entry))
224 n = n->rb_right; 115 n = n->rb_right;
225 else 116 else
226 return n; 117 return n;
@@ -228,19 +119,19 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
228 119
229 if (prev_ret) { 120 if (prev_ret) {
230 orig_prev = prev; 121 orig_prev = prev;
231 while(prev && offset > prev_entry->end) { 122 while(prev && offset >= extent_map_end(prev_entry)) {
232 prev = rb_next(prev); 123 prev = rb_next(prev);
233 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 124 prev_entry = rb_entry(prev, struct extent_map, rb_node);
234 } 125 }
235 *prev_ret = prev; 126 *prev_ret = prev;
236 prev = orig_prev; 127 prev = orig_prev;
237 } 128 }
238 129
239 if (next_ret) { 130 if (next_ret) {
240 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 131 prev_entry = rb_entry(prev, struct extent_map, rb_node);
241 while(prev && offset < prev_entry->start) { 132 while(prev && offset < prev_entry->start) {
242 prev = rb_prev(prev); 133 prev = rb_prev(prev);
243 prev_entry = rb_entry(prev, struct tree_entry, rb_node); 134 prev_entry = rb_entry(prev, struct extent_map, rb_node);
244 } 135 }
245 *next_ret = prev; 136 *next_ret = prev;
246 } 137 }
@@ -257,22 +148,26 @@ static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
257 return ret; 148 return ret;
258} 149}
259 150
260static int tree_delete(struct rb_root *root, u64 offset) 151static int mergable_maps(struct extent_map *prev, struct extent_map *next)
261{ 152{
262 struct rb_node *node; 153 if (extent_map_end(prev) == next->start &&
263 struct tree_entry *entry; 154 prev->flags == next->flags &&
264 155 prev->bdev == next->bdev &&
265 node = __tree_search(root, offset, NULL, NULL); 156 ((next->block_start == EXTENT_MAP_HOLE &&
266 if (!node) 157 prev->block_start == EXTENT_MAP_HOLE) ||
267 return -ENOENT; 158 (next->block_start == EXTENT_MAP_INLINE &&
268 entry = rb_entry(node, struct tree_entry, rb_node); 159 prev->block_start == EXTENT_MAP_INLINE) ||
269 entry->in_tree = 0; 160 (next->block_start == EXTENT_MAP_DELALLOC &&
270 rb_erase(node, root); 161 prev->block_start == EXTENT_MAP_DELALLOC) ||
162 (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
163 next->block_start == extent_map_block_end(prev)))) {
164 return 1;
165 }
271 return 0; 166 return 0;
272} 167}
273 168
274/* 169/*
275 * add_extent_mapping tries a simple backward merge with existing 170 * add_extent_mapping tries a simple forward/backward merge with existing
276 * mappings. The extent_map struct passed in will be inserted into 171 * mappings. The extent_map struct passed in will be inserted into
277 * the tree directly (no copies made, just a reference taken). 172 * the tree directly (no copies made, just a reference taken).
278 */ 173 */
@@ -280,13 +175,12 @@ int add_extent_mapping(struct extent_map_tree *tree,
280 struct extent_map *em) 175 struct extent_map *em)
281{ 176{
282 int ret = 0; 177 int ret = 0;
283 struct extent_map *prev = NULL; 178 struct extent_map *merge = NULL;
284 struct rb_node *rb; 179 struct rb_node *rb;
285 180
286 write_lock_irq(&tree->lock); 181 rb = tree_insert(&tree->map, em->start, &em->rb_node);
287 rb = tree_insert(&tree->map, em->end, &em->rb_node);
288 if (rb) { 182 if (rb) {
289 prev = rb_entry(rb, struct extent_map, rb_node); 183 merge = rb_entry(rb, struct extent_map, rb_node);
290 ret = -EEXIST; 184 ret = -EEXIST;
291 goto out; 185 goto out;
292 } 186 }
@@ -294,53 +188,60 @@ int add_extent_mapping(struct extent_map_tree *tree,
294 if (em->start != 0) { 188 if (em->start != 0) {
295 rb = rb_prev(&em->rb_node); 189 rb = rb_prev(&em->rb_node);
296 if (rb) 190 if (rb)
297 prev = rb_entry(rb, struct extent_map, rb_node); 191 merge = rb_entry(rb, struct extent_map, rb_node);
298 if (prev && prev->end + 1 == em->start && 192 if (rb && mergable_maps(merge, em)) {
299 ((em->block_start == EXTENT_MAP_HOLE && 193 em->start = merge->start;
300 prev->block_start == EXTENT_MAP_HOLE) || 194 em->len += merge->len;
301 (em->block_start == EXTENT_MAP_INLINE && 195 em->block_start = merge->block_start;
302 prev->block_start == EXTENT_MAP_INLINE) || 196 merge->in_tree = 0;
303 (em->block_start == EXTENT_MAP_DELALLOC && 197 rb_erase(&merge->rb_node, &tree->map);
304 prev->block_start == EXTENT_MAP_DELALLOC) || 198 free_extent_map(merge);
305 (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
306 em->block_start == prev->block_end + 1))) {
307 em->start = prev->start;
308 em->block_start = prev->block_start;
309 rb_erase(&prev->rb_node, &tree->map);
310 prev->in_tree = 0;
311 free_extent_map(prev);
312 } 199 }
313 } 200 }
201 rb = rb_next(&em->rb_node);
202 if (rb)
203 merge = rb_entry(rb, struct extent_map, rb_node);
204 if (rb && mergable_maps(em, merge)) {
205 em->len += merge->len;
206 rb_erase(&merge->rb_node, &tree->map);
207 merge->in_tree = 0;
208 free_extent_map(merge);
209 }
210 tree->last = em;
314out: 211out:
315 write_unlock_irq(&tree->lock);
316 return ret; 212 return ret;
317} 213}
318EXPORT_SYMBOL(add_extent_mapping); 214EXPORT_SYMBOL(add_extent_mapping);
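/* To make the new [start, len] representation concrete, a hedged sketch of
 * inserting one mapping. The sizes are invented, the caller is assumed to
 * hold whatever lock now protects the tree (the helpers no longer take it),
 * and on success the allocation reference is assumed to stay with the tree. */
static int example_add_mapping(struct extent_map_tree *tree,
			       struct block_device *bdev)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->start = 0;			/* file bytes [0, 16K) ... */
	em->len = 16 * 1024;
	em->block_start = 1024 * 1024;	/* ... live at disk byte 1M */
	em->bdev = bdev;

	ret = add_extent_mapping(tree, em);
	if (ret == -EEXIST)
		free_extent_map(em);	/* an overlapping mapping already exists */
	return ret;
}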
319 215
216static u64 range_end(u64 start, u64 len)
217{
218 if (start + len < start)
219 return (u64)-1;
220 return start + len;
221}
222
320/* 223/*
321 * lookup_extent_mapping returns the first extent_map struct in the 224 * lookup_extent_mapping returns the first extent_map struct in the
322 * tree that intersects the [start, end] (inclusive) range. There may 225 * tree that intersects the [start, len] range. There may
323 * be additional objects in the tree that intersect, so check the object 226 * be additional objects in the tree that intersect, so check the object
324 * returned carefully to make sure you don't need additional lookups. 227 * returned carefully to make sure you don't need additional lookups.
325 */ 228 */
326struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, 229struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
327 u64 start, u64 end) 230 u64 start, u64 len)
328{ 231{
329 struct extent_map *em; 232 struct extent_map *em;
330 struct rb_node *rb_node; 233 struct rb_node *rb_node;
331 struct rb_node *prev = NULL; 234 struct rb_node *prev = NULL;
332 struct rb_node *next = NULL; struct rb_node *next = NULL;
 u64 end = range_end(start, len);
 em = tree->last;
 if (em && end > em->start && start < extent_map_end(em))
 goto found;
333 235
334 read_lock_irq(&tree->lock);
335 rb_node = __tree_search(&tree->map, start, &prev, &next); 236 rb_node = __tree_search(&tree->map, start, &prev, &next);
336 if (!rb_node && prev) { 237 if (!rb_node && prev) {
337 em = rb_entry(prev, struct extent_map, rb_node); 238 em = rb_entry(prev, struct extent_map, rb_node);
338 if (em->start <= end && em->end >= start) 239 if (end > em->start && start < extent_map_end(em))
339 goto found; 240 goto found;
340 } 241 }
341 if (!rb_node && next) { 242 if (!rb_node && next) {
342 em = rb_entry(next, struct extent_map, rb_node); 243 em = rb_entry(next, struct extent_map, rb_node);
343 if (em->start <= end && em->end >= start) 244 if (end > em->start && start < extent_map_end(em))
344 goto found; 245 goto found;
345 } 246 }
346 if (!rb_node) { 247 if (!rb_node) {
@@ -352,14 +253,16 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
352 goto out; 253 goto out;
353 } 254 }
354 em = rb_entry(rb_node, struct extent_map, rb_node); 255 em = rb_entry(rb_node, struct extent_map, rb_node);
355 if (em->end < start || em->start > end) { 256 if (end > em->start && start < extent_map_end(em))
356 em = NULL; 257 goto found;
357 goto out; 258
358 } 259 em = NULL;
260 goto out;
261
359found: 262found:
360 atomic_inc(&em->refs); 263 atomic_inc(&em->refs);
264 tree->last = em;
361out: 265out:
362 read_unlock_irq(&tree->lock);
363 return em; 266 return em;
364} 267}
365EXPORT_SYMBOL(lookup_extent_mapping); 268EXPORT_SYMBOL(lookup_extent_mapping);
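/* A matching lookup sketch. With the read_lock_irq()/read_unlock_irq()
 * calls removed from lookup_extent_mapping(), serialization is assumed to
 * be the caller's job through the tree's new spinlock; the returned map
 * carries its own reference and must be dropped with free_extent_map(). */
static u64 example_lookup_disk_byte(struct extent_map_tree *tree, u64 pos)
{
	struct extent_map *em;
	u64 ret = (u64)-1;

	spin_lock(&tree->lock);
	em = lookup_extent_mapping(tree, pos, 1);
	spin_unlock(&tree->lock);
	if (!em)
		return ret;

	if (em->block_start < EXTENT_MAP_LAST_BYTE)
		ret = em->block_start + (pos - em->start);
	free_extent_map(em);
	return ret;
}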
@@ -370,2866 +273,12 @@ EXPORT_SYMBOL(lookup_extent_mapping);
370 */ 273 */
371int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) 274int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
372{ 275{
373 int ret;
374
375 write_lock_irq(&tree->lock);
376 ret = tree_delete(&tree->map, em->end);
377 write_unlock_irq(&tree->lock);
378 return ret;
379}
380EXPORT_SYMBOL(remove_extent_mapping);
381
382/*
383 * utility function to look for merge candidates inside a given range.
384 * Any extents with matching state are merged together into a single
385 * extent in the tree. Extents with EXTENT_IO in their state field
386 * are not merged because the end_io handlers need to be able to do
387 * operations on them without sleeping (or doing allocations/splits).
388 *
389 * This should be called with the tree lock held.
390 */
391static int merge_state(struct extent_map_tree *tree,
392 struct extent_state *state)
393{
394 struct extent_state *other;
395 struct rb_node *other_node;
396
397 if (state->state & EXTENT_IOBITS)
398 return 0;
399
400 other_node = rb_prev(&state->rb_node);
401 if (other_node) {
402 other = rb_entry(other_node, struct extent_state, rb_node);
403 if (other->end == state->start - 1 &&
404 other->state == state->state) {
405 state->start = other->start;
406 other->in_tree = 0;
407 rb_erase(&other->rb_node, &tree->state);
408 free_extent_state(other);
409 }
410 }
411 other_node = rb_next(&state->rb_node);
412 if (other_node) {
413 other = rb_entry(other_node, struct extent_state, rb_node);
414 if (other->start == state->end + 1 &&
415 other->state == state->state) {
416 other->start = state->start;
417 state->in_tree = 0;
418 rb_erase(&state->rb_node, &tree->state);
419 free_extent_state(state);
420 }
421 }
422 return 0;
423}
424
425/*
426 * insert an extent_state struct into the tree. 'bits' are set on the
427 * struct before it is inserted.
428 *
429 * This may return -EEXIST if the extent is already there, in which case the
430 * state struct is freed.
431 *
432 * The tree lock is not taken internally. This is a utility function and
433 * probably isn't what you want to call (see set/clear_extent_bit).
434 */
435static int insert_state(struct extent_map_tree *tree,
436 struct extent_state *state, u64 start, u64 end,
437 int bits)
438{
439 struct rb_node *node;
440
441 if (end < start) {
442 printk("end < start %Lu %Lu\n", end, start);
443 WARN_ON(1);
444 }
445 if (bits & EXTENT_DIRTY)
446 tree->dirty_bytes += end - start + 1;
447 state->state |= bits;
448 state->start = start;
449 state->end = end;
450 node = tree_insert(&tree->state, end, &state->rb_node);
451 if (node) {
452 struct extent_state *found;
453 found = rb_entry(node, struct extent_state, rb_node);
454 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
455 free_extent_state(state);
456 return -EEXIST;
457 }
458 merge_state(tree, state);
459 return 0;
460}
461
462/*
463 * split a given extent state struct in two, inserting the preallocated
464 * struct 'prealloc' as the newly created second half. 'split' indicates an
465 * offset inside 'orig' where it should be split.
466 *
467 * Before calling,
468 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
469 * are two extent state structs in the tree:
470 * prealloc: [orig->start, split - 1]
471 * orig: [ split, orig->end ]
472 *
473 * The tree locks are not taken by this function. They need to be held
474 * by the caller.
475 */
476static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
477 struct extent_state *prealloc, u64 split)
478{
479 struct rb_node *node;
480 prealloc->start = orig->start;
481 prealloc->end = split - 1;
482 prealloc->state = orig->state;
483 orig->start = split;
484
485 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
486 if (node) {
487 struct extent_state *found;
488 found = rb_entry(node, struct extent_state, rb_node);
489 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
490 free_extent_state(prealloc);
491 return -EEXIST;
492 }
493 return 0;
494}
495
496/*
497 * utility function to clear some bits in an extent state struct.
498 * it will optionally wake up any one waiting on this state (wake == 1), or
499 * forcibly remove the state from the tree (delete == 1).
500 *
501 * If no bits are set on the state struct after clearing things, the
502 * struct is freed and removed from the tree
503 */
504static int clear_state_bit(struct extent_map_tree *tree,
505 struct extent_state *state, int bits, int wake,
506 int delete)
507{
508 int ret = state->state & bits;
509
510 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
511 u64 range = state->end - state->start + 1;
512 WARN_ON(range > tree->dirty_bytes);
513 tree->dirty_bytes -= range;
514 }
515 state->state &= ~bits;
516 if (wake)
517 wake_up(&state->wq);
518 if (delete || state->state == 0) {
519 if (state->in_tree) {
520 rb_erase(&state->rb_node, &tree->state);
521 state->in_tree = 0;
522 free_extent_state(state);
523 } else {
524 WARN_ON(1);
525 }
526 } else {
527 merge_state(tree, state);
528 }
529 return ret;
530}
531
532/*
533 * clear some bits on a range in the tree. This may require splitting
534 * or inserting elements in the tree, so the gfp mask is used to
535 * indicate which allocations or sleeping are allowed.
536 *
537 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
538 * the given range from the tree regardless of state (ie for truncate).
539 *
540 * the range [start, end] is inclusive.
541 *
542 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
543 * bits were already set, or zero if none of the bits were already set.
544 */
545int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
546 int bits, int wake, int delete, gfp_t mask)
547{
548 struct extent_state *state;
549 struct extent_state *prealloc = NULL;
550 struct rb_node *node;
551 unsigned long flags;
552 int err;
553 int set = 0;
554
555again:
556 if (!prealloc && (mask & __GFP_WAIT)) {
557 prealloc = alloc_extent_state(mask);
558 if (!prealloc)
559 return -ENOMEM;
560 }
561
562 write_lock_irqsave(&tree->lock, flags);
563 /*
564 * this search will find the extents that end after
565 * our range starts
566 */
567 node = tree_search(&tree->state, start);
568 if (!node)
569 goto out;
570 state = rb_entry(node, struct extent_state, rb_node);
571 if (state->start > end)
572 goto out;
573 WARN_ON(state->end < start);
574
575 /*
576 * | ---- desired range ---- |
577 * | state | or
578 * | ------------- state -------------- |
579 *
580 * We need to split the extent we found, and may flip
581 * bits on second half.
582 *
583 * If the extent we found extends past our range, we
584 * just split and search again. It'll get split again
585 * the next time though.
586 *
587 * If the extent we found is inside our range, we clear
588 * the desired bit on it.
589 */
590
591 if (state->start < start) {
592 err = split_state(tree, state, prealloc, start);
593 BUG_ON(err == -EEXIST);
594 prealloc = NULL;
595 if (err)
596 goto out;
597 if (state->end <= end) {
598 start = state->end + 1;
599 set |= clear_state_bit(tree, state, bits,
600 wake, delete);
601 } else {
602 start = state->start;
603 }
604 goto search_again;
605 }
606 /*
607 * | ---- desired range ---- |
608 * | state |
609 * We need to split the extent, and clear the bit
610 * on the first half
611 */
612 if (state->start <= end && state->end > end) {
613 err = split_state(tree, state, prealloc, end + 1);
614 BUG_ON(err == -EEXIST);
615
616 if (wake)
617 wake_up(&state->wq);
618 set |= clear_state_bit(tree, prealloc, bits,
619 wake, delete);
620 prealloc = NULL;
621 goto out;
622 }
623
624 start = state->end + 1;
625 set |= clear_state_bit(tree, state, bits, wake, delete);
626 goto search_again;
627
628out:
629 write_unlock_irqrestore(&tree->lock, flags);
630 if (prealloc)
631 free_extent_state(prealloc);
632
633 return set;
634
635search_again:
636 if (start > end)
637 goto out;
638 write_unlock_irqrestore(&tree->lock, flags);
639 if (mask & __GFP_WAIT)
640 cond_resched();
641 goto again;
642}
643EXPORT_SYMBOL(clear_extent_bit);
644
645static int wait_on_state(struct extent_map_tree *tree,
646 struct extent_state *state)
647{
648 DEFINE_WAIT(wait);
649 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
650 read_unlock_irq(&tree->lock);
651 schedule();
652 read_lock_irq(&tree->lock);
653 finish_wait(&state->wq, &wait);
654 return 0;
655}
656
657/*
658 * waits for one or more bits to clear on a range in the state tree.
659 * The range [start, end] is inclusive.
660 * The tree lock is taken by this function
661 */
662int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
663{
664 struct extent_state *state;
665 struct rb_node *node;
666
667 read_lock_irq(&tree->lock);
668again:
669 while (1) {
670 /*
671 * this search will find all the extents that end after
672 * our range starts
673 */
674 node = tree_search(&tree->state, start);
675 if (!node)
676 break;
677
678 state = rb_entry(node, struct extent_state, rb_node);
679
680 if (state->start > end)
681 goto out;
682
683 if (state->state & bits) {
684 start = state->start;
685 atomic_inc(&state->refs);
686 wait_on_state(tree, state);
687 free_extent_state(state);
688 goto again;
689 }
690 start = state->end + 1;
691
692 if (start > end)
693 break;
694
695 if (need_resched()) {
696 read_unlock_irq(&tree->lock);
697 cond_resched();
698 read_lock_irq(&tree->lock);
699 }
700 }
701out:
702 read_unlock_irq(&tree->lock);
703 return 0;
704}
705EXPORT_SYMBOL(wait_extent_bit);
706
707static void set_state_bits(struct extent_map_tree *tree,
708 struct extent_state *state,
709 int bits)
710{
711 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
712 u64 range = state->end - state->start + 1;
713 tree->dirty_bytes += range;
714 }
715 state->state |= bits;
716}
717
718/*
719 * set some bits on a range in the tree. This may require allocations
720 * or sleeping, so the gfp mask is used to indicate what is allowed.
721 *
722 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
723 * range already has the desired bits set. The start of the existing
724 * range is returned in failed_start in this case.
725 *
726 * [start, end] is inclusive
727 * This takes the tree lock.
728 */
729int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
730 int exclusive, u64 *failed_start, gfp_t mask)
731{
732 struct extent_state *state;
733 struct extent_state *prealloc = NULL;
734 struct rb_node *node;
735 unsigned long flags;
736 int err = 0;
737 int set;
738 u64 last_start;
739 u64 last_end;
740again:
741 if (!prealloc && (mask & __GFP_WAIT)) {
742 prealloc = alloc_extent_state(mask);
743 if (!prealloc)
744 return -ENOMEM;
745 }
746
747 write_lock_irqsave(&tree->lock, flags);
748 /*
749 * this search will find all the extents that end after
750 * our range starts.
751 */
752 node = tree_search(&tree->state, start);
753 if (!node) {
754 err = insert_state(tree, prealloc, start, end, bits);
755 prealloc = NULL;
756 BUG_ON(err == -EEXIST);
757 goto out;
758 }
759
760 state = rb_entry(node, struct extent_state, rb_node);
761 last_start = state->start;
762 last_end = state->end;
763
764 /*
765 * | ---- desired range ---- |
766 * | state |
767 *
768 * Just lock what we found and keep going
769 */
770 if (state->start == start && state->end <= end) {
771 set = state->state & bits;
772 if (set && exclusive) {
773 *failed_start = state->start;
774 err = -EEXIST;
775 goto out;
776 }
777 set_state_bits(tree, state, bits);
778 start = state->end + 1;
779 merge_state(tree, state);
780 goto search_again;
781 }
782
783 /*
784 * | ---- desired range ---- |
785 * | state |
786 * or
787 * | ------------- state -------------- |
788 *
789 * We need to split the extent we found, and may flip bits on
790 * second half.
791 *
792 * If the extent we found extends past our
793 * range, we just split and search again. It'll get split
794 * again the next time though.
795 *
796 * If the extent we found is inside our range, we set the
797 * desired bit on it.
798 */
799 if (state->start < start) {
800 set = state->state & bits;
801 if (exclusive && set) {
802 *failed_start = start;
803 err = -EEXIST;
804 goto out;
805 }
806 err = split_state(tree, state, prealloc, start);
807 BUG_ON(err == -EEXIST);
808 prealloc = NULL;
809 if (err)
810 goto out;
811 if (state->end <= end) {
812 set_state_bits(tree, state, bits);
813 start = state->end + 1;
814 merge_state(tree, state);
815 } else {
816 start = state->start;
817 }
818 goto search_again;
819 }
820 /*
821 * | ---- desired range ---- |
822 * | state | or | state |
823 *
824 * There's a hole, we need to insert something in it and
825 * ignore the extent we found.
826 */
827 if (state->start > start) {
828 u64 this_end;
829 if (end < last_start)
830 this_end = end;
831 else
832 this_end = last_start -1;
833 err = insert_state(tree, prealloc, start, this_end,
834 bits);
835 prealloc = NULL;
836 BUG_ON(err == -EEXIST);
837 if (err)
838 goto out;
839 start = this_end + 1;
840 goto search_again;
841 }
842 /*
843 * | ---- desired range ---- |
844 * | state |
845 * We need to split the extent, and set the bit
846 * on the first half
847 */
848 if (state->start <= end && state->end > end) {
849 set = state->state & bits;
850 if (exclusive && set) {
851 *failed_start = start;
852 err = -EEXIST;
853 goto out;
854 }
855 err = split_state(tree, state, prealloc, end + 1);
856 BUG_ON(err == -EEXIST);
857
858 set_state_bits(tree, prealloc, bits);
859 merge_state(tree, prealloc);
860 prealloc = NULL;
861 goto out;
862 }
863
864 goto search_again;
865
866out:
867 write_unlock_irqrestore(&tree->lock, flags);
868 if (prealloc)
869 free_extent_state(prealloc);
870
871 return err;
872
873search_again:
874 if (start > end)
875 goto out;
876 write_unlock_irqrestore(&tree->lock, flags);
877 if (mask & __GFP_WAIT)
878 cond_resched();
879 goto again;
880}
881EXPORT_SYMBOL(set_extent_bit);
882
883/* wrappers around set/clear extent bit */
884int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
885 gfp_t mask)
886{
887 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
888 mask);
889}
890EXPORT_SYMBOL(set_extent_dirty);
891
892int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
893 int bits, gfp_t mask)
894{
895 return set_extent_bit(tree, start, end, bits, 0, NULL,
896 mask);
897}
898EXPORT_SYMBOL(set_extent_bits);
899
900int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
901 int bits, gfp_t mask)
902{
903 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
904}
905EXPORT_SYMBOL(clear_extent_bits);
906
907int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
908 gfp_t mask)
909{
910 return set_extent_bit(tree, start, end,
911 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
912 mask);
913}
914EXPORT_SYMBOL(set_extent_delalloc);
915
916int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
917 gfp_t mask)
918{
919 return clear_extent_bit(tree, start, end,
920 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
921}
922EXPORT_SYMBOL(clear_extent_dirty);
923
924int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
925 gfp_t mask)
926{
927 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
928 mask);
929}
930EXPORT_SYMBOL(set_extent_new);
931
932int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
933 gfp_t mask)
934{
935 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
936}
937EXPORT_SYMBOL(clear_extent_new);
938
939int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
940 gfp_t mask)
941{
942 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
943 mask);
944}
945EXPORT_SYMBOL(set_extent_uptodate);
946
947int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
948 gfp_t mask)
949{
950 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
951}
952EXPORT_SYMBOL(clear_extent_uptodate);
953
954int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
955 gfp_t mask)
956{
957 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
958 0, NULL, mask);
959}
960EXPORT_SYMBOL(set_extent_writeback);
961
962int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
963 gfp_t mask)
964{
965 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
966}
967EXPORT_SYMBOL(clear_extent_writeback);
968
969int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
970{
971 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
972}
973EXPORT_SYMBOL(wait_on_extent_writeback);
974
975/*
976 * locks a range in ascending order, waiting for any locked regions
977 * it hits on the way. [start,end] are inclusive, and this will sleep.
978 */
979int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
980{
981 int err;
982 u64 failed_start;
983 while (1) {
984 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
985 &failed_start, mask);
986 if (err == -EEXIST && (mask & __GFP_WAIT)) {
987 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
988 start = failed_start;
989 } else {
990 break;
991 }
992 WARN_ON(start > end);
993 }
994 return err;
995}
996EXPORT_SYMBOL(lock_extent);
997
998int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
999 gfp_t mask)
1000{
1001 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
1002}
1003EXPORT_SYMBOL(unlock_extent);
1004
1005/*
1006 * helper function to set pages and extents in the tree dirty
1007 */
1008int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
1009{
1010 unsigned long index = start >> PAGE_CACHE_SHIFT;
1011 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1012 struct page *page;
1013
1014 while (index <= end_index) {
1015 page = find_get_page(tree->mapping, index);
1016 BUG_ON(!page);
1017 __set_page_dirty_nobuffers(page);
1018 page_cache_release(page);
1019 index++;
1020 }
1021 set_extent_dirty(tree, start, end, GFP_NOFS);
1022 return 0;
1023}
1024EXPORT_SYMBOL(set_range_dirty);
1025
1026/*
1027 * helper function to set both pages and extents in the tree writeback
1028 */
1029int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
1030{
1031 unsigned long index = start >> PAGE_CACHE_SHIFT;
1032 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1033 struct page *page;
1034
1035 while (index <= end_index) {
1036 page = find_get_page(tree->mapping, index);
1037 BUG_ON(!page);
1038 set_page_writeback(page);
1039 page_cache_release(page);
1040 index++;
1041 }
1042 set_extent_writeback(tree, start, end, GFP_NOFS);
1043 return 0;
1044}
1045EXPORT_SYMBOL(set_range_writeback);
1046
1047int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1048 u64 *start_ret, u64 *end_ret, int bits)
1049{
1050 struct rb_node *node;
1051 struct extent_state *state;
1052 int ret = 1;
1053
1054 read_lock_irq(&tree->lock);
1055 /*
1056 * this search will find all the extents that end after
1057 * our range starts.
1058 */
1059 node = tree_search(&tree->state, start);
1060 if (!node || IS_ERR(node)) {
1061 goto out;
1062 }
1063
1064 while(1) {
1065 state = rb_entry(node, struct extent_state, rb_node);
1066 if (state->end >= start && (state->state & bits)) {
1067 *start_ret = state->start;
1068 *end_ret = state->end;
1069 ret = 0;
1070 break;
1071 }
1072 node = rb_next(node);
1073 if (!node)
1074 break;
1075 }
1076out:
1077 read_unlock_irq(&tree->lock);
1078 return ret;
1079}
1080EXPORT_SYMBOL(find_first_extent_bit);
1081
1082u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1083 u64 *start, u64 *end, u64 max_bytes)
1084{
1085 struct rb_node *node;
1086 struct extent_state *state;
1087 u64 cur_start = *start;
1088 u64 found = 0;
1089 u64 total_bytes = 0;
1090
1091 write_lock_irq(&tree->lock);
1092 /*
1093 * this search will find all the extents that end after
1094 * our range starts.
1095 */
1096search_again:
1097 node = tree_search(&tree->state, cur_start);
1098 if (!node || IS_ERR(node)) {
1099 *end = (u64)-1;
1100 goto out;
1101 }
1102
1103 while(1) {
1104 state = rb_entry(node, struct extent_state, rb_node);
1105 if (found && state->start != cur_start) {
1106 goto out;
1107 }
1108 if (!(state->state & EXTENT_DELALLOC)) {
1109 if (!found)
1110 *end = state->end;
1111 goto out;
1112 }
1113 if (!found) {
1114 struct extent_state *prev_state;
1115 struct rb_node *prev_node = node;
1116 while(1) {
1117 prev_node = rb_prev(prev_node);
1118 if (!prev_node)
1119 break;
1120 prev_state = rb_entry(prev_node,
1121 struct extent_state,
1122 rb_node);
1123 if (!(prev_state->state & EXTENT_DELALLOC))
1124 break;
1125 state = prev_state;
1126 node = prev_node;
1127 }
1128 }
1129 if (state->state & EXTENT_LOCKED) {
1130 DEFINE_WAIT(wait);
1131 atomic_inc(&state->refs);
1132 prepare_to_wait(&state->wq, &wait,
1133 TASK_UNINTERRUPTIBLE);
1134 write_unlock_irq(&tree->lock);
1135 schedule();
1136 write_lock_irq(&tree->lock);
1137 finish_wait(&state->wq, &wait);
1138 free_extent_state(state);
1139 goto search_again;
1140 }
1141 state->state |= EXTENT_LOCKED;
1142 if (!found)
1143 *start = state->start;
1144 found++;
1145 *end = state->end;
1146 cur_start = state->end + 1;
1147 node = rb_next(node);
1148 if (!node)
1149 break;
1150 total_bytes += state->end - state->start + 1;
1151 if (total_bytes >= max_bytes)
1152 break;
1153 }
1154out:
1155 write_unlock_irq(&tree->lock);
1156 return found;
1157}
1158
1159u64 count_range_bits(struct extent_map_tree *tree,
1160 u64 *start, u64 search_end, u64 max_bytes,
1161 unsigned long bits)
1162{
1163 struct rb_node *node;
1164 struct extent_state *state;
1165 u64 cur_start = *start;
1166 u64 total_bytes = 0;
1167 int found = 0;
1168
1169 if (search_end <= cur_start) {
1170 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1171 WARN_ON(1);
1172 return 0;
1173 }
1174
1175 write_lock_irq(&tree->lock);
1176 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1177 total_bytes = tree->dirty_bytes;
1178 goto out;
1179 }
1180 /*
1181 * this search will find all the extents that end after
1182 * our range starts.
1183 */
1184 node = tree_search(&tree->state, cur_start);
1185 if (!node || IS_ERR(node)) {
1186 goto out;
1187 }
1188
1189 while(1) {
1190 state = rb_entry(node, struct extent_state, rb_node);
1191 if (state->start > search_end)
1192 break;
1193 if (state->end >= cur_start && (state->state & bits)) {
1194 total_bytes += min(search_end, state->end) + 1 -
1195 max(cur_start, state->start);
1196 if (total_bytes >= max_bytes)
1197 break;
1198 if (!found) {
1199 *start = state->start;
1200 found = 1;
1201 }
1202 }
1203 node = rb_next(node);
1204 if (!node)
1205 break;
1206 }
1207out:
1208 write_unlock_irq(&tree->lock);
1209 return total_bytes;
1210}
1211/*
1212 * helper function to lock both pages and extents in the tree.
1213 * pages must be locked first.
1214 */
1215int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1216{
1217 unsigned long index = start >> PAGE_CACHE_SHIFT;
1218 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1219 struct page *page;
1220 int err;
1221
1222 while (index <= end_index) {
1223 page = grab_cache_page(tree->mapping, index);
1224 if (!page) {
1225 err = -ENOMEM;
1226 goto failed;
1227 }
1228 if (IS_ERR(page)) {
1229 err = PTR_ERR(page);
1230 goto failed;
1231 }
1232 index++;
1233 }
1234 lock_extent(tree, start, end, GFP_NOFS);
1235 return 0;
1236
1237failed:
1238 /*
1239 * we failed above in getting the page at 'index', so we undo here
1240 * up to but not including the page at 'index'
1241 */
1242 end_index = index;
1243 index = start >> PAGE_CACHE_SHIFT;
1244 while (index < end_index) {
1245 page = find_get_page(tree->mapping, index);
1246 unlock_page(page);
1247 page_cache_release(page);
1248 index++;
1249 }
1250 return err;
1251}
1252EXPORT_SYMBOL(lock_range);
1253
1254/*
1255 * helper function to unlock both pages and extents in the tree.
1256 */
1257int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1258{
1259 unsigned long index = start >> PAGE_CACHE_SHIFT;
1260 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1261 struct page *page;
1262
1263 while (index <= end_index) {
1264 page = find_get_page(tree->mapping, index);
1265 unlock_page(page);
1266 page_cache_release(page);
1267 index++;
1268 }
1269 unlock_extent(tree, start, end, GFP_NOFS);
1270 return 0;
1271}
1272EXPORT_SYMBOL(unlock_range);
1273
1274int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1275{
1276 struct rb_node *node;
1277 struct extent_state *state;
1278 int ret = 0;
1279
1280 write_lock_irq(&tree->lock);
1281 /*
1282 * this search will find all the extents that end after
1283 * our range starts.
1284 */
1285 node = tree_search(&tree->state, start);
1286 if (!node || IS_ERR(node)) {
1287 ret = -ENOENT;
1288 goto out;
1289 }
1290 state = rb_entry(node, struct extent_state, rb_node);
1291 if (state->start != start) {
1292 ret = -ENOENT;
1293 goto out;
1294 }
1295 state->private = private;
1296out:
1297 write_unlock_irq(&tree->lock);
1298 return ret;
1299}
1300
1301int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1302{
1303 struct rb_node *node;
1304 struct extent_state *state;
1305 int ret = 0;
1306
1307 read_lock_irq(&tree->lock);
1308 /*
1309 * this search will find all the extents that end after
1310 * our range starts.
1311 */
1312 node = tree_search(&tree->state, start);
1313 if (!node || IS_ERR(node)) {
1314 ret = -ENOENT;
1315 goto out;
1316 }
1317 state = rb_entry(node, struct extent_state, rb_node);
1318 if (state->start != start) {
1319 ret = -ENOENT;
1320 goto out;
1321 }
1322 *private = state->private;
1323out:
1324 read_unlock_irq(&tree->lock);
1325 return ret;
1326}
1327
1328/*
1329 * searches a range in the state tree for a given mask.
1330 * If 'filled' == 1, this returns 1 only if every extent in the tree
1331 * has the bits set. Otherwise, 1 is returned if any bit in the
1332 * range is found set.
1333 */
1334int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1335 int bits, int filled)
1336{
1337 struct extent_state *state = NULL;
1338 struct rb_node *node;
1339 int bitset = 0;
1340
1341 read_lock_irq(&tree->lock);
1342 node = tree_search(&tree->state, start);
1343 while (node && start <= end) {
1344 state = rb_entry(node, struct extent_state, rb_node);
1345
1346 if (filled && state->start > start) {
1347 bitset = 0;
1348 break;
1349 }
1350
1351 if (state->start > end)
1352 break;
1353
1354 if (state->state & bits) {
1355 bitset = 1;
1356 if (!filled)
1357 break;
1358 } else if (filled) {
1359 bitset = 0;
1360 break;
1361 }
1362 start = state->end + 1;
1363 if (start > end)
1364 break;
1365 node = rb_next(node);
1366 if (!node) {
1367 if (filled)
1368 bitset = 0;
1369 break;
1370 }
1371 }
1372 read_unlock_irq(&tree->lock);
1373 return bitset;
1374}
1375EXPORT_SYMBOL(test_range_bit);
1376
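A small user-space model of the 'filled' semantics above, using a sorted array of inclusive ranges instead of the rb-tree; struct state and test_range_bit_model() are made-up names and this is only a sketch of the search logic, not the locking:

#include <stdio.h>

/* toy extent state: an inclusive [start, end] range with a bitmask */
struct state { unsigned long long start, end; int bits; };

/*
 * filled == 1: every byte of [start, end] must be covered by states
 * that have 'bits' set (no gaps allowed).
 * filled == 0: return 1 as soon as any overlapping state has 'bits' set.
 */
static int test_range_bit_model(const struct state *s, int n,
				unsigned long long start,
				unsigned long long end,
				int bits, int filled)
{
	int i, bitset = 0;

	for (i = 0; i < n && start <= end; i++) {
		if (s[i].end < start)           /* ends before our range */
			continue;
		if (filled && s[i].start > start)
			return 0;               /* gap in coverage */
		if (s[i].start > end)
			break;
		if (s[i].bits & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			return 0;
		}
		start = s[i].end + 1;
	}
	if (filled && start <= end)
		bitset = 0;                     /* ran out of states early */
	return bitset;
}

int main(void)
{
	struct state s[] = { { 0, 4095, 0x4 }, { 4096, 8191, 0x1 } };

	printf("%d\n", test_range_bit_model(s, 2, 0, 8191, 0x4, 1)); /* 0 */
	printf("%d\n", test_range_bit_model(s, 2, 0, 8191, 0x4, 0)); /* 1 */
	return 0;
}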
1377/*
1378 * helper function to set a given page up to date if all the
1379 * extents in the tree for that page are up to date
1380 */
1381static int check_page_uptodate(struct extent_map_tree *tree,
1382 struct page *page)
1383{
1384 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1385 u64 end = start + PAGE_CACHE_SIZE - 1;
1386 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1387 SetPageUptodate(page);
1388 return 0;
1389}
1390
1391/*
1392 * helper function to unlock a page if all the extents in the tree
1393 * for that page are unlocked
1394 */
1395static int check_page_locked(struct extent_map_tree *tree,
1396 struct page *page)
1397{
1398 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1399 u64 end = start + PAGE_CACHE_SIZE - 1;
1400 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1401 unlock_page(page);
1402 return 0;
1403}
1404
1405/*
1406 * helper function to end page writeback if all the extents
1407 * in the tree for that page are done with writeback
1408 */
1409static int check_page_writeback(struct extent_map_tree *tree,
1410 struct page *page)
1411{
1412 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1413 u64 end = start + PAGE_CACHE_SIZE - 1;
1414 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1415 end_page_writeback(page);
1416 return 0;
1417}
1418
1419/* lots and lots of room for performance fixes in the end_bio funcs */
1420
1421/*
1422 * after a writepage IO is done, we need to:
1423 * clear the uptodate bits on error
1424 * clear the writeback bits in the extent tree for this IO
1425 * end_page_writeback if the page has no more pending IO
1426 *
1427 * Scheduling is not allowed, so the extent state tree is expected
1428 * to have one and only one object corresponding to this IO.
1429 */
1430#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1431static void end_bio_extent_writepage(struct bio *bio, int err)
1432#else
1433static int end_bio_extent_writepage(struct bio *bio,
1434 unsigned int bytes_done, int err)
1435#endif
1436{
1437 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1438 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1439 struct extent_map_tree *tree = bio->bi_private;
1440 u64 start;
1441 u64 end;
1442 int whole_page;
1443
1444#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1445 if (bio->bi_size)
1446 return 1;
1447#endif
1448
1449 do {
1450 struct page *page = bvec->bv_page;
1451 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1452 bvec->bv_offset;
1453 end = start + bvec->bv_len - 1;
1454
1455 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1456 whole_page = 1;
1457 else
1458 whole_page = 0;
1459
1460 if (--bvec >= bio->bi_io_vec)
1461 prefetchw(&bvec->bv_page->flags);
1462
1463 if (!uptodate) {
1464 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1465 ClearPageUptodate(page);
1466 SetPageError(page);
1467 }
1468 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1469
1470 if (whole_page)
1471 end_page_writeback(page);
1472 else
1473 check_page_writeback(tree, page);
1474 if (tree->ops && tree->ops->writepage_end_io_hook)
1475 tree->ops->writepage_end_io_hook(page, start, end);
1476 } while (bvec >= bio->bi_io_vec);
1477
1478 bio_put(bio);
1479#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1480 return 0;
1481#endif
1482}
1483
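All three end_io handlers in this file start from the last bio_vec and walk backwards with the '--bvec >= bio->bi_io_vec' idiom. A tiny standalone sketch of the same reverse walk over a plain array (prefetchw() is omitted and the names are invented); note that, like the kernel loop, the final comparison points one element before the array:

#include <stdio.h>

struct vec { int page; unsigned int len; };

int main(void)
{
	struct vec io_vec[] = { { 1, 4096 }, { 2, 4096 }, { 3, 2048 } };
	int vcnt = 3;
	struct vec *bvec = io_vec + vcnt - 1;   /* start at the last entry */

	do {
		struct vec *cur = bvec;

		/* step back before handling 'cur', mirroring --bvec >= ... */
		--bvec;
		printf("completing page %d, %u bytes\n", cur->page, cur->len);
	} while (bvec >= io_vec);
	return 0;
}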
1484/*
1485 * after a readpage IO is done, we need to:
1486 * clear the uptodate bits on error
1487 * set the uptodate bits if things worked
1488 * set the page up to date if all extents in the tree are uptodate
1489 * clear the lock bit in the extent tree
1490 * unlock the page if there are no other extents locked for it
1491 *
1492 * Scheduling is not allowed, so the extent state tree is expected
1493 * to have one and only one object corresponding to this IO.
1494 */
1495#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1496static void end_bio_extent_readpage(struct bio *bio, int err)
1497#else
1498static int end_bio_extent_readpage(struct bio *bio,
1499 unsigned int bytes_done, int err)
1500#endif
1501{
1502 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1503 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1504 struct extent_map_tree *tree = bio->bi_private;
1505 u64 start;
1506 u64 end;
1507 int whole_page;
1508 int ret;
1509
1510#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1511 if (bio->bi_size)
1512 return 1;
1513#endif
1514
1515 do {
1516 struct page *page = bvec->bv_page;
1517 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1518 bvec->bv_offset;
1519 end = start + bvec->bv_len - 1;
1520
1521 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1522 whole_page = 1;
1523 else
1524 whole_page = 0;
1525
1526 if (--bvec >= bio->bi_io_vec)
1527 prefetchw(&bvec->bv_page->flags);
1528
1529 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1530 ret = tree->ops->readpage_end_io_hook(page, start, end);
1531 if (ret)
1532 uptodate = 0;
1533 }
1534 if (uptodate) {
1535 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1536 if (whole_page)
1537 SetPageUptodate(page);
1538 else
1539 check_page_uptodate(tree, page);
1540 } else {
1541 ClearPageUptodate(page);
1542 SetPageError(page);
1543 }
1544
1545 unlock_extent(tree, start, end, GFP_ATOMIC);
1546
1547 if (whole_page)
1548 unlock_page(page);
1549 else
1550 check_page_locked(tree, page);
1551 } while (bvec >= bio->bi_io_vec);
1552
1553 bio_put(bio);
1554#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1555 return 0;
1556#endif
1557}
1558
1559/*
1560 * IO done from prepare_write is pretty simple: we just unlock
1561 * the structs in the extent tree when done, and set the uptodate bits
1562 * as appropriate.
1563 */
1564#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1565static void end_bio_extent_preparewrite(struct bio *bio, int err)
1566#else
1567static int end_bio_extent_preparewrite(struct bio *bio,
1568 unsigned int bytes_done, int err)
1569#endif
1570{
1571 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1572 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1573 struct extent_map_tree *tree = bio->bi_private;
1574 u64 start;
1575 u64 end;
1576
1577#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1578 if (bio->bi_size)
1579 return 1;
1580#endif
1581
1582 do {
1583 struct page *page = bvec->bv_page;
1584 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1585 bvec->bv_offset;
1586 end = start + bvec->bv_len - 1;
1587
1588 if (--bvec >= bio->bi_io_vec)
1589 prefetchw(&bvec->bv_page->flags);
1590
1591 if (uptodate) {
1592 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1593 } else {
1594 ClearPageUptodate(page);
1595 SetPageError(page);
1596 }
1597
1598 unlock_extent(tree, start, end, GFP_ATOMIC);
1599
1600 } while (bvec >= bio->bi_io_vec);
1601
1602 bio_put(bio);
1603#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1604 return 0;
1605#endif
1606}
1607
1608static struct bio *
1609extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1610 gfp_t gfp_flags)
1611{
1612 struct bio *bio;
1613
1614 bio = bio_alloc(gfp_flags, nr_vecs);
1615
1616 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1617 while (!bio && (nr_vecs /= 2))
1618 bio = bio_alloc(gfp_flags, nr_vecs);
1619 }
1620
1621 if (bio) {
1622 bio->bi_bdev = bdev;
1623 bio->bi_sector = first_sector;
1624 }
1625 return bio;
1626}
1627
1628static int submit_one_bio(int rw, struct bio *bio)
1629{
1630 u64 maxsector;
1631 int ret = 0;
1632
1633 bio_get(bio);
1634
1635 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1636 if (maxsector < bio->bi_sector) {
1637 printk("sector too large max %Lu got %llu\n", maxsector,
1638 (unsigned long long)bio->bi_sector);
1639 WARN_ON(1);
1640 }
1641
1642 submit_bio(rw, bio);
1643 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1644 ret = -EOPNOTSUPP;
1645 bio_put(bio);
1646 return ret;
1647}
1648
1649static int submit_extent_page(int rw, struct extent_map_tree *tree,
1650 struct page *page, sector_t sector,
1651 size_t size, unsigned long offset,
1652 struct block_device *bdev,
1653 struct bio **bio_ret,
1654 unsigned long max_pages,
1655 bio_end_io_t end_io_func)
1656{
1657 int ret = 0;
1658 struct bio *bio;
1659 int nr;
1660
1661 if (bio_ret && *bio_ret) {
1662 bio = *bio_ret;
1663 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1664 bio_add_page(bio, page, size, offset) < size) {
1665 ret = submit_one_bio(rw, bio);
1666 bio = NULL;
1667 } else {
1668 return 0;
1669 }
1670 }
1671 nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1672 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1673 if (!bio) {
1674 printk("failed to allocate bio nr %d\n", nr);
1675 }
1676 bio_add_page(bio, page, size, offset);
1677 bio->bi_end_io = end_io_func;
1678 bio->bi_private = tree;
1679 if (bio_ret) {
1680 *bio_ret = bio;
1681 } else {
1682 ret = submit_one_bio(rw, bio);
1683 }
1684
1685 return ret;
1686}
1687
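submit_extent_page() only appends to the bio handed back through *bio_ret when the new page is physically contiguous with what the bio already contains; otherwise the old bio is submitted first. A sketch of just that contiguity test, assuming 512-byte sectors and a made-up pending_bio struct:

#include <stdio.h>

struct pending_bio {
	unsigned long long sector;      /* first sector of the bio */
	unsigned int size;              /* bytes queued so far */
};

/* can a chunk starting at 'sector' be appended to 'bio'? */
static int bio_is_contiguous(const struct pending_bio *bio,
			     unsigned long long sector)
{
	return bio->sector + (bio->size >> 9) == sector;
}

int main(void)
{
	struct pending_bio bio = { .sector = 1000, .size = 8192 };

	printf("%d\n", bio_is_contiguous(&bio, 1016)); /* 1: 8192 / 512 = 16 */
	printf("%d\n", bio_is_contiguous(&bio, 1024)); /* 0: gap, submit first */
	return 0;
}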
1688void set_page_extent_mapped(struct page *page)
1689{
1690 if (!PagePrivate(page)) {
1691 SetPagePrivate(page);
1692 WARN_ON(!page->mapping->a_ops->invalidatepage);
1693 set_page_private(page, EXTENT_PAGE_PRIVATE);
1694 page_cache_get(page);
1695 }
1696}
1697
1698void set_page_extent_head(struct page *page, unsigned long len)
1699{
1700 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1701}
1702
1703/*
1704 * basic readpage implementation. Locked extent state structs are inserted
1705 * into the tree; they are removed when the IO is done (by the end_io
1706 * handlers)
1707 */
1708static int __extent_read_full_page(struct extent_map_tree *tree,
1709 struct page *page,
1710 get_extent_t *get_extent,
1711 struct bio **bio)
1712{
1713 struct inode *inode = page->mapping->host;
1714 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1715 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1716 u64 end;
1717 u64 cur = start;
1718 u64 extent_offset;
1719 u64 last_byte = i_size_read(inode);
1720 u64 block_start;
1721 u64 cur_end;
1722 sector_t sector;
1723 struct extent_map *em;
1724 struct block_device *bdev;
1725 int ret;
1726 int nr = 0;
1727 size_t page_offset = 0;
1728 size_t iosize;
1729 size_t blocksize = inode->i_sb->s_blocksize;
1730
1731 set_page_extent_mapped(page);
1732
1733 end = page_end;
1734 lock_extent(tree, start, end, GFP_NOFS);
1735
1736 while (cur <= end) {
1737 if (cur >= last_byte) {
1738 char *userpage;
1739 iosize = PAGE_CACHE_SIZE - page_offset;
1740 userpage = kmap_atomic(page, KM_USER0);
1741 memset(userpage + page_offset, 0, iosize);
1742 flush_dcache_page(page);
1743 kunmap_atomic(userpage, KM_USER0);
1744 set_extent_uptodate(tree, cur, cur + iosize - 1,
1745 GFP_NOFS);
1746 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1747 break;
1748 }
1749 em = get_extent(inode, page, page_offset, cur, end, 0);
1750 if (IS_ERR(em) || !em) {
1751 SetPageError(page);
1752 unlock_extent(tree, cur, end, GFP_NOFS);
1753 break;
1754 }
1755
1756 extent_offset = cur - em->start;
1757 BUG_ON(em->end < cur);
1758 BUG_ON(end < cur);
1759
1760 iosize = min(em->end - cur, end - cur) + 1;
1761 cur_end = min(em->end, end);
1762 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1763 sector = (em->block_start + extent_offset) >> 9;
1764 bdev = em->bdev;
1765 block_start = em->block_start;
1766 free_extent_map(em);
1767 em = NULL;
1768
1769 /* we've found a hole, just zero and go on */
1770 if (block_start == EXTENT_MAP_HOLE) {
1771 char *userpage;
1772 userpage = kmap_atomic(page, KM_USER0);
1773 memset(userpage + page_offset, 0, iosize);
1774 flush_dcache_page(page);
1775 kunmap_atomic(userpage, KM_USER0);
1776
1777 set_extent_uptodate(tree, cur, cur + iosize - 1,
1778 GFP_NOFS);
1779 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1780 cur = cur + iosize;
1781 page_offset += iosize;
1782 continue;
1783 }
1784 /* the get_extent function already copied into the page */
1785 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1786 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1787 cur = cur + iosize;
1788 page_offset += iosize;
1789 continue;
1790 }
1791
1792 ret = 0;
1793 if (tree->ops && tree->ops->readpage_io_hook) {
1794 ret = tree->ops->readpage_io_hook(page, cur,
1795 cur + iosize - 1);
1796 }
1797 if (!ret) {
1798 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1799 nr -= page->index;
1800 ret = submit_extent_page(READ, tree, page,
1801 sector, iosize, page_offset,
1802 bdev, bio, nr,
1803 end_bio_extent_readpage);
1804 }
1805 if (ret)
1806 SetPageError(page);
1807 cur = cur + iosize;
1808 page_offset += iosize;
1809 nr++;
1810 }
1811 if (!nr) {
1812 if (!PageError(page))
1813 SetPageUptodate(page);
1814 unlock_page(page);
1815 }
1816 return 0;
1817}
1818
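Inside the readpage loop, each chunk is rounded up to the filesystem block size and the extent's byte address is converted to a 512-byte sector before submit_extent_page() is called. The same arithmetic pulled out into a standalone sketch with sample numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long cur = 4096;                /* current file offset */
	unsigned long long em_start = 0;              /* extent start in the file */
	unsigned long long em_block_start = 1048576;  /* extent start on disk */
	unsigned long long blocksize = 4096;
	unsigned long long iosize = 3000;             /* raw length of this chunk */
	unsigned long long sector;

	/* round the IO size up to a multiple of the block size */
	iosize = (iosize + blocksize - 1) & ~(blocksize - 1);

	/* byte address on disk, then convert to a 512-byte sector */
	sector = (em_block_start + (cur - em_start)) >> 9;

	printf("iosize %llu sector %llu\n", iosize, sector); /* 4096, 2056 */
	return 0;
}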
1819int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1820 get_extent_t *get_extent)
1821{
1822 struct bio *bio = NULL;
1823 int ret;
1824
1825 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1826 if (bio)
1827 submit_one_bio(READ, bio);
1828 return ret;
1829}
1830EXPORT_SYMBOL(extent_read_full_page);
1831
1832/*
1833 * the writepage semantics are similar to regular writepage. extent
1834 * records are inserted to lock ranges in the tree, and as dirty areas
1835 * are found, they are marked writeback. Then the lock bits are removed
1836 * and the end_io handler clears the writeback ranges
1837 */
1838static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1839 void *data)
1840{
1841 struct inode *inode = page->mapping->host;
1842 struct extent_page_data *epd = data;
1843 struct extent_map_tree *tree = epd->tree;
1844 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1845 u64 delalloc_start;
1846 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1847 u64 end;
1848 u64 cur = start;
1849 u64 extent_offset;
1850 u64 last_byte = i_size_read(inode);
1851 u64 block_start;
1852 u64 iosize;
1853 sector_t sector;
1854 struct extent_map *em;
1855 struct block_device *bdev;
1856 int ret;
1857 int nr = 0;
1858 size_t page_offset = 0;
1859 size_t blocksize;
1860 loff_t i_size = i_size_read(inode);
1861 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1862 u64 nr_delalloc;
1863 u64 delalloc_end;
1864
1865 WARN_ON(!PageLocked(page));
1866 if (page->index > end_index) {
1867 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1868 unlock_page(page);
1869 return 0;
1870 }
1871
1872 if (page->index == end_index) {
1873 char *userpage;
1874
1875 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1876
1877 userpage = kmap_atomic(page, KM_USER0);
1878 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1879 flush_dcache_page(page);
1880 kunmap_atomic(userpage, KM_USER0);
1881 }
1882
1883 set_page_extent_mapped(page);
1884
1885 delalloc_start = start;
1886 delalloc_end = 0;
1887 while(delalloc_end < page_end) {
1888 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1889 &delalloc_end,
1890 128 * 1024 * 1024);
1891 if (nr_delalloc == 0) {
1892 delalloc_start = delalloc_end + 1;
1893 continue;
1894 }
1895 tree->ops->fill_delalloc(inode, delalloc_start,
1896 delalloc_end);
1897 clear_extent_bit(tree, delalloc_start,
1898 delalloc_end,
1899 EXTENT_LOCKED | EXTENT_DELALLOC,
1900 1, 0, GFP_NOFS);
1901 delalloc_start = delalloc_end + 1;
1902 }
1903 lock_extent(tree, start, page_end, GFP_NOFS);
1904
1905 end = page_end;
1906 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1907 printk("found delalloc bits after lock_extent\n");
1908 }
1909
1910 if (last_byte <= start) {
1911 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1912 goto done;
1913 }
1914
1915 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1916 blocksize = inode->i_sb->s_blocksize;
1917
1918 while (cur <= end) {
1919 if (cur >= last_byte) {
1920 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1921 break;
1922 }
1923 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1924 if (IS_ERR(em) || !em) {
1925 SetPageError(page);
1926 break;
1927 }
1928
1929 extent_offset = cur - em->start;
1930 BUG_ON(em->end < cur);
1931 BUG_ON(end < cur);
1932 iosize = min(em->end - cur, end - cur) + 1;
1933 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1934 sector = (em->block_start + extent_offset) >> 9;
1935 bdev = em->bdev;
1936 block_start = em->block_start;
1937 free_extent_map(em);
1938 em = NULL;
1939
1940 if (block_start == EXTENT_MAP_HOLE ||
1941 block_start == EXTENT_MAP_INLINE) {
1942 clear_extent_dirty(tree, cur,
1943 cur + iosize - 1, GFP_NOFS);
1944 cur = cur + iosize;
1945 page_offset += iosize;
1946 continue;
1947 }
1948
1949 /* leave this out until we have a page_mkwrite call */
1950 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1951 EXTENT_DIRTY, 0)) {
1952 cur = cur + iosize;
1953 page_offset += iosize;
1954 continue;
1955 }
1956 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1957 if (tree->ops && tree->ops->writepage_io_hook) {
1958 ret = tree->ops->writepage_io_hook(page, cur,
1959 cur + iosize - 1);
1960 } else {
1961 ret = 0;
1962 }
1963 if (ret)
1964 SetPageError(page);
1965 else {
1966 unsigned long max_nr = end_index + 1;
1967 set_range_writeback(tree, cur, cur + iosize - 1);
1968 if (!PageWriteback(page)) {
1969 printk("warning page %lu not writeback, "
1970 "cur %llu end %llu\n", page->index,
1971 (unsigned long long)cur,
1972 (unsigned long long)end);
1973 }
1974
1975 ret = submit_extent_page(WRITE, tree, page, sector,
1976 iosize, page_offset, bdev,
1977 &epd->bio, max_nr,
1978 end_bio_extent_writepage);
1979 if (ret)
1980 SetPageError(page);
1981 }
1982 cur = cur + iosize;
1983 page_offset += iosize;
1984 nr++;
1985 }
1986done:
1987 if (nr == 0) {
1988 /* make sure the mapping tag for page dirty gets cleared */
1989 set_page_writeback(page);
1990 end_page_writeback(page);
1991 }
1992 unlock_extent(tree, start, page_end, GFP_NOFS);
1993 unlock_page(page);
1994 return 0;
1995}
1996
1997#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1998
1999/* Taken directly from 2.6.23 for 2.6.18 back port */
2000typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2001 void *data);
2002
2003/**
2004 * write_cache_pages - walk the list of dirty pages of the given address space
2005 * and write all of them.
2006 * @mapping: address space structure to write
2007 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2008 * @writepage: function called for each page
2009 * @data: data passed to writepage function
2010 *
2011 * If a page is already under I/O, write_cache_pages() skips it, even
2012 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2013 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2014 * and msync() need to guarantee that all the data which was dirty at the time
2015 * the call was made get new I/O started against them. If wbc->sync_mode is
2016 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2017 * existing IO to complete.
2018 */
2019static int write_cache_pages(struct address_space *mapping,
2020 struct writeback_control *wbc, writepage_t writepage,
2021 void *data)
2022{
2023 struct backing_dev_info *bdi = mapping->backing_dev_info;
2024 int ret = 0;
2025 int done = 0;
2026 struct pagevec pvec;
2027 int nr_pages;
2028 pgoff_t index;
2029 pgoff_t end; /* Inclusive */
2030 int scanned = 0;
2031 int range_whole = 0;
2032
2033 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2034 wbc->encountered_congestion = 1;
2035 return 0;
2036 }
2037
2038 pagevec_init(&pvec, 0);
2039 if (wbc->range_cyclic) {
2040 index = mapping->writeback_index; /* Start from prev offset */
2041 end = -1;
2042 } else {
2043 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2044 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2045 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2046 range_whole = 1;
2047 scanned = 1;
2048 }
2049retry:
2050 while (!done && (index <= end) &&
2051 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2052 PAGECACHE_TAG_DIRTY,
2053 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2054 unsigned i;
2055
2056 scanned = 1;
2057 for (i = 0; i < nr_pages; i++) {
2058 struct page *page = pvec.pages[i];
2059
2060 /*
2061 * At this point we hold neither mapping->tree_lock nor
2062 * lock on the page itself: the page may be truncated or
2063 * invalidated (changing page->mapping to NULL), or even
2064 * swizzled back from swapper_space to tmpfs file
2065 * mapping
2066 */
2067 lock_page(page);
2068
2069 if (unlikely(page->mapping != mapping)) {
2070 unlock_page(page);
2071 continue;
2072 }
2073
2074 if (!wbc->range_cyclic && page->index > end) {
2075 done = 1;
2076 unlock_page(page);
2077 continue;
2078 }
2079
2080 if (wbc->sync_mode != WB_SYNC_NONE)
2081 wait_on_page_writeback(page);
2082
2083 if (PageWriteback(page) ||
2084 !clear_page_dirty_for_io(page)) {
2085 unlock_page(page);
2086 continue;
2087 }
2088
2089 ret = (*writepage)(page, wbc, data);
2090
2091 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2092 unlock_page(page);
2093 ret = 0;
2094 }
2095 if (ret || (--(wbc->nr_to_write) <= 0))
2096 done = 1;
2097 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2098 wbc->encountered_congestion = 1;
2099 done = 1;
2100 }
2101 }
2102 pagevec_release(&pvec);
2103 cond_resched();
2104 }
2105 if (!scanned && !done) {
2106 /*
2107 * We hit the last page and there is more work to be done: wrap
2108 * back to the start of the file
2109 */
2110 scanned = 1;
2111 index = 0;
2112 goto retry;
2113 }
2114 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2115 mapping->writeback_index = index;
2116 return ret;
2117}
2118#endif
2119
2120int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
2121 get_extent_t *get_extent,
2122 struct writeback_control *wbc)
2123{
2124 int ret;
2125 struct address_space *mapping = page->mapping;
2126 struct extent_page_data epd = {
2127 .bio = NULL,
2128 .tree = tree,
2129 .get_extent = get_extent,
2130 };
2131 struct writeback_control wbc_writepages = {
2132 .bdi = wbc->bdi,
2133 .sync_mode = WB_SYNC_NONE,
2134 .older_than_this = NULL,
2135 .nr_to_write = 64,
2136 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2137 .range_end = (loff_t)-1,
2138 };
2139
2140
2141 ret = __extent_writepage(page, wbc, &epd);
2142
2143 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2144 if (epd.bio) {
2145 submit_one_bio(WRITE, epd.bio);
2146 }
2147 return ret;
2148}
2149EXPORT_SYMBOL(extent_write_full_page);
2150
2151
2152int extent_writepages(struct extent_map_tree *tree,
2153 struct address_space *mapping,
2154 get_extent_t *get_extent,
2155 struct writeback_control *wbc)
2156{
2157 int ret = 0;
2158 struct extent_page_data epd = {
2159 .bio = NULL,
2160 .tree = tree,
2161 .get_extent = get_extent,
2162 };
2163
2164 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2165 if (epd.bio) {
2166 submit_one_bio(WRITE, epd.bio);
2167 }
2168 return ret;
2169}
2170EXPORT_SYMBOL(extent_writepages);
2171
2172int extent_readpages(struct extent_map_tree *tree,
2173 struct address_space *mapping,
2174 struct list_head *pages, unsigned nr_pages,
2175 get_extent_t get_extent)
2176{
2177 struct bio *bio = NULL;
2178 unsigned page_idx;
2179 struct pagevec pvec;
2180
2181 pagevec_init(&pvec, 0);
2182 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2183 struct page *page = list_entry(pages->prev, struct page, lru);
2184
2185 prefetchw(&page->flags);
2186 list_del(&page->lru);
2187 /*
2188 * what we want to do here is call add_to_page_cache_lru,
2189 * but that isn't exported, so we reproduce it here
2190 */
2191 if (!add_to_page_cache(page, mapping,
2192 page->index, GFP_KERNEL)) {
2193
2194 /* open coding of lru_cache_add, also not exported */
2195 page_cache_get(page);
2196 if (!pagevec_add(&pvec, page))
2197 __pagevec_lru_add(&pvec);
2198 __extent_read_full_page(tree, page, get_extent, &bio);
2199 }
2200 page_cache_release(page);
2201 }
2202 if (pagevec_count(&pvec))
2203 __pagevec_lru_add(&pvec);
2204 BUG_ON(!list_empty(pages));
2205 if (bio)
2206 submit_one_bio(READ, bio);
2207 return 0;
2208}
2209EXPORT_SYMBOL(extent_readpages);
2210
2211/*
2212 * basic invalidatepage code; it waits on any locked or writeback
2213 * ranges corresponding to the page, and then deletes any extent state
2214 * records from the tree
2215 */
2216int extent_invalidatepage(struct extent_map_tree *tree,
2217 struct page *page, unsigned long offset)
2218{
2219 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2220 u64 end = start + PAGE_CACHE_SIZE - 1;
2221 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2222
2223 start += (offset + blocksize -1) & ~(blocksize - 1);
2224 if (start > end)
2225 return 0;
2226
2227 lock_extent(tree, start, end, GFP_NOFS);
2228 wait_on_extent_writeback(tree, start, end);
2229 clear_extent_bit(tree, start, end,
2230 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2231 1, 1, GFP_NOFS);
2232 return 0;
2233}
2234EXPORT_SYMBOL(extent_invalidatepage);
2235
2236/*
2237 * simple commit_write call; set_range_dirty is used to mark both
2238 * the pages and the extent records as dirty
2239 */
2240int extent_commit_write(struct extent_map_tree *tree,
2241 struct inode *inode, struct page *page,
2242 unsigned from, unsigned to)
2243{
2244 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2245
2246 set_page_extent_mapped(page);
2247 set_page_dirty(page);
2248
2249 if (pos > inode->i_size) {
2250 i_size_write(inode, pos);
2251 mark_inode_dirty(inode);
2252 }
2253 return 0;
2254}
2255EXPORT_SYMBOL(extent_commit_write);
2256
2257int extent_prepare_write(struct extent_map_tree *tree,
2258 struct inode *inode, struct page *page,
2259 unsigned from, unsigned to, get_extent_t *get_extent)
2260{
2261 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2262 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2263 u64 block_start;
2264 u64 orig_block_start;
2265 u64 block_end;
2266 u64 cur_end;
2267 struct extent_map *em;
2268 unsigned blocksize = 1 << inode->i_blkbits;
2269 size_t page_offset = 0;
2270 size_t block_off_start;
2271 size_t block_off_end;
2272 int err = 0;
2273 int iocount = 0;
2274 int ret = 0;
2275 int isnew;
2276
2277 set_page_extent_mapped(page);
2278
2279 block_start = (page_start + from) & ~((u64)blocksize - 1);
2280 block_end = (page_start + to - 1) | (blocksize - 1);
2281 orig_block_start = block_start;
2282
2283 lock_extent(tree, page_start, page_end, GFP_NOFS);
2284 while(block_start <= block_end) {
2285 em = get_extent(inode, page, page_offset, block_start,
2286 block_end, 1);
2287 if (IS_ERR(em) || !em) {
2288 goto err;
2289 }
2290 cur_end = min(block_end, em->end);
2291 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2292 block_off_end = block_off_start + blocksize;
2293 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2294
2295 if (!PageUptodate(page) && isnew &&
2296 (block_off_end > to || block_off_start < from)) {
2297 void *kaddr;
2298
2299 kaddr = kmap_atomic(page, KM_USER0);
2300 if (block_off_end > to)
2301 memset(kaddr + to, 0, block_off_end - to);
2302 if (block_off_start < from)
2303 memset(kaddr + block_off_start, 0,
2304 from - block_off_start);
2305 flush_dcache_page(page);
2306 kunmap_atomic(kaddr, KM_USER0);
2307 }
2308 if ((em->block_start != EXTENT_MAP_HOLE &&
2309 em->block_start != EXTENT_MAP_INLINE) &&
2310 !isnew && !PageUptodate(page) &&
2311 (block_off_end > to || block_off_start < from) &&
2312 !test_range_bit(tree, block_start, cur_end,
2313 EXTENT_UPTODATE, 1)) {
2314 u64 sector;
2315 u64 extent_offset = block_start - em->start;
2316 size_t iosize;
2317 sector = (em->block_start + extent_offset) >> 9;
2318 iosize = (cur_end - block_start + blocksize) &
2319 ~((u64)blocksize - 1);
2320 /*
2321 * we've already got the extent locked, but we
2322 * need to split the state such that our end_bio
2323 * handler can clear the lock.
2324 */
2325 set_extent_bit(tree, block_start,
2326 block_start + iosize - 1,
2327 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2328 ret = submit_extent_page(READ, tree, page,
2329 sector, iosize, page_offset, em->bdev,
2330 NULL, 1,
2331 end_bio_extent_preparewrite);
2332 iocount++;
2333 block_start = block_start + iosize;
2334 } else {
2335 set_extent_uptodate(tree, block_start, cur_end,
2336 GFP_NOFS);
2337 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2338 block_start = cur_end + 1;
2339 }
2340 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2341 free_extent_map(em);
2342 }
2343 if (iocount) {
2344 wait_extent_bit(tree, orig_block_start,
2345 block_end, EXTENT_LOCKED);
2346 }
2347 check_page_uptodate(tree, page);
2348err:
2349 /* FIXME, zero out newly allocated blocks on error */
2350 return err;
2351}
2352EXPORT_SYMBOL(extent_prepare_write);
2353
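extent_prepare_write() widens the [from, to) byte range within the page to full block boundaries: block_start is rounded down and block_end is rounded up to an inclusive block end. A quick standalone check of that rounding, assuming a 4K page at offset 8192 and 1K blocks:

#include <stdio.h>

int main(void)
{
	unsigned long long page_start = 8192;   /* page-aligned file offset */
	unsigned int from = 100, to = 1500;     /* byte range within the page */
	unsigned long long blocksize = 1024;
	unsigned long long block_start, block_end;

	block_start = (page_start + from) & ~(blocksize - 1);  /* round down */
	block_end = (page_start + to - 1) | (blocksize - 1);   /* round up, inclusive */

	printf("%llu..%llu\n", block_start, block_end);        /* 8192..10239 */
	return 0;
}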
2354/*
2355 * a helper for releasepage. As long as there are no locked extents
2356 * in the range corresponding to the page, both state records and extent
2357 * map records are removed
2358 */
2359int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2360{
2361 struct extent_map *em;
2362 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2363 u64 end = start + PAGE_CACHE_SIZE - 1;
2364 u64 orig_start = start;
2365 int ret = 1;
2366
2367 while (start <= end) {
2368 em = lookup_extent_mapping(tree, start, end);
2369 if (!em || IS_ERR(em))
2370 break;
2371 if (!test_range_bit(tree, em->start, em->end,
2372 EXTENT_LOCKED, 0)) {
2373 remove_extent_mapping(tree, em);
2374 /* once for the rb tree */
2375 free_extent_map(em);
2376 }
2377 start = em->end + 1;
2378 /* once for us */
2379 free_extent_map(em);
2380 }
2381 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2382 ret = 0;
2383 else
2384 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2385 1, 1, GFP_NOFS);
2386 return ret;
2387}
2388EXPORT_SYMBOL(try_release_extent_mapping);
2389
2390sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2391 get_extent_t *get_extent)
2392{
2393 struct inode *inode = mapping->host;
2394 u64 start = iblock << inode->i_blkbits;
2395 u64 end = start + (1 << inode->i_blkbits) - 1;
2396 sector_t sector = 0;
2397 struct extent_map *em;
2398
2399 em = get_extent(inode, NULL, 0, start, end, 0);
2400 if (!em || IS_ERR(em))
2401 return 0;
2402
2403 if (em->block_start == EXTENT_MAP_INLINE ||
2404 em->block_start == EXTENT_MAP_HOLE)
2405 goto out;
2406
2407 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2408out:
2409 free_extent_map(em);
2410 return sector;
2411}
2412
2413static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2414{
2415 if (list_empty(&eb->lru)) {
2416 extent_buffer_get(eb);
2417 list_add(&eb->lru, &tree->buffer_lru);
2418 tree->lru_size++;
2419 if (tree->lru_size >= BUFFER_LRU_MAX) {
2420 struct extent_buffer *rm;
2421 rm = list_entry(tree->buffer_lru.prev,
2422 struct extent_buffer, lru);
2423 tree->lru_size--;
2424 list_del_init(&rm->lru);
2425 free_extent_buffer(rm);
2426 }
2427 } else
2428 list_move(&eb->lru, &tree->buffer_lru);
2429 return 0;
2430}
2431static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2432 u64 start, unsigned long len)
2433{
2434 struct list_head *lru = &tree->buffer_lru;
2435 struct list_head *cur = lru->next;
2436 struct extent_buffer *eb;
2437
2438 if (list_empty(lru))
2439 return NULL;
2440
2441 do {
2442 eb = list_entry(cur, struct extent_buffer, lru);
2443 if (eb->start == start && eb->len == len) {
2444 extent_buffer_get(eb);
2445 return eb;
2446 }
2447 cur = cur->next;
2448 } while (cur != lru);
2449 return NULL;
2450}
2451
2452static inline unsigned long num_extent_pages(u64 start, u64 len)
2453{
2454 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2455 (start >> PAGE_CACHE_SHIFT);
2456}
2457
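num_extent_pages() counts how many page-cache pages a buffer of len bytes starting at byte offset start touches, by rounding the end up and the start down to page boundaries before subtracting. The same computation as a runnable sketch, assuming 4K pages:

#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12
#define PAGE_SIZE_SKETCH (1UL << PAGE_SHIFT_SKETCH)

static unsigned long num_pages(unsigned long long start, unsigned long long len)
{
	return ((start + len + PAGE_SIZE_SKETCH - 1) >> PAGE_SHIFT_SKETCH) -
	       (start >> PAGE_SHIFT_SKETCH);
}

int main(void)
{
	/* a 4K buffer that straddles a page boundary touches two pages */
	printf("%lu\n", num_pages(6144, 4096));   /* 2 */
	/* a page-aligned 4K buffer touches exactly one */
	printf("%lu\n", num_pages(8192, 4096));   /* 1 */
	return 0;
}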
2458static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2459 unsigned long i)
2460{
2461 struct page *p;
2462 struct address_space *mapping;
2463
2464 if (i == 0)
2465 return eb->first_page;
2466 i += eb->start >> PAGE_CACHE_SHIFT;
2467 mapping = eb->first_page->mapping;
2468 read_lock_irq(&mapping->tree_lock);
2469 p = radix_tree_lookup(&mapping->page_tree, i);
2470 read_unlock_irq(&mapping->tree_lock);
2471 return p;
2472}
2473
2474static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2475 u64 start,
2476 unsigned long len,
2477 gfp_t mask)
2478{
2479 struct extent_buffer *eb = NULL;
2480
2481 spin_lock(&tree->lru_lock);
2482 eb = find_lru(tree, start, len);
2483 spin_unlock(&tree->lru_lock);
2484 if (eb) {
2485 return eb;
2486 }
2487
2488 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2489 INIT_LIST_HEAD(&eb->lru);
2490 eb->start = start;
2491 eb->len = len;
2492 atomic_set(&eb->refs, 1);
2493
2494 return eb;
2495}
2496
2497static void __free_extent_buffer(struct extent_buffer *eb)
2498{
2499 kmem_cache_free(extent_buffer_cache, eb);
2500}
2501
2502struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2503 u64 start, unsigned long len,
2504 struct page *page0,
2505 gfp_t mask)
2506{
2507 unsigned long num_pages = num_extent_pages(start, len);
2508 unsigned long i;
2509 unsigned long index = start >> PAGE_CACHE_SHIFT;
2510 struct extent_buffer *eb;
2511 struct page *p;
2512 struct address_space *mapping = tree->mapping;
2513 int uptodate = 1;
2514
2515 eb = __alloc_extent_buffer(tree, start, len, mask);
2516 if (!eb || IS_ERR(eb))
2517 return NULL;
2518
2519 if (eb->flags & EXTENT_BUFFER_FILLED)
2520 goto lru_add;
2521
2522 if (page0) {
2523 eb->first_page = page0;
2524 i = 1;
2525 index++;
2526 page_cache_get(page0);
2527 mark_page_accessed(page0);
2528 set_page_extent_mapped(page0);
2529 WARN_ON(!PageUptodate(page0));
2530 set_page_extent_head(page0, len);
2531 } else {
2532 i = 0;
2533 }
2534 for (; i < num_pages; i++, index++) {
2535 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2536 if (!p) {
2537 WARN_ON(1);
2538 goto fail;
2539 }
2540 set_page_extent_mapped(p);
2541 mark_page_accessed(p);
2542 if (i == 0) {
2543 eb->first_page = p;
2544 set_page_extent_head(p, len);
2545 } else {
2546 set_page_private(p, EXTENT_PAGE_PRIVATE);
2547 }
2548 if (!PageUptodate(p))
2549 uptodate = 0;
2550 unlock_page(p);
2551 }
2552 if (uptodate)
2553 eb->flags |= EXTENT_UPTODATE;
2554 eb->flags |= EXTENT_BUFFER_FILLED;
2555
2556lru_add:
2557 spin_lock(&tree->lru_lock);
2558 add_lru(tree, eb);
2559 spin_unlock(&tree->lru_lock);
2560 return eb;
2561
2562fail:
2563 spin_lock(&tree->lru_lock);
2564 list_del_init(&eb->lru);
2565 spin_unlock(&tree->lru_lock);
2566 if (!atomic_dec_and_test(&eb->refs))
2567 return NULL;
2568 for (index = 1; index < i; index++) {
2569 page_cache_release(extent_buffer_page(eb, index));
2570 }
2571 if (i > 0)
2572 page_cache_release(extent_buffer_page(eb, 0));
2573 __free_extent_buffer(eb);
2574 return NULL;
2575}
2576EXPORT_SYMBOL(alloc_extent_buffer);
2577
2578struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2579 u64 start, unsigned long len,
2580 gfp_t mask)
2581{
2582 unsigned long num_pages = num_extent_pages(start, len);
2583 unsigned long i;
2584 unsigned long index = start >> PAGE_CACHE_SHIFT;
2585 struct extent_buffer *eb;
2586 struct page *p;
2587 struct address_space *mapping = tree->mapping;
2588 int uptodate = 1;
2589
2590 eb = __alloc_extent_buffer(tree, start, len, mask);
2591 if (!eb || IS_ERR(eb))
2592 return NULL;
2593
2594 if (eb->flags & EXTENT_BUFFER_FILLED)
2595 goto lru_add;
2596
2597 for (i = 0; i < num_pages; i++, index++) {
2598 p = find_lock_page(mapping, index);
2599 if (!p) {
2600 goto fail;
2601 }
2602 set_page_extent_mapped(p);
2603 mark_page_accessed(p);
2604
2605 if (i == 0) {
2606 eb->first_page = p;
2607 set_page_extent_head(p, len);
2608 } else {
2609 set_page_private(p, EXTENT_PAGE_PRIVATE);
2610 }
2611
2612 if (!PageUptodate(p))
2613 uptodate = 0;
2614 unlock_page(p);
2615 }
2616 if (uptodate)
2617 eb->flags |= EXTENT_UPTODATE;
2618 eb->flags |= EXTENT_BUFFER_FILLED;
2619
2620lru_add:
2621 spin_lock(&tree->lru_lock);
2622 add_lru(tree, eb);
2623 spin_unlock(&tree->lru_lock);
2624 return eb;
2625fail:
2626 spin_lock(&tree->lru_lock);
2627 list_del_init(&eb->lru);
2628 spin_unlock(&tree->lru_lock);
2629 if (!atomic_dec_and_test(&eb->refs))
2630 return NULL;
2631 for (index = 1; index < i; index++) {
2632 page_cache_release(extent_buffer_page(eb, index));
2633 }
2634 if (i > 0)
2635 page_cache_release(extent_buffer_page(eb, 0));
2636 __free_extent_buffer(eb);
2637 return NULL;
2638}
2639EXPORT_SYMBOL(find_extent_buffer);
2640
2641void free_extent_buffer(struct extent_buffer *eb)
2642{
2643 unsigned long i;
2644 unsigned long num_pages;
2645
2646 if (!eb)
2647 return;
2648
2649 if (!atomic_dec_and_test(&eb->refs))
2650 return;
2651
2652 WARN_ON(!list_empty(&eb->lru));
2653 num_pages = num_extent_pages(eb->start, eb->len);
2654
2655 for (i = 1; i < num_pages; i++) {
2656 page_cache_release(extent_buffer_page(eb, i));
2657 }
2658 page_cache_release(extent_buffer_page(eb, 0));
2659 __free_extent_buffer(eb);
2660}
2661EXPORT_SYMBOL(free_extent_buffer);
2662
2663int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2664 struct extent_buffer *eb)
2665{
2666 int set;
2667 unsigned long i;
2668 unsigned long num_pages;
2669 struct page *page;
2670
2671 u64 start = eb->start;
2672 u64 end = start + eb->len - 1;
2673
2674 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2675 num_pages = num_extent_pages(eb->start, eb->len);
2676
2677 for (i = 0; i < num_pages; i++) {
2678 page = extent_buffer_page(eb, i);
2679 lock_page(page);
2680 if (i == 0)
2681 set_page_extent_head(page, eb->len);
2682 else
2683 set_page_private(page, EXTENT_PAGE_PRIVATE);
2684
2685 /*
2686 * if we're on the last page or the first page and the
2687 * block isn't aligned on a page boundary, do extra checks
2688 * to make sure we don't clean a page that is partially dirty
2689 */
2690 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2691 ((i == num_pages - 1) &&
2692 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2693 start = (u64)page->index << PAGE_CACHE_SHIFT;
2694 end = start + PAGE_CACHE_SIZE - 1;
2695 if (test_range_bit(tree, start, end,
2696 EXTENT_DIRTY, 0)) {
2697 unlock_page(page);
2698 continue;
2699 }
2700 }
2701 clear_page_dirty_for_io(page);
2702 write_lock_irq(&page->mapping->tree_lock);
2703 if (!PageDirty(page)) {
2704 radix_tree_tag_clear(&page->mapping->page_tree,
2705 page_index(page),
2706 PAGECACHE_TAG_DIRTY);
2707 }
2708 write_unlock_irq(&page->mapping->tree_lock);
2709 unlock_page(page);
2710 }
2711 return 0;
2712}
2713EXPORT_SYMBOL(clear_extent_buffer_dirty);
2714
2715int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2716 struct extent_buffer *eb)
2717{
2718 return wait_on_extent_writeback(tree, eb->start,
2719 eb->start + eb->len - 1);
2720}
2721EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2722
2723int set_extent_buffer_dirty(struct extent_map_tree *tree,
2724 struct extent_buffer *eb)
2725{
2726 unsigned long i;
2727 unsigned long num_pages;
2728
2729 num_pages = num_extent_pages(eb->start, eb->len);
2730 for (i = 0; i < num_pages; i++) {
2731 struct page *page = extent_buffer_page(eb, i);
2732 /* writepage may need to do something special for the
2733 * first page, we have to make sure page->private is
2734 * properly set. releasepage may drop page->private
2735 * on us if the page isn't already dirty.
2736 */
2737 if (i == 0) {
2738 lock_page(page);
2739 set_page_extent_head(page, eb->len);
2740 } else if (PagePrivate(page) &&
2741 page->private != EXTENT_PAGE_PRIVATE) {
2742 lock_page(page);
2743 set_page_extent_mapped(page);
2744 unlock_page(page);
2745 }
2746 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2747 if (i == 0)
2748 unlock_page(page);
2749 }
2750 return set_extent_dirty(tree, eb->start,
2751 eb->start + eb->len - 1, GFP_NOFS);
2752}
2753EXPORT_SYMBOL(set_extent_buffer_dirty);
2754
2755int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2756 struct extent_buffer *eb)
2757{
2758 unsigned long i;
2759 struct page *page;
2760 unsigned long num_pages;
2761
2762 num_pages = num_extent_pages(eb->start, eb->len);
2763
2764 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2765 GFP_NOFS);
2766 for (i = 0; i < num_pages; i++) {
2767 page = extent_buffer_page(eb, i);
2768 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2769 ((i == num_pages - 1) &&
2770 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2771 check_page_uptodate(tree, page);
2772 continue;
2773 }
2774 SetPageUptodate(page);
2775 }
2776 return 0;
2777}
2778EXPORT_SYMBOL(set_extent_buffer_uptodate);
2779
2780int extent_buffer_uptodate(struct extent_map_tree *tree,
2781 struct extent_buffer *eb)
2782{
2783 if (eb->flags & EXTENT_UPTODATE)
2784 return 1;
2785 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2786 EXTENT_UPTODATE, 1);
2787}
2788EXPORT_SYMBOL(extent_buffer_uptodate);
2789
2790int read_extent_buffer_pages(struct extent_map_tree *tree,
2791 struct extent_buffer *eb,
2792 u64 start,
2793 int wait)
2794{
2795 unsigned long i;
2796 unsigned long start_i;
2797 struct page *page;
2798 int err;
2799 int ret = 0;
2800 unsigned long num_pages;
2801
2802 if (eb->flags & EXTENT_UPTODATE)
2803 return 0;
2804
2805 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2806 EXTENT_UPTODATE, 1)) {
2807 return 0;
2808 }
2809
2810 if (start) {
2811 WARN_ON(start < eb->start);
2812 start_i = (start >> PAGE_CACHE_SHIFT) -
2813 (eb->start >> PAGE_CACHE_SHIFT);
2814 } else {
2815 start_i = 0;
2816 }
2817
2818 num_pages = num_extent_pages(eb->start, eb->len);
2819 for (i = start_i; i < num_pages; i++) {
2820 page = extent_buffer_page(eb, i);
2821 if (PageUptodate(page)) {
2822 continue;
2823 }
2824 if (!wait) {
2825 if (TestSetPageLocked(page)) {
2826 continue;
2827 }
2828 } else {
2829 lock_page(page);
2830 }
2831 if (!PageUptodate(page)) {
2832 err = page->mapping->a_ops->readpage(NULL, page);
2833 if (err) {
2834 ret = err;
2835 }
2836 } else {
2837 unlock_page(page);
2838 }
2839 }
2840
2841 if (ret || !wait) {
2842 return ret;
2843 }
2844
2845 for (i = start_i; i < num_pages; i++) {
2846 page = extent_buffer_page(eb, i);
2847 wait_on_page_locked(page);
2848 if (!PageUptodate(page)) {
2849 ret = -EIO;
2850 }
2851 }
2852 if (!ret)
2853 eb->flags |= EXTENT_UPTODATE;
2854 return ret;
2855}
2856EXPORT_SYMBOL(read_extent_buffer_pages);
2857
2858void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2859 unsigned long start,
2860 unsigned long len)
2861{
2862 size_t cur;
2863 size_t offset;
2864 struct page *page;
2865 char *kaddr;
2866 char *dst = (char *)dstv;
2867 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2868 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2869 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2870
2871 WARN_ON(start > eb->len);
2872 WARN_ON(start + len > eb->start + eb->len);
2873
2874 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2875
2876 while(len > 0) {
2877 page = extent_buffer_page(eb, i);
2878 if (!PageUptodate(page)) {
2879 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2880 WARN_ON(1);
2881 }
2882 WARN_ON(!PageUptodate(page));
2883
2884 cur = min(len, (PAGE_CACHE_SIZE - offset));
2885 kaddr = kmap_atomic(page, KM_USER1);
2886 memcpy(dst, kaddr + offset, cur);
2887 kunmap_atomic(kaddr, KM_USER1);
2888
2889 dst += cur;
2890 len -= cur;
2891 offset = 0;
2892 i++;
2893 }
2894}
2895EXPORT_SYMBOL(read_extent_buffer);
2896
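read_extent_buffer() copies a logically contiguous range out of a buffer whose backing pages are not virtually contiguous, so it works out which page an offset lands in and how many bytes fit before the next page boundary. A user-space sketch of the same walk over separately allocated 'pages'; the names are invented and a plain pointer stands in for the kmap_atomic() step:

#include <stdio.h>
#include <string.h>

#define PG_SIZE 4096UL
#define PG_SHIFT 12

/* copy 'len' bytes starting at byte 'start' of a paged buffer into dst */
static void read_paged(char *pages[], unsigned long long eb_start,
		       unsigned long start, unsigned long len, char *dst)
{
	unsigned long start_offset = eb_start & (PG_SIZE - 1); /* offset of byte 0 in page 0 */
	unsigned long i = (start_offset + start) >> PG_SHIFT;
	unsigned long offset = (start_offset + start) & (PG_SIZE - 1);

	while (len > 0) {
		unsigned long cur = len < PG_SIZE - offset ? len : PG_SIZE - offset;

		memcpy(dst, pages[i] + offset, cur);
		dst += cur;
		len -= cur;
		offset = 0;     /* every page after the first is read from 0 */
		i++;
	}
}

int main(void)
{
	static char p0[PG_SIZE], p1[PG_SIZE];
	char *pages[] = { p0, p1 };
	char out[8];

	memset(p0, 'A', PG_SIZE);
	memset(p1, 'B', PG_SIZE);

	/* buffer starts 1K into its first page; read across the page boundary */
	read_paged(pages, 1024, 3068, 8, out);
	printf("%.8s\n", out);          /* AAAABBBB */
	return 0;
}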
2897int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2898 unsigned long min_len, char **token, char **map,
2899 unsigned long *map_start,
2900 unsigned long *map_len, int km)
2901{
2902 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2903 char *kaddr;
2904 struct page *p;
2905 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2906 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2907 unsigned long end_i = (start_offset + start + min_len - 1) >>
2908 PAGE_CACHE_SHIFT;
2909
2910 if (i != end_i)
2911 return -EINVAL;
2912
2913 if (i == 0) {
2914 offset = start_offset;
2915 *map_start = 0;
2916 } else {
2917 offset = 0;
2918 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2919 }
2920 if (start + min_len > eb->len) {
2921printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2922 WARN_ON(1);
2923 }
2924
2925 p = extent_buffer_page(eb, i);
2926 WARN_ON(!PageUptodate(p));
2927 kaddr = kmap_atomic(p, km);
2928 *token = kaddr;
2929 *map = kaddr + offset;
2930 *map_len = PAGE_CACHE_SIZE - offset;
2931 return 0;
2932}
2933EXPORT_SYMBOL(map_private_extent_buffer);
2934
2935int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2936 unsigned long min_len,
2937 char **token, char **map,
2938 unsigned long *map_start,
2939 unsigned long *map_len, int km)
2940{
2941 int err;
2942 int save = 0;
2943 if (eb->map_token) {
2944 unmap_extent_buffer(eb, eb->map_token, km);
2945 eb->map_token = NULL;
2946 save = 1;
2947 }
2948 err = map_private_extent_buffer(eb, start, min_len, token, map,
2949 map_start, map_len, km);
2950 if (!err && save) {
2951 eb->map_token = *token;
2952 eb->kaddr = *map;
2953 eb->map_start = *map_start;
2954 eb->map_len = *map_len;
2955 }
2956 return err;
2957}
2958EXPORT_SYMBOL(map_extent_buffer);
2959
2960void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2961{
2962 kunmap_atomic(token, km);
2963}
2964EXPORT_SYMBOL(unmap_extent_buffer);
2965
2966int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2967 unsigned long start,
2968 unsigned long len)
2969{
2970 size_t cur;
2971 size_t offset;
2972 struct page *page;
2973 char *kaddr;
2974 char *ptr = (char *)ptrv;
2975 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2976 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2977 int ret = 0;
2978
2979 WARN_ON(start > eb->len);
2980 WARN_ON(start + len > eb->start + eb->len);
2981
2982 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2983
2984 while(len > 0) {
2985 page = extent_buffer_page(eb, i);
2986 WARN_ON(!PageUptodate(page));
2987
2988 cur = min(len, (PAGE_CACHE_SIZE - offset));
2989
2990 kaddr = kmap_atomic(page, KM_USER0);
2991 ret = memcmp(ptr, kaddr + offset, cur);
2992 kunmap_atomic(kaddr, KM_USER0);
2993 if (ret)
2994 break;
2995
2996 ptr += cur;
2997 len -= cur;
2998 offset = 0;
2999 i++;
3000 }
3001 return ret;
3002}
3003EXPORT_SYMBOL(memcmp_extent_buffer);
3004
3005void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3006 unsigned long start, unsigned long len)
3007{
3008 size_t cur;
3009 size_t offset;
3010 struct page *page;
3011 char *kaddr;
3012 char *src = (char *)srcv;
3013 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3014 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3015
3016 WARN_ON(start > eb->len);
3017 WARN_ON(start + len > eb->start + eb->len);
3018
3019 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3020
3021 while(len > 0) {
3022 page = extent_buffer_page(eb, i);
3023 WARN_ON(!PageUptodate(page));
3024
3025 cur = min(len, PAGE_CACHE_SIZE - offset);
3026 kaddr = kmap_atomic(page, KM_USER1);
3027 memcpy(kaddr + offset, src, cur);
3028 kunmap_atomic(kaddr, KM_USER1);
3029
3030 src += cur;
3031 len -= cur;
3032 offset = 0;
3033 i++;
3034 }
3035}
3036EXPORT_SYMBOL(write_extent_buffer);
3037
3038void memset_extent_buffer(struct extent_buffer *eb, char c,
3039 unsigned long start, unsigned long len)
3040{
3041 size_t cur;
3042 size_t offset;
3043 struct page *page;
3044 char *kaddr;
3045 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3046 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3047
3048 WARN_ON(start > eb->len);
3049 WARN_ON(start + len > eb->start + eb->len);
3050
3051 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3052
3053 while(len > 0) {
3054 page = extent_buffer_page(eb, i);
3055 WARN_ON(!PageUptodate(page));
3056
3057 cur = min(len, PAGE_CACHE_SIZE - offset);
3058 kaddr = kmap_atomic(page, KM_USER0);
3059 memset(kaddr + offset, c, cur);
3060 kunmap_atomic(kaddr, KM_USER0);
3061
3062 len -= cur;
3063 offset = 0;
3064 i++;
3065 }
3066}
3067EXPORT_SYMBOL(memset_extent_buffer);
3068
3069void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3070 unsigned long dst_offset, unsigned long src_offset,
3071 unsigned long len)
3072{
3073 u64 dst_len = dst->len;
3074 size_t cur;
3075 size_t offset;
3076 struct page *page;
3077 char *kaddr;
3078 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3079 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3080
3081 WARN_ON(src->len != dst_len);
3082
3083 offset = (start_offset + dst_offset) &
3084 ((unsigned long)PAGE_CACHE_SIZE - 1);
3085
3086 while(len > 0) {
3087 page = extent_buffer_page(dst, i);
3088 WARN_ON(!PageUptodate(page));
3089
3090 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3091
3092 kaddr = kmap_atomic(page, KM_USER0);
3093 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3094 kunmap_atomic(kaddr, KM_USER0);
3095
3096 src_offset += cur;
3097 len -= cur;
3098 offset = 0;
3099 i++;
3100 }
3101}
3102EXPORT_SYMBOL(copy_extent_buffer);
3103
3104static void move_pages(struct page *dst_page, struct page *src_page,
3105 unsigned long dst_off, unsigned long src_off,
3106 unsigned long len)
3107{
3108 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3109 if (dst_page == src_page) {
3110 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3111 } else {
3112 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3113 char *p = dst_kaddr + dst_off + len;
3114 char *s = src_kaddr + src_off + len;
3115
3116 while (len--)
3117 *--p = *--s;
3118
3119 kunmap_atomic(src_kaddr, KM_USER1);
3120 }
3121 kunmap_atomic(dst_kaddr, KM_USER0);
3122}
3123
3124static void copy_pages(struct page *dst_page, struct page *src_page,
3125 unsigned long dst_off, unsigned long src_off,
3126 unsigned long len)
3127{
3128 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3129 char *src_kaddr;
3130
3131 if (dst_page != src_page)
3132 src_kaddr = kmap_atomic(src_page, KM_USER1);
3133 else
3134 src_kaddr = dst_kaddr;
3135
3136 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3137 kunmap_atomic(dst_kaddr, KM_USER0);
3138 if (dst_page != src_page)
3139 kunmap_atomic(src_kaddr, KM_USER1);
3140}
3141
3142void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3143 unsigned long src_offset, unsigned long len)
3144{
3145 size_t cur;
3146 size_t dst_off_in_page;
3147 size_t src_off_in_page;
3148 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3149 unsigned long dst_i;
3150 unsigned long src_i;
3151
3152 if (src_offset + len > dst->len) {
3153 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3154 src_offset, len, dst->len);
3155 BUG_ON(1);
3156 }
3157 if (dst_offset + len > dst->len) {
3158 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3159 dst_offset, len, dst->len);
3160 BUG_ON(1);
3161 }
3162
3163 while(len > 0) {
3164 dst_off_in_page = (start_offset + dst_offset) &
3165 ((unsigned long)PAGE_CACHE_SIZE - 1);
3166 src_off_in_page = (start_offset + src_offset) &
3167 ((unsigned long)PAGE_CACHE_SIZE - 1);
3168
3169 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3170 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3171
3172 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3173 src_off_in_page));
3174 cur = min_t(unsigned long, cur,
3175 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3176
3177 copy_pages(extent_buffer_page(dst, dst_i),
3178 extent_buffer_page(dst, src_i),
3179 dst_off_in_page, src_off_in_page, cur);
3180
3181 src_offset += cur;
3182 dst_offset += cur;
3183 len -= cur;
3184 }
3185}
3186EXPORT_SYMBOL(memcpy_extent_buffer);
3187
3188void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3189 unsigned long src_offset, unsigned long len)
3190{
3191 size_t cur;
3192 size_t dst_off_in_page;
3193 size_t src_off_in_page;
3194 unsigned long dst_end = dst_offset + len - 1;
3195 unsigned long src_end = src_offset + len - 1;
3196 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3197 unsigned long dst_i;
3198 unsigned long src_i;
3199
3200 if (src_offset + len > dst->len) {
3201 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3202 src_offset, len, dst->len);
3203 BUG_ON(1);
3204 }
3205 if (dst_offset + len > dst->len) {
3206 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3207 dst_offset, len, dst->len);
3208 BUG_ON(1);
3209 }
3210 if (dst_offset < src_offset) {
3211 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3212 return;
3213 }
3214 while(len > 0) {
3215 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3216 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3217
3218 dst_off_in_page = (start_offset + dst_end) &
3219 ((unsigned long)PAGE_CACHE_SIZE - 1);
3220 src_off_in_page = (start_offset + src_end) &
3221 ((unsigned long)PAGE_CACHE_SIZE - 1);
3222
3223 cur = min_t(unsigned long, len, src_off_in_page + 1);
3224 cur = min(cur, dst_off_in_page + 1);
3225 move_pages(extent_buffer_page(dst, dst_i),
3226 extent_buffer_page(dst, src_i),
3227 dst_off_in_page - cur + 1,
3228 src_off_in_page - cur + 1, cur);
3229
3230 dst_end -= cur;
3231 src_end -= cur;
3232 len -= cur;
3233 }
3234}
3235EXPORT_SYMBOL(memmove_extent_buffer);
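memmove_extent_buffer() copies forward (via memcpy_extent_buffer()) when the destination sits below the source, and otherwise works backwards from the tails so an overlapping move never overwrites bytes it still needs to read, the same trick move_pages() uses within a single page. A minimal standalone demonstration of the backward byte loop:

#include <stdio.h>

/* overlap-safe move for the dst > src case: copy from the tail backwards */
static void move_backwards(char *base, unsigned long dst_off,
			   unsigned long src_off, unsigned long len)
{
	char *p = base + dst_off + len;
	char *s = base + src_off + len;

	while (len--)
		*--p = *--s;
}

int main(void)
{
	char buf[16] = "abcdefgh";

	/* shift "abcdefgh" right by two within the same buffer */
	move_backwards(buf, 2, 0, 8);
	buf[10] = '\0';
	printf("%s\n", buf);    /* ababcdefgh */
	return 0;
}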
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index ea60f5447b5b..56314217cfc0 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -3,215 +3,53 @@
3 3
4#include <linux/rbtree.h> 4#include <linux/rbtree.h>
5 5
6#define EXTENT_MAP_LAST_BYTE (u64)-4
6#define EXTENT_MAP_HOLE (u64)-3 7#define EXTENT_MAP_HOLE (u64)-3
7#define EXTENT_MAP_INLINE (u64)-2 8#define EXTENT_MAP_INLINE (u64)-2
8#define EXTENT_MAP_DELALLOC (u64)-1 9#define EXTENT_MAP_DELALLOC (u64)-1
9 10
10/* bits for the extent state */
11#define EXTENT_DIRTY 1
12#define EXTENT_WRITEBACK (1 << 1)
13#define EXTENT_UPTODATE (1 << 2)
14#define EXTENT_LOCKED (1 << 3)
15#define EXTENT_NEW (1 << 4)
16#define EXTENT_DELALLOC (1 << 5)
17#define EXTENT_DEFRAG (1 << 6)
18#define EXTENT_DEFRAG_DONE (1 << 7)
19#define EXTENT_BUFFER_FILLED (1 << 8)
20#define EXTENT_CSUM (1 << 9)
21#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
22
23/*
24 * page->private values. Every page that is controlled by the extent
25 * map has page->private set to one.
26 */
27#define EXTENT_PAGE_PRIVATE 1
28#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
29
30
31struct extent_map_ops {
32 int (*fill_delalloc)(struct inode *inode, u64 start, u64 end);
33 int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
34 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
35 int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end);
36 void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end);
37};
38
39struct extent_map_tree {
40 struct rb_root map;
41 struct rb_root state;
42 struct address_space *mapping;
43 u64 dirty_bytes;
44 rwlock_t lock;
45 struct extent_map_ops *ops;
46 spinlock_t lru_lock;
47 struct list_head buffer_lru;
48 int lru_size;
49};
50
51/* note, this must start with the same fields as fs/extent_map.c:tree_entry */
52struct extent_map { 11struct extent_map {
53 u64 start;
54 u64 end; /* inclusive */
55 int in_tree;
56 struct rb_node rb_node; 12 struct rb_node rb_node;
57 /* block_start and block_end are in bytes */ 13
14 /* all of these are in bytes */
15 u64 start;
16 u64 len;
58 u64 block_start; 17 u64 block_start;
59 u64 block_end; /* inclusive */ 18 unsigned long flags;
60 struct block_device *bdev; 19 struct block_device *bdev;
61 atomic_t refs; 20 atomic_t refs;
62};
63
64/* note, this must start with the same fields as fs/extent_map.c:tree_entry */
65struct extent_state {
66 u64 start;
67 u64 end; /* inclusive */
68 int in_tree; 21 int in_tree;
69 struct rb_node rb_node;
70 wait_queue_head_t wq;
71 atomic_t refs;
72 unsigned long state;
73
74 /* for use by the FS */
75 u64 private;
76
77 struct list_head list;
78}; 22};
79 23
80struct extent_buffer { 24struct extent_map_tree {
81 u64 start; 25 struct rb_root map;
82 unsigned long len; 26 struct extent_map *last;
83 char *map_token; 27 spinlock_t lock;
84 char *kaddr;
85 unsigned long map_start;
86 unsigned long map_len;
87 struct page *first_page;
88 struct list_head lru;
89 atomic_t refs;
90 int flags;
91}; 28};
92 29
93typedef struct extent_map *(get_extent_t)(struct inode *inode, 30static inline u64 extent_map_end(struct extent_map *em)
94 struct page *page, 31{
95 size_t page_offset, 32 if (em->start + em->len < em->start)
96 u64 start, u64 end, 33 return (u64)-1;
97 int create); 34 return em->start + em->len;
35}
36
37static inline u64 extent_map_block_end(struct extent_map *em)
38{
39 if (em->block_start + em->len < em->block_start)
40 return (u64)-1;
41 return em->block_start + em->len;
42}
98 43
99void extent_map_tree_init(struct extent_map_tree *tree, 44void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
100 struct address_space *mapping, gfp_t mask);
101void extent_map_tree_empty_lru(struct extent_map_tree *tree);
102struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, 45struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
103 u64 start, u64 end); 46 u64 start, u64 len);
104int add_extent_mapping(struct extent_map_tree *tree, 47int add_extent_mapping(struct extent_map_tree *tree,
105 struct extent_map *em); 48 struct extent_map *em);
106int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); 49int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
107int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page); 50
108int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask);
109int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask);
110struct extent_map *alloc_extent_map(gfp_t mask); 51struct extent_map *alloc_extent_map(gfp_t mask);
111void free_extent_map(struct extent_map *em); 52void free_extent_map(struct extent_map *em);
112int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
113 get_extent_t *get_extent);
114int __init extent_map_init(void); 53int __init extent_map_init(void);
115void extent_map_exit(void); 54void extent_map_exit(void);
116
117u64 count_range_bits(struct extent_map_tree *tree,
118 u64 *start, u64 search_end,
119 u64 max_bytes, unsigned long bits);
120
121int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
122 int bits, int filled);
123int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
124 int bits, gfp_t mask);
125int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
126 int bits, gfp_t mask);
127int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
128 gfp_t mask);
129int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
130 gfp_t mask);
131int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
132 gfp_t mask);
133int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
134 gfp_t mask);
135int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
136 gfp_t mask);
137int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
138 u64 *start_ret, u64 *end_ret, int bits);
139int extent_invalidatepage(struct extent_map_tree *tree,
140 struct page *page, unsigned long offset);
141int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
142 get_extent_t *get_extent,
143 struct writeback_control *wbc);
144int extent_writepages(struct extent_map_tree *tree,
145 struct address_space *mapping,
146 get_extent_t *get_extent,
147 struct writeback_control *wbc);
148int extent_readpages(struct extent_map_tree *tree,
149 struct address_space *mapping,
150 struct list_head *pages, unsigned nr_pages,
151 get_extent_t get_extent);
152int extent_prepare_write(struct extent_map_tree *tree,
153 struct inode *inode, struct page *page,
154 unsigned from, unsigned to, get_extent_t *get_extent);
155int extent_commit_write(struct extent_map_tree *tree,
156 struct inode *inode, struct page *page,
157 unsigned from, unsigned to);
158sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
159 get_extent_t *get_extent);
160int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end);
161int set_state_private(struct extent_map_tree *tree, u64 start, u64 private);
162int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private);
163void set_page_extent_mapped(struct page *page);
164
165struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
166 u64 start, unsigned long len,
167 struct page *page0,
168 gfp_t mask);
169struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
170 u64 start, unsigned long len,
171 gfp_t mask);
172void free_extent_buffer(struct extent_buffer *eb);
173int read_extent_buffer_pages(struct extent_map_tree *tree,
174 struct extent_buffer *eb, u64 start, int wait);
175
176static inline void extent_buffer_get(struct extent_buffer *eb)
177{
178 atomic_inc(&eb->refs);
179}
180
181int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
182 unsigned long start,
183 unsigned long len);
184void read_extent_buffer(struct extent_buffer *eb, void *dst,
185 unsigned long start,
186 unsigned long len);
187void write_extent_buffer(struct extent_buffer *eb, const void *src,
188 unsigned long start, unsigned long len);
189void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
190 unsigned long dst_offset, unsigned long src_offset,
191 unsigned long len);
192void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
193 unsigned long src_offset, unsigned long len);
194void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
195 unsigned long src_offset, unsigned long len);
196void memset_extent_buffer(struct extent_buffer *eb, char c,
197 unsigned long start, unsigned long len);
198int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
199 struct extent_buffer *eb);
200int clear_extent_buffer_dirty(struct extent_map_tree *tree,
201 struct extent_buffer *eb);
202int set_extent_buffer_dirty(struct extent_map_tree *tree,
203 struct extent_buffer *eb);
204int set_extent_buffer_uptodate(struct extent_map_tree *tree,
205 struct extent_buffer *eb);
206int extent_buffer_uptodate(struct extent_map_tree *tree,
207 struct extent_buffer *eb);
208int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
209 unsigned long min_len, char **token, char **map,
210 unsigned long *map_start,
211 unsigned long *map_len, int km);
212int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
213 unsigned long min_len, char **token, char **map,
214 unsigned long *map_start,
215 unsigned long *map_len, int km);
216void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
217#endif 55#endif
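After this rework, extent_map.h describes a mapping as the half-open range [start, start + len) instead of an inclusive [start, end], the state-tracking pieces (extent_state, extent_buffer, the EXTENT_* bits) move to the extent_io side, and the tree's spinlock is taken by callers. A sketch of the intended caller pattern, assuming only the declarations visible in the new header (illustration, not code from the commit):

	/* Check whether [start, start + len) is fully covered by one cached mapping. */
	static int range_is_mapped(struct extent_map_tree *tree, u64 start, u64 len)
	{
		struct extent_map *em;
		int mapped = 0;

		spin_lock(&tree->lock);		/* locking is the caller's job now */
		em = lookup_extent_mapping(tree, start, len);
		spin_unlock(&tree->lock);
		if (!em)
			return 0;
		if (em->start <= start && extent_map_end(em) >= start + len)
			mapped = 1;
		free_extent_map(em);		/* drop the reference the lookup took */
		return mapped;
	}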
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 1cd8c908811e..c5bb00f92396 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -233,8 +233,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
233 int err = 0; 233 int err = 0;
234 int i; 234 int i;
235 struct inode *inode = fdentry(file)->d_inode; 235 struct inode *inode = fdentry(file)->d_inode;
236 struct extent_map *em; 236 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
237 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
238 u64 hint_byte; 237 u64 hint_byte;
239 u64 num_bytes; 238 u64 num_bytes;
240 u64 start_pos; 239 u64 start_pos;
@@ -242,11 +241,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
242 u64 end_pos = pos + write_bytes; 241 u64 end_pos = pos + write_bytes;
243 u64 inline_size; 242 u64 inline_size;
244 loff_t isize = i_size_read(inode); 243 loff_t isize = i_size_read(inode);
245 em = alloc_extent_map(GFP_NOFS);
246 if (!em)
247 return -ENOMEM;
248
249 em->bdev = inode->i_sb->s_bdev;
250 244
251 start_pos = pos & ~((u64)root->sectorsize - 1); 245 start_pos = pos & ~((u64)root->sectorsize - 1);
252 num_bytes = (write_bytes + pos - start_pos + 246 num_bytes = (write_bytes + pos - start_pos +
@@ -254,7 +248,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
254 248
255 end_of_last_block = start_pos + num_bytes - 1; 249 end_of_last_block = start_pos + num_bytes - 1;
256 250
257 lock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS); 251 lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
258 mutex_lock(&root->fs_info->fs_mutex); 252 mutex_lock(&root->fs_info->fs_mutex);
259 trans = btrfs_start_transaction(root, 1); 253 trans = btrfs_start_transaction(root, 1);
260 if (!trans) { 254 if (!trans) {
@@ -268,7 +262,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
268 if ((end_of_last_block & 4095) == 0) { 262 if ((end_of_last_block & 4095) == 0) {
269 printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block); 263 printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
270 } 264 }
271 set_extent_uptodate(em_tree, start_pos, end_of_last_block, GFP_NOFS); 265 set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
272 266
273 /* FIXME...EIEIO, ENOSPC and more */ 267 /* FIXME...EIEIO, ENOSPC and more */
274 268
@@ -293,6 +287,8 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
293 inode->i_ino, 287 inode->i_ino,
294 last_pos_in_file, 288 last_pos_in_file,
295 0, 0, hole_size); 289 0, 0, hole_size);
290 btrfs_drop_extent_cache(inode, last_pos_in_file,
291 last_pos_in_file + hole_size -1);
296 btrfs_check_file(root, inode); 292 btrfs_check_file(root, inode);
297 } 293 }
298 if (err) 294 if (err)
@@ -320,12 +316,12 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
320 last_end += PAGE_CACHE_SIZE - 1; 316 last_end += PAGE_CACHE_SIZE - 1;
321 if (start_pos < isize) { 317 if (start_pos < isize) {
322 u64 delalloc_start = start_pos; 318 u64 delalloc_start = start_pos;
323 existing_delalloc = count_range_bits(em_tree, 319 existing_delalloc = count_range_bits(io_tree,
324 &delalloc_start, 320 &delalloc_start,
325 end_of_last_block, (u64)-1, 321 end_of_last_block, (u64)-1,
326 EXTENT_DELALLOC); 322 EXTENT_DELALLOC);
327 } 323 }
328 set_extent_delalloc(em_tree, start_pos, end_of_last_block, 324 set_extent_delalloc(io_tree, start_pos, end_of_last_block,
329 GFP_NOFS); 325 GFP_NOFS);
330 spin_lock(&root->fs_info->delalloc_lock); 326 spin_lock(&root->fs_info->delalloc_lock);
331 root->fs_info->delalloc_bytes += (end_of_last_block + 1 - 327 root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
@@ -346,6 +342,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
346 inline_size -= start_pos; 342 inline_size -= start_pos;
347 err = insert_inline_extent(trans, root, inode, start_pos, 343 err = insert_inline_extent(trans, root, inode, start_pos,
348 inline_size, pages, 0, num_pages); 344 inline_size, pages, 0, num_pages);
345 btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
349 BUG_ON(err); 346 BUG_ON(err);
350 } 347 }
351 if (end_pos > isize) { 348 if (end_pos > isize) {
@@ -356,8 +353,7 @@ failed:
356 err = btrfs_end_transaction(trans, root); 353 err = btrfs_end_transaction(trans, root);
357out_unlock: 354out_unlock:
358 mutex_unlock(&root->fs_info->fs_mutex); 355 mutex_unlock(&root->fs_info->fs_mutex);
359 unlock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS); 356 unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
360 free_extent_map(em);
361 return err; 357 return err;
362} 358}
363 359
@@ -367,10 +363,15 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
367 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 363 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
368 364
369 while(1) { 365 while(1) {
366 spin_lock(&em_tree->lock);
370 em = lookup_extent_mapping(em_tree, start, end); 367 em = lookup_extent_mapping(em_tree, start, end);
371 if (!em) 368 if (!em) {
369 spin_unlock(&em_tree->lock);
372 break; 370 break;
371 }
373 remove_extent_mapping(em_tree, em); 372 remove_extent_mapping(em_tree, em);
373 spin_unlock(&em_tree->lock);
374
374 /* once for us */ 375 /* once for us */
375 free_extent_map(em); 376 free_extent_map(em);
376 /* once for the tree*/ 377 /* once for the tree*/
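The btrfs_drop_extent_cache() hunk above now takes the tree's spinlock around each lookup/remove pair and drops two references per mapping: one taken by the lookup and one held by the tree itself. The same pattern as a compact sketch (hypothetical helper name, not code from the commit):

	/* Invalidate every cached mapping that intersects [start, start + len). */
	static void drop_cached_range(struct extent_map_tree *em_tree, u64 start, u64 len)
	{
		struct extent_map *em;

		while (1) {
			spin_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, start, len);
			if (!em) {
				spin_unlock(&em_tree->lock);
				break;
			}
			remove_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			free_extent_map(em);	/* once for us */
			free_extent_map(em);	/* once for the tree */
		}
	}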
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 67005480e139..16d3aef45d18 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -53,7 +53,7 @@ static struct inode_operations btrfs_file_inode_operations;
53static struct address_space_operations btrfs_aops; 53static struct address_space_operations btrfs_aops;
54static struct address_space_operations btrfs_symlink_aops; 54static struct address_space_operations btrfs_symlink_aops;
55static struct file_operations btrfs_dir_file_operations; 55static struct file_operations btrfs_dir_file_operations;
56static struct extent_map_ops btrfs_extent_map_ops; 56static struct extent_io_ops btrfs_extent_io_ops;
57 57
58static struct kmem_cache *btrfs_inode_cachep; 58static struct kmem_cache *btrfs_inode_cachep;
59struct kmem_cache *btrfs_trans_handle_cachep; 59struct kmem_cache *btrfs_trans_handle_cachep;
@@ -104,6 +104,8 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
104 u64 num_bytes; 104 u64 num_bytes;
105 u64 cur_alloc_size; 105 u64 cur_alloc_size;
106 u64 blocksize = root->sectorsize; 106 u64 blocksize = root->sectorsize;
107 u64 orig_start = start;
108 u64 orig_num_bytes;
107 struct btrfs_key ins; 109 struct btrfs_key ins;
108 int ret; 110 int ret;
109 111
@@ -115,6 +117,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
115 num_bytes = max(blocksize, num_bytes); 117 num_bytes = max(blocksize, num_bytes);
116 ret = btrfs_drop_extents(trans, root, inode, 118 ret = btrfs_drop_extents(trans, root, inode,
117 start, start + num_bytes, start, &alloc_hint); 119 start, start + num_bytes, start, &alloc_hint);
120 orig_num_bytes = num_bytes;
118 121
119 if (alloc_hint == EXTENT_MAP_INLINE) 122 if (alloc_hint == EXTENT_MAP_INLINE)
120 goto out; 123 goto out;
@@ -138,6 +141,8 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
138 alloc_hint = ins.objectid + ins.offset; 141 alloc_hint = ins.objectid + ins.offset;
139 start += cur_alloc_size; 142 start += cur_alloc_size;
140 } 143 }
144 btrfs_drop_extent_cache(inode, orig_start,
145 orig_start + orig_num_bytes - 1);
141 btrfs_add_ordered_inode(inode); 146 btrfs_add_ordered_inode(inode);
142out: 147out:
143 btrfs_end_transaction(trans, root); 148 btrfs_end_transaction(trans, root);
@@ -297,7 +302,7 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
297 int ret = 0; 302 int ret = 0;
298 struct inode *inode = page->mapping->host; 303 struct inode *inode = page->mapping->host;
299 struct btrfs_root *root = BTRFS_I(inode)->root; 304 struct btrfs_root *root = BTRFS_I(inode)->root;
300 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 305 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
301 struct btrfs_csum_item *item; 306 struct btrfs_csum_item *item;
302 struct btrfs_path *path = NULL; 307 struct btrfs_path *path = NULL;
303 u32 csum; 308 u32 csum;
@@ -317,7 +322,7 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
317 } 322 }
318 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item, 323 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
319 BTRFS_CRC32_SIZE); 324 BTRFS_CRC32_SIZE);
320 set_state_private(em_tree, start, csum); 325 set_state_private(io_tree, start, csum);
321out: 326out:
322 if (path) 327 if (path)
323 btrfs_free_path(path); 328 btrfs_free_path(path);
@@ -329,17 +334,19 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end)
329{ 334{
330 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); 335 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
331 struct inode *inode = page->mapping->host; 336 struct inode *inode = page->mapping->host;
332 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 337 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
333 char *kaddr; 338 char *kaddr;
334 u64 private; 339 u64 private;
335 int ret; 340 int ret;
336 struct btrfs_root *root = BTRFS_I(inode)->root; 341 struct btrfs_root *root = BTRFS_I(inode)->root;
337 u32 csum = ~(u32)0; 342 u32 csum = ~(u32)0;
338 unsigned long flags; 343 unsigned long flags;
344
339 if (btrfs_test_opt(root, NODATASUM) || 345 if (btrfs_test_opt(root, NODATASUM) ||
340 btrfs_test_flag(inode, NODATASUM)) 346 btrfs_test_flag(inode, NODATASUM))
341 return 0; 347 return 0;
342 ret = get_state_private(em_tree, start, &private); 348
349 ret = get_state_private(io_tree, start, &private);
343 local_irq_save(flags); 350 local_irq_save(flags);
344 kaddr = kmap_atomic(page, KM_IRQ0); 351 kaddr = kmap_atomic(page, KM_IRQ0);
345 if (ret) { 352 if (ret) {
@@ -428,7 +435,7 @@ void btrfs_read_locked_inode(struct inode *inode)
428 switch (inode->i_mode & S_IFMT) { 435 switch (inode->i_mode & S_IFMT) {
429 case S_IFREG: 436 case S_IFREG:
430 inode->i_mapping->a_ops = &btrfs_aops; 437 inode->i_mapping->a_ops = &btrfs_aops;
431 BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops; 438 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
432 inode->i_fop = &btrfs_file_operations; 439 inode->i_fop = &btrfs_file_operations;
433 inode->i_op = &btrfs_file_inode_operations; 440 inode->i_op = &btrfs_file_inode_operations;
434 break; 441 break;
@@ -873,7 +880,7 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page,
873 size_t zero_start) 880 size_t zero_start)
874{ 881{
875 char *kaddr; 882 char *kaddr;
876 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 883 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
877 struct btrfs_root *root = BTRFS_I(inode)->root; 884 struct btrfs_root *root = BTRFS_I(inode)->root;
878 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 885 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
879 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 886 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
@@ -884,12 +891,12 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page,
884 WARN_ON(!PageLocked(page)); 891 WARN_ON(!PageLocked(page));
885 set_page_extent_mapped(page); 892 set_page_extent_mapped(page);
886 893
887 lock_extent(em_tree, page_start, page_end, GFP_NOFS); 894 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
888 delalloc_start = page_start; 895 delalloc_start = page_start;
889 existing_delalloc = count_range_bits(&BTRFS_I(inode)->extent_tree, 896 existing_delalloc = count_range_bits(&BTRFS_I(inode)->io_tree,
890 &delalloc_start, page_end, 897 &delalloc_start, page_end,
891 PAGE_CACHE_SIZE, EXTENT_DELALLOC); 898 PAGE_CACHE_SIZE, EXTENT_DELALLOC);
892 set_extent_delalloc(&BTRFS_I(inode)->extent_tree, page_start, 899 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
893 page_end, GFP_NOFS); 900 page_end, GFP_NOFS);
894 901
895 spin_lock(&root->fs_info->delalloc_lock); 902 spin_lock(&root->fs_info->delalloc_lock);
@@ -903,7 +910,7 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page,
903 kunmap(page); 910 kunmap(page);
904 } 911 }
905 set_page_dirty(page); 912 set_page_dirty(page);
906 unlock_extent(em_tree, page_start, page_end, GFP_NOFS); 913 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
907 914
908 return ret; 915 return ret;
909} 916}
@@ -961,7 +968,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
961 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) { 968 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
962 struct btrfs_trans_handle *trans; 969 struct btrfs_trans_handle *trans;
963 struct btrfs_root *root = BTRFS_I(inode)->root; 970 struct btrfs_root *root = BTRFS_I(inode)->root;
964 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 971 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
965 972
966 u64 mask = root->sectorsize - 1; 973 u64 mask = root->sectorsize - 1;
967 u64 pos = (inode->i_size + mask) & ~mask; 974 u64 pos = (inode->i_size + mask) & ~mask;
@@ -986,7 +993,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
986 993
987 btrfs_truncate_page(inode->i_mapping, inode->i_size); 994 btrfs_truncate_page(inode->i_mapping, inode->i_size);
988 995
989 lock_extent(em_tree, pos, block_end, GFP_NOFS); 996 lock_extent(io_tree, pos, block_end, GFP_NOFS);
990 hole_size = block_end - hole_start; 997 hole_size = block_end - hole_start;
991 998
992 mutex_lock(&root->fs_info->fs_mutex); 999 mutex_lock(&root->fs_info->fs_mutex);
@@ -1001,11 +1008,13 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
1001 inode->i_ino, 1008 inode->i_ino,
1002 hole_start, 0, 0, 1009 hole_start, 0, 0,
1003 hole_size); 1010 hole_size);
1011 btrfs_drop_extent_cache(inode, hole_start,
1012 hole_size - 1);
1004 btrfs_check_file(root, inode); 1013 btrfs_check_file(root, inode);
1005 } 1014 }
1006 btrfs_end_transaction(trans, root); 1015 btrfs_end_transaction(trans, root);
1007 mutex_unlock(&root->fs_info->fs_mutex); 1016 mutex_unlock(&root->fs_info->fs_mutex);
1008 unlock_extent(em_tree, pos, block_end, GFP_NOFS); 1017 unlock_extent(io_tree, pos, block_end, GFP_NOFS);
1009 if (err) 1018 if (err)
1010 return err; 1019 return err;
1011 } 1020 }
@@ -1189,7 +1198,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
1189 struct btrfs_iget_args *args = p; 1198 struct btrfs_iget_args *args = p;
1190 inode->i_ino = args->ino; 1199 inode->i_ino = args->ino;
1191 BTRFS_I(inode)->root = args->root; 1200 BTRFS_I(inode)->root = args->root;
1192 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, 1201 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1202 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1193 inode->i_mapping, GFP_NOFS); 1203 inode->i_mapping, GFP_NOFS);
1194 return 0; 1204 return 0;
1195} 1205}
@@ -1485,7 +1495,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
1485 if (!inode) 1495 if (!inode)
1486 return ERR_PTR(-ENOMEM); 1496 return ERR_PTR(-ENOMEM);
1487 1497
1488 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, 1498 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1499 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1489 inode->i_mapping, GFP_NOFS); 1500 inode->i_mapping, GFP_NOFS);
1490 BTRFS_I(inode)->root = root; 1501 BTRFS_I(inode)->root = root;
1491 1502
@@ -1672,9 +1683,10 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
1672 inode->i_mapping->a_ops = &btrfs_aops; 1683 inode->i_mapping->a_ops = &btrfs_aops;
1673 inode->i_fop = &btrfs_file_operations; 1684 inode->i_fop = &btrfs_file_operations;
1674 inode->i_op = &btrfs_file_inode_operations; 1685 inode->i_op = &btrfs_file_inode_operations;
1675 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, 1686 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1687 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1676 inode->i_mapping, GFP_NOFS); 1688 inode->i_mapping, GFP_NOFS);
1677 BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops; 1689 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1678 } 1690 }
1679 dir->i_sb->s_dirt = 1; 1691 dir->i_sb->s_dirt = 1;
1680 btrfs_update_inode_block_group(trans, inode); 1692 btrfs_update_inode_block_group(trans, inode);
@@ -1816,7 +1828,7 @@ out_unlock:
1816} 1828}
1817 1829
1818struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 1830struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
1819 size_t page_offset, u64 start, u64 end, 1831 size_t page_offset, u64 start, u64 len,
1820 int create) 1832 int create)
1821{ 1833{
1822 int ret; 1834 int ret;
@@ -1826,7 +1838,6 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
1826 u64 extent_end = 0; 1838 u64 extent_end = 0;
1827 u64 objectid = inode->i_ino; 1839 u64 objectid = inode->i_ino;
1828 u32 found_type; 1840 u32 found_type;
1829 int failed_insert = 0;
1830 struct btrfs_path *path; 1841 struct btrfs_path *path;
1831 struct btrfs_root *root = BTRFS_I(inode)->root; 1842 struct btrfs_root *root = BTRFS_I(inode)->root;
1832 struct btrfs_file_extent_item *item; 1843 struct btrfs_file_extent_item *item;
@@ -1834,6 +1845,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
1834 struct btrfs_key found_key; 1845 struct btrfs_key found_key;
1835 struct extent_map *em = NULL; 1846 struct extent_map *em = NULL;
1836 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 1847 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1848 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1837 struct btrfs_trans_handle *trans = NULL; 1849 struct btrfs_trans_handle *trans = NULL;
1838 1850
1839 path = btrfs_alloc_path(); 1851 path = btrfs_alloc_path();
@@ -1841,24 +1853,26 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
1841 mutex_lock(&root->fs_info->fs_mutex); 1853 mutex_lock(&root->fs_info->fs_mutex);
1842 1854
1843again: 1855again:
1844 em = lookup_extent_mapping(em_tree, start, end); 1856 spin_lock(&em_tree->lock);
1857 em = lookup_extent_mapping(em_tree, start, len);
1858 spin_unlock(&em_tree->lock);
1859
1845 if (em) { 1860 if (em) {
1846 if (em->start > start) { 1861 if (em->start > start) {
1847 printk("get_extent start %Lu em start %Lu\n", 1862 printk("get_extent lookup [%Lu %Lu] em [%Lu %Lu]\n",
1848 start, em->start); 1863 start, len, em->start, em->len);
1849 WARN_ON(1); 1864 WARN_ON(1);
1850 } 1865 }
1851 goto out; 1866 goto out;
1852 } 1867 }
1868 em = alloc_extent_map(GFP_NOFS);
1853 if (!em) { 1869 if (!em) {
1854 em = alloc_extent_map(GFP_NOFS); 1870 err = -ENOMEM;
1855 if (!em) { 1871 goto out;
1856 err = -ENOMEM;
1857 goto out;
1858 }
1859 em->start = EXTENT_MAP_HOLE;
1860 em->end = EXTENT_MAP_HOLE;
1861 } 1872 }
1873
1874 em->start = EXTENT_MAP_HOLE;
1875 em->len = (u64)-1;
1862 em->bdev = inode->i_sb->s_bdev; 1876 em->bdev = inode->i_sb->s_bdev;
1863 ret = btrfs_lookup_file_extent(trans, root, path, 1877 ret = btrfs_lookup_file_extent(trans, root, path,
1864 objectid, start, trans != NULL); 1878 objectid, start, trans != NULL);
@@ -1893,28 +1907,25 @@ again:
1893 if (start < extent_start || start >= extent_end) { 1907 if (start < extent_start || start >= extent_end) {
1894 em->start = start; 1908 em->start = start;
1895 if (start < extent_start) { 1909 if (start < extent_start) {
1896 if (end < extent_start) 1910 if (start + len <= extent_start)
1897 goto not_found; 1911 goto not_found;
1898 em->end = extent_end - 1; 1912 em->len = extent_end - extent_start;
1899 } else { 1913 } else {
1900 em->end = end; 1914 em->len = len;
1901 } 1915 }
1902 goto not_found_em; 1916 goto not_found_em;
1903 } 1917 }
1904 bytenr = btrfs_file_extent_disk_bytenr(leaf, item); 1918 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
1905 if (bytenr == 0) { 1919 if (bytenr == 0) {
1906 em->start = extent_start; 1920 em->start = extent_start;
1907 em->end = extent_end - 1; 1921 em->len = extent_end - extent_start;
1908 em->block_start = EXTENT_MAP_HOLE; 1922 em->block_start = EXTENT_MAP_HOLE;
1909 em->block_end = EXTENT_MAP_HOLE;
1910 goto insert; 1923 goto insert;
1911 } 1924 }
1912 bytenr += btrfs_file_extent_offset(leaf, item); 1925 bytenr += btrfs_file_extent_offset(leaf, item);
1913 em->block_start = bytenr; 1926 em->block_start = bytenr;
1914 em->block_end = em->block_start +
1915 btrfs_file_extent_num_bytes(leaf, item) - 1;
1916 em->start = extent_start; 1927 em->start = extent_start;
1917 em->end = extent_end - 1; 1928 em->len = extent_end - extent_start;
1918 goto insert; 1929 goto insert;
1919 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 1930 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
1920 unsigned long ptr; 1931 unsigned long ptr;
@@ -1925,25 +1936,24 @@ again:
1925 1936
1926 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf, 1937 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
1927 path->slots[0])); 1938 path->slots[0]));
1928 extent_end = (extent_start + size - 1) | 1939 extent_end = (extent_start + size + root->sectorsize - 1) &
1929 ((u64)root->sectorsize - 1); 1940 ~((u64)root->sectorsize - 1);
1930 if (start < extent_start || start >= extent_end) { 1941 if (start < extent_start || start >= extent_end) {
1931 em->start = start; 1942 em->start = start;
1932 if (start < extent_start) { 1943 if (start < extent_start) {
1933 if (end < extent_start) 1944 if (start + len <= extent_start)
1934 goto not_found; 1945 goto not_found;
1935 em->end = extent_end; 1946 em->len = extent_end - extent_start;
1936 } else { 1947 } else {
1937 em->end = end; 1948 em->len = len;
1938 } 1949 }
1939 goto not_found_em; 1950 goto not_found_em;
1940 } 1951 }
1941 em->block_start = EXTENT_MAP_INLINE; 1952 em->block_start = EXTENT_MAP_INLINE;
1942 em->block_end = EXTENT_MAP_INLINE;
1943 1953
1944 if (!page) { 1954 if (!page) {
1945 em->start = extent_start; 1955 em->start = extent_start;
1946 em->end = extent_start + size - 1; 1956 em->len = size;
1947 goto out; 1957 goto out;
1948 } 1958 }
1949 1959
@@ -1952,8 +1962,7 @@ again:
1952 copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset, 1962 copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset,
1953 size - extent_offset); 1963 size - extent_offset);
1954 em->start = extent_start + extent_offset; 1964 em->start = extent_start + extent_offset;
1955 em->end = (em->start + copy_size -1) | 1965 em->len = copy_size;
1956 ((u64)root->sectorsize -1);
1957 map = kmap(page); 1966 map = kmap(page);
1958 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 1967 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
1959 if (create == 0 && !PageUptodate(page)) { 1968 if (create == 0 && !PageUptodate(page)) {
@@ -1974,7 +1983,8 @@ again:
1974 btrfs_mark_buffer_dirty(leaf); 1983 btrfs_mark_buffer_dirty(leaf);
1975 } 1984 }
1976 kunmap(page); 1985 kunmap(page);
1977 set_extent_uptodate(em_tree, em->start, em->end, GFP_NOFS); 1986 set_extent_uptodate(io_tree, em->start,
1987 extent_map_end(em) - 1, GFP_NOFS);
1978 goto insert; 1988 goto insert;
1979 } else { 1989 } else {
1980 printk("unknown found_type %d\n", found_type); 1990 printk("unknown found_type %d\n", found_type);
@@ -1982,33 +1992,29 @@ again:
1982 } 1992 }
1983not_found: 1993not_found:
1984 em->start = start; 1994 em->start = start;
1985 em->end = end; 1995 em->len = len;
1986not_found_em: 1996not_found_em:
1987 em->block_start = EXTENT_MAP_HOLE; 1997 em->block_start = EXTENT_MAP_HOLE;
1988 em->block_end = EXTENT_MAP_HOLE;
1989insert: 1998insert:
1990 btrfs_release_path(root, path); 1999 btrfs_release_path(root, path);
1991 if (em->start > start || em->end < start) { 2000 if (em->start > start || extent_map_end(em) <= start) {
1992 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->end, start, end); 2001 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
1993 err = -EIO; 2002 err = -EIO;
1994 goto out; 2003 goto out;
1995 } 2004 }
2005
2006 err = 0;
2007 spin_lock(&em_tree->lock);
1996 ret = add_extent_mapping(em_tree, em); 2008 ret = add_extent_mapping(em_tree, em);
1997 if (ret == -EEXIST) { 2009 if (ret == -EEXIST) {
1998 free_extent_map(em); 2010 free_extent_map(em);
1999 em = NULL; 2011 em = lookup_extent_mapping(em_tree, start, len);
2000 if (0 && failed_insert == 1) { 2012 if (!em) {
2001 btrfs_drop_extent_cache(inode, start, end);
2002 }
2003 failed_insert++;
2004 if (failed_insert > 5) {
2005 printk("failing to insert %Lu %Lu\n", start, end);
2006 err = -EIO; 2013 err = -EIO;
2007 goto out; 2014 printk("failing to insert %Lu %Lu\n", start, len);
2008 } 2015 }
2009 goto again;
2010 } 2016 }
2011 err = 0; 2017 spin_unlock(&em_tree->lock);
2012out: 2018out:
2013 btrfs_free_path(path); 2019 btrfs_free_path(path);
2014 if (trans) { 2020 if (trans) {
@@ -2032,14 +2038,14 @@ static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
2032 2038
2033int btrfs_readpage(struct file *file, struct page *page) 2039int btrfs_readpage(struct file *file, struct page *page)
2034{ 2040{
2035 struct extent_map_tree *tree; 2041 struct extent_io_tree *tree;
2036 tree = &BTRFS_I(page->mapping->host)->extent_tree; 2042 tree = &BTRFS_I(page->mapping->host)->io_tree;
2037 return extent_read_full_page(tree, page, btrfs_get_extent); 2043 return extent_read_full_page(tree, page, btrfs_get_extent);
2038} 2044}
2039 2045
2040static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 2046static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2041{ 2047{
2042 struct extent_map_tree *tree; 2048 struct extent_io_tree *tree;
2043 2049
2044 2050
2045 if (current->flags & PF_MEMALLOC) { 2051 if (current->flags & PF_MEMALLOC) {
@@ -2047,15 +2053,15 @@ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2047 unlock_page(page); 2053 unlock_page(page);
2048 return 0; 2054 return 0;
2049 } 2055 }
2050 tree = &BTRFS_I(page->mapping->host)->extent_tree; 2056 tree = &BTRFS_I(page->mapping->host)->io_tree;
2051 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 2057 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2052} 2058}
2053 2059
2054static int btrfs_writepages(struct address_space *mapping, 2060static int btrfs_writepages(struct address_space *mapping,
2055 struct writeback_control *wbc) 2061 struct writeback_control *wbc)
2056{ 2062{
2057 struct extent_map_tree *tree; 2063 struct extent_io_tree *tree;
2058 tree = &BTRFS_I(mapping->host)->extent_tree; 2064 tree = &BTRFS_I(mapping->host)->io_tree;
2059 return extent_writepages(tree, mapping, btrfs_get_extent, wbc); 2065 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
2060} 2066}
2061 2067
@@ -2063,19 +2069,21 @@ static int
2063btrfs_readpages(struct file *file, struct address_space *mapping, 2069btrfs_readpages(struct file *file, struct address_space *mapping,
2064 struct list_head *pages, unsigned nr_pages) 2070 struct list_head *pages, unsigned nr_pages)
2065{ 2071{
2066 struct extent_map_tree *tree; 2072 struct extent_io_tree *tree;
2067 tree = &BTRFS_I(mapping->host)->extent_tree; 2073 tree = &BTRFS_I(mapping->host)->io_tree;
2068 return extent_readpages(tree, mapping, pages, nr_pages, 2074 return extent_readpages(tree, mapping, pages, nr_pages,
2069 btrfs_get_extent); 2075 btrfs_get_extent);
2070} 2076}
2071 2077
2072static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags) 2078static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
2073{ 2079{
2074 struct extent_map_tree *tree; 2080 struct extent_io_tree *tree;
2081 struct extent_map_tree *map;
2075 int ret; 2082 int ret;
2076 2083
2077 tree = &BTRFS_I(page->mapping->host)->extent_tree; 2084 tree = &BTRFS_I(page->mapping->host)->io_tree;
2078 ret = try_release_extent_mapping(tree, page); 2085 map = &BTRFS_I(page->mapping->host)->extent_tree;
2086 ret = try_release_extent_mapping(map, tree, page);
2079 if (ret == 1) { 2087 if (ret == 1) {
2080 ClearPagePrivate(page); 2088 ClearPagePrivate(page);
2081 set_page_private(page, 0); 2089 set_page_private(page, 0);
@@ -2086,9 +2094,9 @@ static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
2086 2094
2087static void btrfs_invalidatepage(struct page *page, unsigned long offset) 2095static void btrfs_invalidatepage(struct page *page, unsigned long offset)
2088{ 2096{
2089 struct extent_map_tree *tree; 2097 struct extent_io_tree *tree;
2090 2098
2091 tree = &BTRFS_I(page->mapping->host)->extent_tree; 2099 tree = &BTRFS_I(page->mapping->host)->io_tree;
2092 extent_invalidatepage(tree, page, offset); 2100 extent_invalidatepage(tree, page, offset);
2093 btrfs_releasepage(page, GFP_NOFS); 2101 btrfs_releasepage(page, GFP_NOFS);
2094} 2102}
@@ -2374,7 +2382,7 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
2374int btrfs_defrag_file(struct file *file) { 2382int btrfs_defrag_file(struct file *file) {
2375 struct inode *inode = fdentry(file)->d_inode; 2383 struct inode *inode = fdentry(file)->d_inode;
2376 struct btrfs_root *root = BTRFS_I(inode)->root; 2384 struct btrfs_root *root = BTRFS_I(inode)->root;
2377 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 2385 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2378 struct page *page; 2386 struct page *page;
2379 unsigned long last_index; 2387 unsigned long last_index;
2380 unsigned long ra_index = 0; 2388 unsigned long ra_index = 0;
@@ -2414,13 +2422,13 @@ int btrfs_defrag_file(struct file *file) {
2414 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 2422 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2415 page_end = page_start + PAGE_CACHE_SIZE - 1; 2423 page_end = page_start + PAGE_CACHE_SIZE - 1;
2416 2424
2417 lock_extent(em_tree, page_start, page_end, GFP_NOFS); 2425 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2418 delalloc_start = page_start; 2426 delalloc_start = page_start;
2419 existing_delalloc = 2427 existing_delalloc =
2420 count_range_bits(&BTRFS_I(inode)->extent_tree, 2428 count_range_bits(&BTRFS_I(inode)->io_tree,
2421 &delalloc_start, page_end, 2429 &delalloc_start, page_end,
2422 PAGE_CACHE_SIZE, EXTENT_DELALLOC); 2430 PAGE_CACHE_SIZE, EXTENT_DELALLOC);
2423 set_extent_delalloc(em_tree, page_start, 2431 set_extent_delalloc(io_tree, page_start,
2424 page_end, GFP_NOFS); 2432 page_end, GFP_NOFS);
2425 2433
2426 spin_lock(&root->fs_info->delalloc_lock); 2434 spin_lock(&root->fs_info->delalloc_lock);
@@ -2428,7 +2436,7 @@ int btrfs_defrag_file(struct file *file) {
2428 existing_delalloc; 2436 existing_delalloc;
2429 spin_unlock(&root->fs_info->delalloc_lock); 2437 spin_unlock(&root->fs_info->delalloc_lock);
2430 2438
2431 unlock_extent(em_tree, page_start, page_end, GFP_NOFS); 2439 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2432 set_page_dirty(page); 2440 set_page_dirty(page);
2433 unlock_page(page); 2441 unlock_page(page);
2434 page_cache_release(page); 2442 page_cache_release(page);
@@ -2842,9 +2850,10 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
2842 inode->i_mapping->a_ops = &btrfs_aops; 2850 inode->i_mapping->a_ops = &btrfs_aops;
2843 inode->i_fop = &btrfs_file_operations; 2851 inode->i_fop = &btrfs_file_operations;
2844 inode->i_op = &btrfs_file_inode_operations; 2852 inode->i_op = &btrfs_file_inode_operations;
2845 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, 2853 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2854 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2846 inode->i_mapping, GFP_NOFS); 2855 inode->i_mapping, GFP_NOFS);
2847 BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops; 2856 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2848 } 2857 }
2849 dir->i_sb->s_dirt = 1; 2858 dir->i_sb->s_dirt = 1;
2850 btrfs_update_inode_block_group(trans, inode); 2859 btrfs_update_inode_block_group(trans, inode);
@@ -2934,7 +2943,7 @@ static struct file_operations btrfs_dir_file_operations = {
2934#endif 2943#endif
2935}; 2944};
2936 2945
2937static struct extent_map_ops btrfs_extent_map_ops = { 2946static struct extent_io_ops btrfs_extent_io_ops = {
2938 .fill_delalloc = run_delalloc_range, 2947 .fill_delalloc = run_delalloc_range,
2939 .writepage_io_hook = btrfs_writepage_io_hook, 2948 .writepage_io_hook = btrfs_writepage_io_hook,
2940 .readpage_io_hook = btrfs_readpage_io_hook, 2949 .readpage_io_hook = btrfs_readpage_io_hook,
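In the inode.c hunks above the checksum hooks keep using the per-range private word, only now on the io tree: btrfs_readpage_io_hook() stores the on-disk crc32 with set_state_private() and btrfs_readpage_end_io_hook() reads it back with get_state_private(). A small sketch of that round trip, assuming those two helpers carry over to the extent_io tree unchanged (illustration only, hypothetical wrappers):

	/* Remember a checksum for the range starting at 'start'. */
	static int remember_csum(struct extent_io_tree *tree, u64 start, u32 csum)
	{
		return set_state_private(tree, start, (u64)csum);
	}

	/* Compare a freshly computed checksum against the stored one. */
	static int check_csum(struct extent_io_tree *tree, u64 start, u32 computed)
	{
		u64 private;

		if (get_state_private(tree, start, &private))
			return -EIO;		/* nothing was stored for this range */
		return ((u32)private == computed) ? 0 : -EIO;
	}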
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8b52c69fda2e..f8a1016600b1 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -468,10 +468,15 @@ static int __init init_btrfs_fs(void)
468 err = btrfs_init_cachep(); 468 err = btrfs_init_cachep();
469 if (err) 469 if (err)
470 goto free_transaction_sys; 470 goto free_transaction_sys;
471 err = extent_map_init(); 471
472 err = extent_io_init();
472 if (err) 473 if (err)
473 goto free_cachep; 474 goto free_cachep;
474 475
476 err = extent_map_init();
477 if (err)
478 goto free_extent_io;
479
475 err = register_filesystem(&btrfs_fs_type); 480 err = register_filesystem(&btrfs_fs_type);
476 if (err) 481 if (err)
477 goto free_extent_map; 482 goto free_extent_map;
@@ -479,6 +484,8 @@ static int __init init_btrfs_fs(void)
479 484
480free_extent_map: 485free_extent_map:
481 extent_map_exit(); 486 extent_map_exit();
487free_extent_io:
488 extent_io_exit();
482free_cachep: 489free_cachep:
483 btrfs_destroy_cachep(); 490 btrfs_destroy_cachep();
484free_transaction_sys: 491free_transaction_sys:
@@ -492,6 +499,7 @@ static void __exit exit_btrfs_fs(void)
492 btrfs_exit_transaction_sys(); 499 btrfs_exit_transaction_sys();
493 btrfs_destroy_cachep(); 500 btrfs_destroy_cachep();
494 extent_map_exit(); 501 extent_map_exit();
502 extent_io_exit();
495 unregister_filesystem(&btrfs_fs_type); 503 unregister_filesystem(&btrfs_fs_type);
496 btrfs_exit_sysfs(); 504 btrfs_exit_sysfs();
497} 505}
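The super.c hunk above initializes the extent_io cache before the extent_map cache and unwinds in the opposite order when a later step fails; module exit tears both down as well. Condensed to just those three steps (a sketch of the ordering, not the full init function):

	static int __init example_init(void)
	{
		int err;

		err = extent_io_init();
		if (err)
			return err;
		err = extent_map_init();
		if (err)
			goto free_extent_io;
		err = register_filesystem(&btrfs_fs_type);
		if (err)
			goto free_extent_map;
		return 0;

	free_extent_map:
		extent_map_exit();
	free_extent_io:
		extent_io_exit();
		return err;
	}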
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 163c01a24498..b4a1bc62a784 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -70,7 +70,7 @@ static int join_transaction(struct btrfs_root *root)
70 INIT_LIST_HEAD(&cur_trans->pending_snapshots); 70 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
71 list_add_tail(&cur_trans->list, &root->fs_info->trans_list); 71 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
72 btrfs_ordered_inode_tree_init(&cur_trans->ordered_inode_tree); 72 btrfs_ordered_inode_tree_init(&cur_trans->ordered_inode_tree);
73 extent_map_tree_init(&cur_trans->dirty_pages, 73 extent_io_tree_init(&cur_trans->dirty_pages,
74 root->fs_info->btree_inode->i_mapping, 74 root->fs_info->btree_inode->i_mapping,
75 GFP_NOFS); 75 GFP_NOFS);
76 } else { 76 } else {
@@ -153,7 +153,7 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
153 int ret; 153 int ret;
154 int err; 154 int err;
155 int werr = 0; 155 int werr = 0;
156 struct extent_map_tree *dirty_pages; 156 struct extent_io_tree *dirty_pages;
157 struct page *page; 157 struct page *page;
158 struct inode *btree_inode = root->fs_info->btree_inode; 158 struct inode *btree_inode = root->fs_info->btree_inode;
159 u64 start; 159 u64 start;
@@ -610,7 +610,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
610 struct btrfs_transaction *cur_trans; 610 struct btrfs_transaction *cur_trans;
611 struct btrfs_transaction *prev_trans = NULL; 611 struct btrfs_transaction *prev_trans = NULL;
612 struct list_head dirty_fs_roots; 612 struct list_head dirty_fs_roots;
613 struct extent_map_tree *pinned_copy; 613 struct extent_io_tree *pinned_copy;
614 DEFINE_WAIT(wait); 614 DEFINE_WAIT(wait);
615 int ret; 615 int ret;
616 616
@@ -639,7 +639,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
639 if (!pinned_copy) 639 if (!pinned_copy)
640 return -ENOMEM; 640 return -ENOMEM;
641 641
642 extent_map_tree_init(pinned_copy, 642 extent_io_tree_init(pinned_copy,
643 root->fs_info->btree_inode->i_mapping, GFP_NOFS); 643 root->fs_info->btree_inode->i_mapping, GFP_NOFS);
644 644
645 trans->transaction->in_commit = 1; 645 trans->transaction->in_commit = 1;
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index fd52e9b23922..c3172ddb3321 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -29,7 +29,7 @@ struct btrfs_transaction {
29 int use_count; 29 int use_count;
30 int commit_done; 30 int commit_done;
31 struct list_head list; 31 struct list_head list;
32 struct extent_map_tree dirty_pages; 32 struct extent_io_tree dirty_pages;
33 unsigned long start_time; 33 unsigned long start_time;
34 struct btrfs_ordered_inode_tree ordered_inode_tree; 34 struct btrfs_ordered_inode_tree ordered_inode_tree;
35 wait_queue_head_t writer_wait; 35 wait_queue_head_t writer_wait;