author    Chris Mason <chris.mason@oracle.com>  2008-01-24 16:13:08 -0500
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:03:59 -0400
commit    d1310b2e0cd98eb1348553e69b73827b436dca7b (patch)
tree      f55e9c380df0f3a7e9fb792b2308b52b632b90aa /fs/btrfs/file.c
parent    5f56406aabdf5444d040c5955effc665b1d0dbaf (diff)
Btrfs: Split the extent_map code into two parts
There is now extent_map for mapping offsets in the file to disk and extent_io for state tracking, IO submission and extent_buffers.

The new extent_map code shifts from [start,end] pairs to [start,len] and pushes the locking out into the caller. This allows a few performance optimizations and is easier to use.

A number of extent_map usage bugs were fixed, mostly failures to remove extent_map entries when changing the file.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
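To make the caller-side locking concrete, here is a minimal illustrative sketch (not part of the patch) of the pattern the new API expects. It mirrors the btrfs_drop_extent_cache() hunk in the diff below; the helper name drop_cached_extents() is hypothetical.

/*
 * Illustrative sketch only: with the new extent_map code the tree does
 * not lock itself, so the caller takes em_tree->lock around lookup and
 * removal, then drops both references it now holds.
 */
static void drop_cached_extents(struct extent_map_tree *em_tree,
				u64 start, u64 end)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, end);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		remove_extent_mapping(em_tree, em);
		spin_unlock(&em_tree->lock);

		/* once for us, once for the tree */
		free_extent_map(em);
		free_extent_map(em);
	}
}

In the old interface the locking happened inside the extent_map code itself; pushing it out to the caller, as above, is what enables the performance optimizations mentioned in the commit message.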
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--  fs/btrfs/file.c  29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 1cd8c908811e..c5bb00f92396 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -233,8 +233,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	int err = 0;
 	int i;
 	struct inode *inode = fdentry(file)->d_inode;
-	struct extent_map *em;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	u64 hint_byte;
 	u64 num_bytes;
 	u64 start_pos;
@@ -242,11 +241,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	u64 end_pos = pos + write_bytes;
 	u64 inline_size;
 	loff_t isize = i_size_read(inode);
-	em = alloc_extent_map(GFP_NOFS);
-	if (!em)
-		return -ENOMEM;
-
-	em->bdev = inode->i_sb->s_bdev;
 
 	start_pos = pos & ~((u64)root->sectorsize - 1);
 	num_bytes = (write_bytes + pos - start_pos +
@@ -254,7 +248,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 
 	end_of_last_block = start_pos + num_bytes - 1;
 
-	lock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
+	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
 	mutex_lock(&root->fs_info->fs_mutex);
 	trans = btrfs_start_transaction(root, 1);
 	if (!trans) {
@@ -268,7 +262,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	if ((end_of_last_block & 4095) == 0) {
 		printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
 	}
-	set_extent_uptodate(em_tree, start_pos, end_of_last_block, GFP_NOFS);
+	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
 
 	/* FIXME...EIEIO, ENOSPC and more */
 
@@ -293,6 +287,8 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 				       inode->i_ino,
 				       last_pos_in_file,
 				       0, 0, hole_size);
+			btrfs_drop_extent_cache(inode, last_pos_in_file,
+					last_pos_in_file + hole_size -1);
 			btrfs_check_file(root, inode);
 		}
 		if (err)
@@ -320,12 +316,12 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		last_end += PAGE_CACHE_SIZE - 1;
 	if (start_pos < isize) {
 		u64 delalloc_start = start_pos;
-		existing_delalloc = count_range_bits(em_tree,
+		existing_delalloc = count_range_bits(io_tree,
 					     &delalloc_start,
 					     end_of_last_block, (u64)-1,
 					     EXTENT_DELALLOC);
 	}
-	set_extent_delalloc(em_tree, start_pos, end_of_last_block,
+	set_extent_delalloc(io_tree, start_pos, end_of_last_block,
 			 GFP_NOFS);
 	spin_lock(&root->fs_info->delalloc_lock);
 	root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
@@ -346,6 +342,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		inline_size -= start_pos;
 		err = insert_inline_extent(trans, root, inode, start_pos,
 					   inline_size, pages, 0, num_pages);
+		btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
 		BUG_ON(err);
 	}
 	if (end_pos > isize) {
@@ -356,8 +353,7 @@ failed:
 	err = btrfs_end_transaction(trans, root);
 out_unlock:
 	mutex_unlock(&root->fs_info->fs_mutex);
-	unlock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
-	free_extent_map(em);
+	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
 	return err;
 }
 
@@ -367,10 +363,15 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 
 	while(1) {
+		spin_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, end);
-		if (!em)
+		if (!em) {
+			spin_unlock(&em_tree->lock);
 			break;
+		}
 		remove_extent_mapping(em_tree, em);
+		spin_unlock(&em_tree->lock);
+
 		/* once for us */
 		free_extent_map(em);
 		/* once for the tree*/