about summary refs log tree commit diff stats
path: root/fs/btrfs/file.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-04-17 11:29:12 -0400
committerChris Mason <chris.mason@oracle.com>2008-09-25 11:04:01 -0400
commit3b951516ed703af0f6d82053937655ad69b60864 (patch)
tree616e648b8683e6bb2996c7ce6bafb8f592244698 /fs/btrfs/file.c
parent7b859fe7cd383fa5e7e379884a748680d0af7bcc (diff)
Btrfs: Use the extent map cache to find the logical disk block during data retries
The data read retry code needs to find the logical disk block before it can resubmit new bios. But, finding this block isn't allowed to take the fs_mutex because that will deadlock with a number of different callers. This changes the retry code to use the extent map cache instead, but that requires the extent map cache to have the extent we're looking for. This is a problem because btrfs_drop_extent_cache just drops the entire extent instead of the little tiny part it is invalidating. The bulk of the code in this patch changes btrfs_drop_extent_cache to invalidate only a portion of the extent cache, and changes btrfs_get_extent to deal with the results. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--fs/btrfs/file.c47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9fbda6552069..3f5525f0834c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -356,12 +356,23 @@ out_unlock:
356int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) 356int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
357{ 357{
358 struct extent_map *em; 358 struct extent_map *em;
359 struct extent_map *split = NULL;
360 struct extent_map *split2 = NULL;
359 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 361 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
360 u64 len = end - start + 1; 362 u64 len = end - start + 1;
363 int ret;
364 int testend = 1;
361 365
362 if (end == (u64)-1) 366 if (end == (u64)-1) {
363 len = (u64)-1; 367 len = (u64)-1;
368 testend = 0;
369 }
364 while(1) { 370 while(1) {
371 if (!split)
372 split = alloc_extent_map(GFP_NOFS);
373 if (!split2)
374 split2 = alloc_extent_map(GFP_NOFS);
375
365 spin_lock(&em_tree->lock); 376 spin_lock(&em_tree->lock);
366 em = lookup_extent_mapping(em_tree, start, len); 377 em = lookup_extent_mapping(em_tree, start, len);
367 if (!em) { 378 if (!em) {
@@ -369,6 +380,36 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
369 break; 380 break;
370 } 381 }
371 remove_extent_mapping(em_tree, em); 382 remove_extent_mapping(em_tree, em);
383
384 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
385 em->start < start) {
386 split->start = em->start;
387 split->len = start - em->start;
388 split->block_start = em->block_start;
389 split->bdev = em->bdev;
390 split->flags = em->flags;
391 ret = add_extent_mapping(em_tree, split);
392 BUG_ON(ret);
393 free_extent_map(split);
394 split = split2;
395 split2 = NULL;
396 }
397 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
398 testend && em->start + em->len > start + len) {
399 u64 diff = start + len - em->start;
400
401 split->start = start + len;
402 split->len = em->start + em->len - (start + len);
403 split->bdev = em->bdev;
404 split->flags = em->flags;
405
406 split->block_start = em->block_start + diff;
407
408 ret = add_extent_mapping(em_tree, split);
409 BUG_ON(ret);
410 free_extent_map(split);
411 split = NULL;
412 }
372 spin_unlock(&em_tree->lock); 413 spin_unlock(&em_tree->lock);
373 414
374 /* once for us */ 415 /* once for us */
@@ -376,6 +417,10 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
376 /* once for the tree*/ 417 /* once for the tree*/
377 free_extent_map(em); 418 free_extent_map(em);
378 } 419 }
420 if (split)
421 free_extent_map(split);
422 if (split2)
423 free_extent_map(split2);
379 return 0; 424 return 0;
380} 425}
381 426