about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorJosef Bacik <jbacik@fusionio.com>2013-10-07 22:11:09 -0400
committerChris Mason <chris.mason@fusionio.com>2013-10-10 21:27:56 -0400
commit7bf811a595a895b7a886dcf218d0d34f97df76dc (patch)
tree84ac740c10a366b012a3b5f0a4551fe944fbf1d1 /fs
parent4871c1588f92c6c13f4713a7009f25f217055807 (diff)
Btrfs: limit delalloc pages outside of find_delalloc_range
Liu fixed part of this problem, and unfortunately I steered him in slightly the wrong direction and so didn't completely fix the problem. The problem is we limit the size of the delalloc range we are looking for to max bytes and then we try to lock that range. If we fail to lock the pages in that range, we will shrink the max bytes to a single page and re-loop. However, if our first page is inside of the delalloc range, then we will end up limiting the end of the range to a period before our first page. This is illustrated below [0 -------- delalloc range --------- 256mb] [page] So find_delalloc_range will return with delalloc_start as 0 and end as 128mb, and then we will notice that delalloc_start < *start and adjust it up, but not adjust delalloc_end up, so things go sideways. To fix this we need to not limit the max bytes in find_delalloc_range, but in find_lock_delalloc_range, and that way we don't end up with this confusion. Thanks, Signed-off-by: Josef Bacik <jbacik@fusionio.com> Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/extent_io.c12
1 file changed, 4 insertions, 8 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 43feb4663f5b..d8ea0cb200b4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1482,10 +1482,8 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1482 cur_start = state->end + 1; 1482 cur_start = state->end + 1;
1483 node = rb_next(node); 1483 node = rb_next(node);
1484 total_bytes += state->end - state->start + 1; 1484 total_bytes += state->end - state->start + 1;
1485 if (total_bytes >= max_bytes) { 1485 if (total_bytes >= max_bytes)
1486 *end = *start + max_bytes - 1;
1487 break; 1486 break;
1488 }
1489 if (!node) 1487 if (!node)
1490 break; 1488 break;
1491 } 1489 }
@@ -1627,10 +1625,9 @@ again:
1627 1625
1628 /* 1626 /*
1629 * make sure to limit the number of pages we try to lock down 1627 * make sure to limit the number of pages we try to lock down
1630 * if we're looping.
1631 */ 1628 */
1632 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) 1629 if (delalloc_end + 1 - delalloc_start > max_bytes)
1633 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; 1630 delalloc_end = delalloc_start + max_bytes - 1;
1634 1631
1635 /* step two, lock all the pages after the page that has start */ 1632 /* step two, lock all the pages after the page that has start */
1636 ret = lock_delalloc_pages(inode, locked_page, 1633 ret = lock_delalloc_pages(inode, locked_page,
@@ -1641,8 +1638,7 @@ again:
1641 */ 1638 */
1642 free_extent_state(cached_state); 1639 free_extent_state(cached_state);
1643 if (!loops) { 1640 if (!loops) {
1644 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1); 1641 max_bytes = PAGE_CACHE_SIZE;
1645 max_bytes = PAGE_CACHE_SIZE - offset;
1646 loops = 1; 1642 loops = 1;
1647 goto again; 1643 goto again;
1648 } else { 1644 } else {