author    Lukas Czerner <lczerner@redhat.com>  2013-05-21 23:17:23 -0400
committer Theodore Ts'o <tytso@mit.edu>        2013-05-21 23:17:23 -0400
commit    d47992f86b307985b3215bcf141d56d1849d71df (patch)
tree      e1ae47bd19185371462c5a273c15276534447349 /fs/buffer.c
parent    c7788792a5e7b0d5d7f96d0766b4cb6112d47d75 (diff)
mm: change invalidatepage prototype to accept length
Currently there is no way to truncate a partial page where the end truncation point is not at the end of the page. This was never needed before, because the existing behaviour was enough for the file system truncate operation to work properly. However, more file systems now support the punch hole feature, and they can benefit from mm being able to truncate a page only up to a certain point.

Specifically, with this functionality truncate_inode_pages_range() can be changed so that it supports truncating a partial page at the end of the range (currently it will BUG_ON() if 'end' is not at the end of the page).

This commit changes the invalidatepage() address space operation prototype to accept the range to be invalidated, and updates all its instances. We also change block_invalidatepage() in the same way, and actually make use of the new length argument by implementing range invalidation.

Actual file system implementations will follow, except for the file systems where the changes are really simple and should not change the behaviour in any way. An implementation of truncate_page_range(), which will be able to accept page-unaligned ranges, will follow as well.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
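Although the hunks below touch only fs/buffer.c, the same (offset, length) pair now flows through every ->invalidatepage implementation. The following is a minimal sketch, not part of this patch, of what a buffer-backed file system's hook looks like with the new prototype; the "myfs" names are hypothetical, and the interfaces assumed are the post-patch ones from <linux/fs.h> and <linux/buffer_head.h>.

/*
 * Sketch only: "myfs" is a hypothetical file system used to show the
 * new ->invalidatepage prototype, not in-tree code.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/*
	 * Forward the (offset, length) range; block_invalidatepage()
	 * now discards only the buffers that lie entirely inside it.
	 */
	block_invalidatepage(page, offset, length);
}

static const struct address_space_operations myfs_aops = {
	.invalidatepage	= myfs_invalidatepage,
	/* other operations omitted */
};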
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--   fs/buffer.c   21
1 file changed, 18 insertions, 3 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index d2a4d1bb2d57..f93392e2df12 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1454,7 +1454,8 @@ static void discard_buffer(struct buffer_head * bh)
  * block_invalidatepage - invalidate part or all of a buffer-backed page
  *
  * @page: the page which is affected
- * @offset: the index of the truncation point
+ * @offset: start of the range to invalidate
+ * @length: length of the range to invalidate
  *
  * block_invalidatepage() is called when all or part of the page has become
  * invalidated by a truncate operation.
@@ -1465,15 +1466,22 @@ static void discard_buffer(struct buffer_head * bh)
  * point. Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-void block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned int offset,
+			  unsigned int length)
 {
 	struct buffer_head *head, *bh, *next;
 	unsigned int curr_off = 0;
+	unsigned int stop = length + offset;
 
 	BUG_ON(!PageLocked(page));
 	if (!page_has_buffers(page))
 		goto out;
 
+	/*
+	 * Check for overflow
+	 */
+	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
 	head = page_buffers(page);
 	bh = head;
 	do {
@@ -1481,6 +1489,12 @@ void block_invalidatepage(struct page *page, unsigned long offset)
 		next = bh->b_this_page;
 
 		/*
+		 * Are we still fully in range ?
+		 */
+		if (next_off > stop)
+			goto out;
+
+		/*
 		 * is this block fully invalidated?
 		 */
 		if (offset <= curr_off)
@@ -1501,6 +1515,7 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
@@ -2841,7 +2856,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 		 * they may have been added in ext3_writepage(). Make them
 		 * freeable here, so the page does not leak.
 		 */
-		do_invalidatepage(page, 0);
+		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 		unlock_page(page);
 		return 0; /* don't care */
 	}
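To see the new range handling in isolation, the following userspace sketch (not kernel code) reproduces the arithmetic the patched block_invalidatepage() performs: compute stop = offset + length, reject overflowing ranges, and discard only the blocks that lie entirely inside [offset, stop). The 4096-byte page and 1024-byte block sizes are assumptions chosen for the example.

/*
 * Userspace illustration of the range logic in the patched
 * block_invalidatepage().  Page and block sizes are example values.
 */
#include <assert.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE  4096u
#define EXAMPLE_BLOCK_SIZE 1024u

static void invalidate_range(unsigned int offset, unsigned int length)
{
	unsigned int curr_off = 0;
	unsigned int stop = length + offset;

	/* Check for overflow, as the kernel code does with BUG_ON(). */
	assert(stop <= EXAMPLE_PAGE_SIZE && stop >= length);

	while (curr_off < EXAMPLE_PAGE_SIZE) {
		unsigned int next_off = curr_off + EXAMPLE_BLOCK_SIZE;

		/* This block would extend past the range: stop here. */
		if (next_off > stop)
			break;

		/* Block lies fully past the start of the range: it would
		 * be discarded (discard_buffer() in the kernel). */
		if (offset <= curr_off)
			printf("discard block at offset %u\n", curr_off);

		curr_off = next_off;
	}
}

int main(void)
{
	/* Invalidate bytes [1024, 3072): blocks 1 and 2 are discarded;
	 * block 0 precedes the range and block 3 lies past its end. */
	invalidate_range(1024, 2048);
	return 0;
}

Note that a buffer straddling either boundary is left untouched: the loop bails out as soon as a block would extend past 'stop', and skips blocks that begin before 'offset'. This conservative behaviour is what the commit message anticipates truncate_inode_pages_range() relying on when it later learns to handle partially truncated pages.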