aboutsummaryrefslogtreecommitdiffstats
path: root/fs/jbd
diff options
context:
space:
mode:
authorLukas Czerner <lczerner@redhat.com>2013-05-21 23:26:36 -0400
committerTheodore Ts'o <tytso@mit.edu>2013-05-21 23:26:36 -0400
commitd8c8900ac113d2b2b3d382acda198b4ae29b1b51 (patch)
tree7d5874b39bd61a2d31ff3dbf537cddd4b5b5e20a /fs/jbd
parentca99fdd26b450cfc1dbcb6b01e1753ee5e6c88e0 (diff)
jbd: change journal_invalidatepage() to accept length
->invalidatepage() aop now accepts range to invalidate so we can make use of it in journal_invalidatepage() and all the users in ext3 file system. Also update ext3 trace point to print out length argument.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Diffstat (limited to 'fs/jbd')
-rw-r--r--fs/jbd/transaction.c19
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index e3e255c0a509..be0c39b66fe0 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -2019,16 +2019,20 @@ zap_buffer_unlocked:
  * void journal_invalidatepage() - invalidate a journal page
  * @journal: journal to use for flush
  * @page: page to flush
- * @offset: length of page to invalidate.
+ * @offset: offset of the range to invalidate
+ * @length: length of the range to invalidate
  *
- * Reap page buffers containing data after offset in page.
+ * Reap page buffers containing data in specified range in page.
  */
 void journal_invalidatepage(journal_t *journal,
 			    struct page *page,
-			    unsigned long offset)
+			    unsigned int offset,
+			    unsigned int length)
 {
 	struct buffer_head *head, *bh, *next;
+	unsigned int stop = offset + length;
 	unsigned int curr_off = 0;
+	int partial_page = (offset || length < PAGE_CACHE_SIZE);
 	int may_free = 1;
 
 	if (!PageLocked(page))
@@ -2036,6 +2040,8 @@ void journal_invalidatepage(journal_t *journal,
 	if (!page_has_buffers(page))
 		return;
 
+	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
 	/* We will potentially be playing with lists other than just the
 	 * data lists (especially for journaled data mode), so be
 	 * cautious in our locking. */
@@ -2045,11 +2051,14 @@ void journal_invalidatepage(journal_t *journal,
 		unsigned int next_off = curr_off + bh->b_size;
 		next = bh->b_this_page;
 
+		if (next_off > stop)
+			return;
+
 		if (offset <= curr_off) {
 			/* This block is wholly outside the truncation point */
 			lock_buffer(bh);
 			may_free &= journal_unmap_buffer(journal, bh,
-							 offset > 0);
+							 partial_page);
 			unlock_buffer(bh);
 		}
 		curr_off = next_off;
@@ -2057,7 +2066,7 @@ void journal_invalidatepage(journal_t *journal,
 
 	} while (bh != head);
 
-	if (!offset) {
+	if (!partial_page) {
 		if (may_free && try_to_free_buffers(page))
 			J_ASSERT(!page_has_buffers(page));
 	}