author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>    2016-04-01 08:29:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>          2016-04-04 13:41:08 -0400
commit     09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree       6cdf210c9c0f981cd22544feeba701892ec19464 /fs/ext4/inode.c
parent     c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time ago with the promise that one day it would be possible to implement the page cache with bigger chunks than PAGE_SIZE. This promise never materialized, and it is unlikely it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to PAGE_SIZE, and it is a constant source of confusion whether a PAGE_CACHE_* or a PAGE_* constant should be used in a particular case, especially on the border between fs and mm. Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much breakage to be doable.

Let's stop pretending that pages in the page cache are special. They are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
 - page_cache_get() -> get_page();
 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using the script below. For some reason, coccinelle doesn't patch header files; I've called spatch on them manually. The only adjustment after coccinelle is a revert of the changes to the PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach. I'll fix them manually in a separate patch. Comments and documentation will also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
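[Editorial aside: to see why the first two rewrites are pure deletions rather than behavior changes, recall that include/linux/pagemap.h defined PAGE_CACHE_SHIFT as PAGE_SHIFT, so the shift distance is always zero. A minimal standalone C sketch, with 12 standing in for the usual x86-64 PAGE_SHIFT (the real value comes from the architecture headers):]

#include <stdio.h>

/* Stand-ins for the kernel definitions: include/linux/pagemap.h
 * defined PAGE_CACHE_SHIFT as PAGE_SHIFT, so the difference is 0.
 * 12 is the usual PAGE_SHIFT on x86-64 (4096-byte pages). */
#define PAGE_SHIFT       12
#define PAGE_CACHE_SHIFT PAGE_SHIFT

int main(void)
{
        unsigned long index = 42;

        /* Shifting by (PAGE_CACHE_SHIFT - PAGE_SHIFT) == 0 is a no-op,
         * which is why the semantic patch can replace the whole
         * expression with the bare operand. */
        printf("%lu\n", index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)); /* prints 42 */
        printf("%lu\n", index >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)); /* prints 42 */
        return 0;
}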
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--   fs/ext4/inode.c   116
1 file changed, 58 insertions(+), 58 deletions(-)
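[Editorial aside: a semantic patch like the one quoted above is applied with coccinelle's spatch tool. An invocation of roughly this shape would rewrite this directory in place; the file name pagecache.cocci is hypothetical, and the exact flags vary between coccinelle versions:

        spatch --sp-file pagecache.cocci --in-place --dir fs/ext4/
]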
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dab84a2530ff..8a43c683eef9 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1057,7 +1057,7 @@ int do_journal_get_write_access(handle_t *handle,
 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                                   get_block_t *get_block)
 {
-        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+        unsigned from = pos & (PAGE_SIZE - 1);
         unsigned to = from + len;
         struct inode *inode = page->mapping->host;
         unsigned block_start, block_end;
@@ -1069,15 +1069,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
         bool decrypt = false;
 
         BUG_ON(!PageLocked(page));
-        BUG_ON(from > PAGE_CACHE_SIZE);
-        BUG_ON(to > PAGE_CACHE_SIZE);
+        BUG_ON(from > PAGE_SIZE);
+        BUG_ON(to > PAGE_SIZE);
         BUG_ON(from > to);
 
         if (!page_has_buffers(page))
                 create_empty_buffers(page, blocksize, 0);
         head = page_buffers(page);
         bbits = ilog2(blocksize);
-        block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+        block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 
         for (bh = head, block_start = 0; bh != head || !block_start;
             block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1159,8 +1159,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
          * we allocate blocks but write fails for some reason
          */
         needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
-        index = pos >> PAGE_CACHE_SHIFT;
-        from = pos & (PAGE_CACHE_SIZE - 1);
+        index = pos >> PAGE_SHIFT;
+        from = pos & (PAGE_SIZE - 1);
         to = from + len;
 
         if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -1188,7 +1188,7 @@ retry_grab:
 retry_journal:
         handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
         if (IS_ERR(handle)) {
-                page_cache_release(page);
+                put_page(page);
                 return PTR_ERR(handle);
         }
 
@@ -1196,7 +1196,7 @@ retry_journal:
         if (page->mapping != mapping) {
                 /* The page got truncated from under us */
                 unlock_page(page);
-                page_cache_release(page);
+                put_page(page);
                 ext4_journal_stop(handle);
                 goto retry_grab;
         }
@@ -1252,7 +1252,7 @@ retry_journal:
                 if (ret == -ENOSPC &&
                     ext4_should_retry_alloc(inode->i_sb, &retries))
                         goto retry_journal;
-                page_cache_release(page);
+                put_page(page);
                 return ret;
         }
         *pagep = page;
@@ -1295,7 +1295,7 @@ static int ext4_write_end(struct file *file,
                 ret = ext4_jbd2_file_inode(handle, inode);
                 if (ret) {
                         unlock_page(page);
-                        page_cache_release(page);
+                        put_page(page);
                         goto errout;
                 }
         }
@@ -1315,7 +1315,7 @@ static int ext4_write_end(struct file *file,
          */
         i_size_changed = ext4_update_inode_size(inode, pos + copied);
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
 
         if (old_size < pos)
                 pagecache_isize_extended(inode, old_size, pos);
@@ -1399,7 +1399,7 @@ static int ext4_journalled_write_end(struct file *file,
         int size_changed = 0;
 
         trace_ext4_journalled_write_end(inode, pos, len, copied);
-        from = pos & (PAGE_CACHE_SIZE - 1);
+        from = pos & (PAGE_SIZE - 1);
         to = from + len;
 
         BUG_ON(!ext4_handle_valid(handle));
@@ -1423,7 +1423,7 @@ static int ext4_journalled_write_end(struct file *file,
         ext4_set_inode_state(inode, EXT4_STATE_JDATA);
         EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
 
         if (old_size < pos)
                 pagecache_isize_extended(inode, old_size, pos);
@@ -1537,7 +1537,7 @@ static void ext4_da_page_release_reservation(struct page *page,
         int num_clusters;
         ext4_fsblk_t lblk;
 
-        BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+        BUG_ON(stop > PAGE_SIZE || stop < length);
 
         head = page_buffers(page);
         bh = head;
@@ -1553,7 +1553,7 @@ static void ext4_da_page_release_reservation(struct page *page,
                         clear_buffer_delay(bh);
                 } else if (contiguous_blks) {
                         lblk = page->index <<
-                                (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                                (PAGE_SHIFT - inode->i_blkbits);
                         lblk += (curr_off >> inode->i_blkbits) -
                                 contiguous_blks;
                         ext4_es_remove_extent(inode, lblk, contiguous_blks);
@@ -1563,7 +1563,7 @@ static void ext4_da_page_release_reservation(struct page *page,
         } while ((bh = bh->b_this_page) != head);
 
         if (contiguous_blks) {
-                lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
                 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
                 ext4_es_remove_extent(inode, lblk, contiguous_blks);
         }
@@ -1572,7 +1572,7 @@ static void ext4_da_page_release_reservation(struct page *page,
          * need to release the reserved space for that cluster. */
         num_clusters = EXT4_NUM_B2C(sbi, to_release);
         while (num_clusters > 0) {
-                lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
+                lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
                         ((num_clusters - 1) << sbi->s_cluster_bits);
                 if (sbi->s_cluster_ratio == 1 ||
                     !ext4_find_delalloc_cluster(inode, lblk))
@@ -1619,8 +1619,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
         end = mpd->next_page - 1;
         if (invalidate) {
                 ext4_lblk_t start, last;
-                start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-                last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                start = index << (PAGE_SHIFT - inode->i_blkbits);
+                last = end << (PAGE_SHIFT - inode->i_blkbits);
                 ext4_es_remove_extent(inode, start, last - start + 1);
         }
 
@@ -1636,7 +1636,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                         BUG_ON(!PageLocked(page));
                         BUG_ON(PageWriteback(page));
                         if (invalidate) {
-                                block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+                                block_invalidatepage(page, 0, PAGE_SIZE);
                                 ClearPageUptodate(page);
                         }
                         unlock_page(page);
@@ -2007,10 +2007,10 @@ static int ext4_writepage(struct page *page,
 
         trace_ext4_writepage(page);
         size = i_size_read(inode);
-        if (page->index == size >> PAGE_CACHE_SHIFT)
-                len = size & ~PAGE_CACHE_MASK;
+        if (page->index == size >> PAGE_SHIFT)
+                len = size & ~PAGE_MASK;
         else
-                len = PAGE_CACHE_SIZE;
+                len = PAGE_SIZE;
 
         page_bufs = page_buffers(page);
         /*
@@ -2034,7 +2034,7 @@ static int ext4_writepage(struct page *page,
                             ext4_bh_delay_or_unwritten)) {
                 redirty_page_for_writepage(wbc, page);
                 if ((current->flags & PF_MEMALLOC) ||
-                    (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
+                    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
                         /*
                          * For memory cleaning there's no point in writing only
                          * some buffers. So just bail out. Warn if we came here
@@ -2076,10 +2076,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
         int err;
 
         BUG_ON(page->index != mpd->first_page);
-        if (page->index == size >> PAGE_CACHE_SHIFT)
-                len = size & ~PAGE_CACHE_MASK;
+        if (page->index == size >> PAGE_SHIFT)
+                len = size & ~PAGE_MASK;
         else
-                len = PAGE_CACHE_SIZE;
+                len = PAGE_SIZE;
         clear_page_dirty_for_io(page);
         err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
         if (!err)
@@ -2213,7 +2213,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
         int nr_pages, i;
         struct inode *inode = mpd->inode;
         struct buffer_head *head, *bh;
-        int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
+        int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
         pgoff_t start, end;
         ext4_lblk_t lblk;
         sector_t pblock;
@@ -2274,7 +2274,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
                          * supports blocksize < pagesize as we will try to
                          * convert potentially unmapped parts of inode.
                          */
-                        mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
+                        mpd->io_submit.io_end->size += PAGE_SIZE;
                         /* Page fully mapped - let IO run! */
                         err = mpage_submit_page(mpd, page);
                         if (err < 0) {
@@ -2426,7 +2426,7 @@ update_disksize:
          * Update on-disk size after IO is submitted. Races with
          * truncate are avoided by checking i_size under i_data_sem.
          */
-        disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+        disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
         if (disksize > EXT4_I(inode)->i_disksize) {
                 int err2;
                 loff_t i_size;
@@ -2562,7 +2562,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                         mpd->next_page = page->index + 1;
                         /* Add all dirty buffers to mpd */
                         lblk = ((ext4_lblk_t)page->index) <<
-                                (PAGE_CACHE_SHIFT - blkbits);
+                                (PAGE_SHIFT - blkbits);
                         head = page_buffers(page);
                         err = mpage_process_page_bufs(mpd, head, head, lblk);
                         if (err <= 0)
@@ -2647,7 +2647,7 @@ static int ext4_writepages(struct address_space *mapping,
                  * We may need to convert up to one extent per block in
                  * the page and we may dirty the inode.
                  */
-                rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
+                rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
         }
 
         /*
@@ -2678,8 +2678,8 @@ static int ext4_writepages(struct address_space *mapping,
                 mpd.first_page = writeback_index;
                 mpd.last_page = -1;
         } else {
-                mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
-                mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
+                mpd.first_page = wbc->range_start >> PAGE_SHIFT;
+                mpd.last_page = wbc->range_end >> PAGE_SHIFT;
         }
 
         mpd.inode = inode;
@@ -2838,7 +2838,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
         struct inode *inode = mapping->host;
         handle_t *handle;
 
-        index = pos >> PAGE_CACHE_SHIFT;
+        index = pos >> PAGE_SHIFT;
 
         if (ext4_nonda_switch(inode->i_sb)) {
                 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -2881,7 +2881,7 @@ retry_journal:
         handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
                                 ext4_da_write_credits(inode, pos, len));
         if (IS_ERR(handle)) {
-                page_cache_release(page);
+                put_page(page);
                 return PTR_ERR(handle);
         }
 
@@ -2889,7 +2889,7 @@ retry_journal:
         if (page->mapping != mapping) {
                 /* The page got truncated from under us */
                 unlock_page(page);
-                page_cache_release(page);
+                put_page(page);
                 ext4_journal_stop(handle);
                 goto retry_grab;
         }
@@ -2917,7 +2917,7 @@ retry_journal:
                     ext4_should_retry_alloc(inode->i_sb, &retries))
                         goto retry_journal;
 
-                page_cache_release(page);
+                put_page(page);
                 return ret;
         }
 
@@ -2965,7 +2965,7 @@ static int ext4_da_write_end(struct file *file,
                                       len, copied, page, fsdata);
 
         trace_ext4_da_write_end(inode, pos, len, copied);
-        start = pos & (PAGE_CACHE_SIZE - 1);
+        start = pos & (PAGE_SIZE - 1);
         end = start + copied - 1;
 
         /*
@@ -3187,7 +3187,7 @@ static int __ext4_journalled_invalidatepage(struct page *page,
         /*
          * If it's a full truncate we just forget about the pending dirtying
          */
-        if (offset == 0 && length == PAGE_CACHE_SIZE)
+        if (offset == 0 && length == PAGE_SIZE)
                 ClearPageChecked(page);
 
         return jbd2_journal_invalidatepage(journal, page, offset, length);
@@ -3556,8 +3556,8 @@ void ext4_set_aops(struct inode *inode)
 static int __ext4_block_zero_page_range(handle_t *handle,
                 struct address_space *mapping, loff_t from, loff_t length)
 {
-        ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-        unsigned offset = from & (PAGE_CACHE_SIZE-1);
+        ext4_fsblk_t index = from >> PAGE_SHIFT;
+        unsigned offset = from & (PAGE_SIZE-1);
         unsigned blocksize, pos;
         ext4_lblk_t iblock;
         struct inode *inode = mapping->host;
@@ -3565,14 +3565,14 @@ static int __ext4_block_zero_page_range(handle_t *handle,
         struct page *page;
         int err = 0;
 
-        page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+        page = find_or_create_page(mapping, from >> PAGE_SHIFT,
                                    mapping_gfp_constraint(mapping, ~__GFP_FS));
         if (!page)
                 return -ENOMEM;
 
         blocksize = inode->i_sb->s_blocksize;
 
-        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+        iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 
         if (!page_has_buffers(page))
                 create_empty_buffers(page, blocksize, 0);
@@ -3614,7 +3614,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                     ext4_encrypted_inode(inode)) {
                         /* We expect the key to be set. */
                         BUG_ON(!ext4_has_encryption_key(inode));
-                        BUG_ON(blocksize != PAGE_CACHE_SIZE);
+                        BUG_ON(blocksize != PAGE_SIZE);
                         WARN_ON_ONCE(ext4_decrypt(page));
                 }
         }
@@ -3638,7 +3638,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 
 unlock:
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
         return err;
 }
 
@@ -3653,7 +3653,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
                 struct address_space *mapping, loff_t from, loff_t length)
 {
         struct inode *inode = mapping->host;
-        unsigned offset = from & (PAGE_CACHE_SIZE-1);
+        unsigned offset = from & (PAGE_SIZE-1);
         unsigned blocksize = inode->i_sb->s_blocksize;
         unsigned max = blocksize - (offset & (blocksize - 1));
 
@@ -3678,7 +3678,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
 static int ext4_block_truncate_page(handle_t *handle,
                 struct address_space *mapping, loff_t from)
 {
-        unsigned offset = from & (PAGE_CACHE_SIZE-1);
+        unsigned offset = from & (PAGE_SIZE-1);
         unsigned length;
         unsigned blocksize;
         struct inode *inode = mapping->host;
@@ -3816,7 +3816,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
          */
         if (offset + length > inode->i_size) {
                 length = inode->i_size +
-                        PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
+                        PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
                         offset;
         }
 
@@ -4891,23 +4891,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
         tid_t commit_tid = 0;
         int ret;
 
-        offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+        offset = inode->i_size & (PAGE_SIZE - 1);
         /*
          * All buffers in the last page remain valid? Then there's nothing to
          * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
          * blocksize case
          */
-        if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+        if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
                 return;
         while (1) {
                 page = find_lock_page(inode->i_mapping,
-                                      inode->i_size >> PAGE_CACHE_SHIFT);
+                                      inode->i_size >> PAGE_SHIFT);
                 if (!page)
                         return;
                 ret = __ext4_journalled_invalidatepage(page, offset,
-                                                PAGE_CACHE_SIZE - offset);
+                                                PAGE_SIZE - offset);
                 unlock_page(page);
-                page_cache_release(page);
+                put_page(page);
                 if (ret != -EBUSY)
                         return;
                 commit_tid = 0;
@@ -5546,10 +5546,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                 goto out;
         }
 
-        if (page->index == size >> PAGE_CACHE_SHIFT)
-                len = size & ~PAGE_CACHE_MASK;
+        if (page->index == size >> PAGE_SHIFT)
+                len = size & ~PAGE_MASK;
         else
-                len = PAGE_CACHE_SIZE;
+                len = PAGE_SIZE;
         /*
          * Return if we have all the buffers mapped. This avoids the need to do
          * journal_start/journal_stop which can block and take a long time
@@ -5580,7 +5580,7 @@ retry_alloc:
         ret = block_page_mkwrite(vma, vmf, get_block);
         if (!ret && ext4_should_journal_data(inode)) {
                 if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
-                          PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
+                          PAGE_SIZE, NULL, do_journal_get_write_access)) {
                         unlock_page(page);
                         ret = VM_FAULT_SIGBUS;
                         ext4_journal_stop(handle);