diff options
Diffstat (limited to 'fs/mpage.c')
-rw-r--r-- | fs/mpage.c | 22 |
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/fs/mpage.c b/fs/mpage.c
index 6bd9fd90964e..eedc644b78d7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) | |||
107 | * don't make any buffers if there is only one buffer on | 107 | * don't make any buffers if there is only one buffer on |
108 | * the page and the page just needs to be set up to date | 108 | * the page and the page just needs to be set up to date |
109 | */ | 109 | */ |
110 | if (inode->i_blkbits == PAGE_CACHE_SHIFT && | 110 | if (inode->i_blkbits == PAGE_SHIFT && |
111 | buffer_uptodate(bh)) { | 111 | buffer_uptodate(bh)) { |
112 | SetPageUptodate(page); | 112 | SetPageUptodate(page); |
113 | return; | 113 | return; |
@@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
145 | { | 145 | { |
146 | struct inode *inode = page->mapping->host; | 146 | struct inode *inode = page->mapping->host; |
147 | const unsigned blkbits = inode->i_blkbits; | 147 | const unsigned blkbits = inode->i_blkbits; |
148 | const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; | 148 | const unsigned blocks_per_page = PAGE_SIZE >> blkbits; |
149 | const unsigned blocksize = 1 << blkbits; | 149 | const unsigned blocksize = 1 << blkbits; |
150 | sector_t block_in_file; | 150 | sector_t block_in_file; |
151 | sector_t last_block; | 151 | sector_t last_block; |
@@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
162 | if (page_has_buffers(page)) | 162 | if (page_has_buffers(page)) |
163 | goto confused; | 163 | goto confused; |
164 | 164 | ||
165 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); | 165 | block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); |
166 | last_block = block_in_file + nr_pages * blocks_per_page; | 166 | last_block = block_in_file + nr_pages * blocks_per_page; |
167 | last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; | 167 | last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; |
168 | if (last_block > last_block_in_file) | 168 | if (last_block > last_block_in_file) |
@@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
249 | } | 249 | } |
250 | 250 | ||
251 | if (first_hole != blocks_per_page) { | 251 | if (first_hole != blocks_per_page) { |
252 | zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE); | 252 | zero_user_segment(page, first_hole << blkbits, PAGE_SIZE); |
253 | if (first_hole == 0) { | 253 | if (first_hole == 0) { |
254 | SetPageUptodate(page); | 254 | SetPageUptodate(page); |
255 | unlock_page(page); | 255 | unlock_page(page); |
@@ -331,7 +331,7 @@ confused: | |||
331 | * | 331 | * |
332 | * then this code just gives up and calls the buffer_head-based read function. | 332 | * then this code just gives up and calls the buffer_head-based read function. |
333 | * It does handle a page which has holes at the end - that is a common case: | 333 | * It does handle a page which has holes at the end - that is a common case: |
334 | * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. | 334 | * the end-of-file on blocksize < PAGE_SIZE setups. |
335 | * | 335 | * |
336 | * BH_Boundary explanation: | 336 | * BH_Boundary explanation: |
337 | * | 337 | * |
@@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, | |||
380 | &first_logical_block, | 380 | &first_logical_block, |
381 | get_block, gfp); | 381 | get_block, gfp); |
382 | } | 382 | } |
383 | page_cache_release(page); | 383 | put_page(page); |
384 | } | 384 | } |
385 | BUG_ON(!list_empty(pages)); | 385 | BUG_ON(!list_empty(pages)); |
386 | if (bio) | 386 | if (bio) |
@@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | |||
472 | struct inode *inode = page->mapping->host; | 472 | struct inode *inode = page->mapping->host; |
473 | const unsigned blkbits = inode->i_blkbits; | 473 | const unsigned blkbits = inode->i_blkbits; |
474 | unsigned long end_index; | 474 | unsigned long end_index; |
475 | const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; | 475 | const unsigned blocks_per_page = PAGE_SIZE >> blkbits; |
476 | sector_t last_block; | 476 | sector_t last_block; |
477 | sector_t block_in_file; | 477 | sector_t block_in_file; |
478 | sector_t blocks[MAX_BUF_PER_PAGE]; | 478 | sector_t blocks[MAX_BUF_PER_PAGE]; |
@@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | |||
542 | * The page has no buffers: map it to disk | 542 | * The page has no buffers: map it to disk |
543 | */ | 543 | */ |
544 | BUG_ON(!PageUptodate(page)); | 544 | BUG_ON(!PageUptodate(page)); |
545 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); | 545 | block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); |
546 | last_block = (i_size - 1) >> blkbits; | 546 | last_block = (i_size - 1) >> blkbits; |
547 | map_bh.b_page = page; | 547 | map_bh.b_page = page; |
548 | for (page_block = 0; page_block < blocks_per_page; ) { | 548 | for (page_block = 0; page_block < blocks_per_page; ) { |
@@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | |||
574 | first_unmapped = page_block; | 574 | first_unmapped = page_block; |
575 | 575 | ||
576 | page_is_mapped: | 576 | page_is_mapped: |
577 | end_index = i_size >> PAGE_CACHE_SHIFT; | 577 | end_index = i_size >> PAGE_SHIFT; |
578 | if (page->index >= end_index) { | 578 | if (page->index >= end_index) { |
579 | /* | 579 | /* |
580 | * The page straddles i_size. It must be zeroed out on each | 580 | * The page straddles i_size. It must be zeroed out on each |
@@ -584,11 +584,11 @@ page_is_mapped: | |||
584 | * is zeroed when mapped, and writes to that region are not | 584 | * is zeroed when mapped, and writes to that region are not |
585 | * written out to the file." | 585 | * written out to the file." |
586 | */ | 586 | */ |
587 | unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); | 587 | unsigned offset = i_size & (PAGE_SIZE - 1); |
588 | 588 | ||
589 | if (page->index > end_index || !offset) | 589 | if (page->index > end_index || !offset) |
590 | goto confused; | 590 | goto confused; |
591 | zero_user_segment(page, offset, PAGE_CACHE_SIZE); | 591 | zero_user_segment(page, offset, PAGE_SIZE); |
592 | } | 592 | } |
593 | 593 | ||
594 | /* | 594 | /* |