diff options
Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- | fs/buffer.c | 100 |
1 file changed, 50 insertions(+), 50 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c index 33be29675358..af0d9a82a8ed 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page) | |||
129 | { | 129 | { |
130 | ClearPagePrivate(page); | 130 | ClearPagePrivate(page); |
131 | set_page_private(page, 0); | 131 | set_page_private(page, 0); |
132 | page_cache_release(page); | 132 | put_page(page); |
133 | } | 133 | } |
134 | 134 | ||
135 | static void buffer_io_error(struct buffer_head *bh, char *msg) | 135 | static void buffer_io_error(struct buffer_head *bh, char *msg) |
@@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) | |||
207 | struct page *page; | 207 | struct page *page; |
208 | int all_mapped = 1; | 208 | int all_mapped = 1; |
209 | 209 | ||
210 | index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); | 210 | index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); |
211 | page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); | 211 | page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); |
212 | if (!page) | 212 | if (!page) |
213 | goto out; | 213 | goto out; |
@@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) | |||
245 | } | 245 | } |
246 | out_unlock: | 246 | out_unlock: |
247 | spin_unlock(&bd_mapping->private_lock); | 247 | spin_unlock(&bd_mapping->private_lock); |
248 | page_cache_release(page); | 248 | put_page(page); |
249 | out: | 249 | out: |
250 | return ret; | 250 | return ret; |
251 | } | 251 | } |
@@ -1040,7 +1040,7 @@ done: | |||
1040 | ret = (block < end_block) ? 1 : -ENXIO; | 1040 | ret = (block < end_block) ? 1 : -ENXIO; |
1041 | failed: | 1041 | failed: |
1042 | unlock_page(page); | 1042 | unlock_page(page); |
1043 | page_cache_release(page); | 1043 | put_page(page); |
1044 | return ret; | 1044 | return ret; |
1045 | } | 1045 | } |
1046 | 1046 | ||
@@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset, | |||
1533 | /* | 1533 | /* |
1534 | * Check for overflow | 1534 | * Check for overflow |
1535 | */ | 1535 | */ |
1536 | BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); | 1536 | BUG_ON(stop > PAGE_SIZE || stop < length); |
1537 | 1537 | ||
1538 | head = page_buffers(page); | 1538 | head = page_buffers(page); |
1539 | bh = head; | 1539 | bh = head; |
@@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1716 | blocksize = bh->b_size; | 1716 | blocksize = bh->b_size; |
1717 | bbits = block_size_bits(blocksize); | 1717 | bbits = block_size_bits(blocksize); |
1718 | 1718 | ||
1719 | block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); | 1719 | block = (sector_t)page->index << (PAGE_SHIFT - bbits); |
1720 | last_block = (i_size_read(inode) - 1) >> bbits; | 1720 | last_block = (i_size_read(inode) - 1) >> bbits; |
1721 | 1721 | ||
1722 | /* | 1722 | /* |
@@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers); | |||
1894 | int __block_write_begin(struct page *page, loff_t pos, unsigned len, | 1894 | int __block_write_begin(struct page *page, loff_t pos, unsigned len, |
1895 | get_block_t *get_block) | 1895 | get_block_t *get_block) |
1896 | { | 1896 | { |
1897 | unsigned from = pos & (PAGE_CACHE_SIZE - 1); | 1897 | unsigned from = pos & (PAGE_SIZE - 1); |
1898 | unsigned to = from + len; | 1898 | unsigned to = from + len; |
1899 | struct inode *inode = page->mapping->host; | 1899 | struct inode *inode = page->mapping->host; |
1900 | unsigned block_start, block_end; | 1900 | unsigned block_start, block_end; |
@@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len, | |||
1904 | struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; | 1904 | struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; |
1905 | 1905 | ||
1906 | BUG_ON(!PageLocked(page)); | 1906 | BUG_ON(!PageLocked(page)); |
1907 | BUG_ON(from > PAGE_CACHE_SIZE); | 1907 | BUG_ON(from > PAGE_SIZE); |
1908 | BUG_ON(to > PAGE_CACHE_SIZE); | 1908 | BUG_ON(to > PAGE_SIZE); |
1909 | BUG_ON(from > to); | 1909 | BUG_ON(from > to); |
1910 | 1910 | ||
1911 | head = create_page_buffers(page, inode, 0); | 1911 | head = create_page_buffers(page, inode, 0); |
1912 | blocksize = head->b_size; | 1912 | blocksize = head->b_size; |
1913 | bbits = block_size_bits(blocksize); | 1913 | bbits = block_size_bits(blocksize); |
1914 | 1914 | ||
1915 | block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); | 1915 | block = (sector_t)page->index << (PAGE_SHIFT - bbits); |
1916 | 1916 | ||
1917 | for(bh = head, block_start = 0; bh != head || !block_start; | 1917 | for(bh = head, block_start = 0; bh != head || !block_start; |
1918 | block++, block_start=block_end, bh = bh->b_this_page) { | 1918 | block++, block_start=block_end, bh = bh->b_this_page) { |
@@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page, | |||
2020 | int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, | 2020 | int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, |
2021 | unsigned flags, struct page **pagep, get_block_t *get_block) | 2021 | unsigned flags, struct page **pagep, get_block_t *get_block) |
2022 | { | 2022 | { |
2023 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | 2023 | pgoff_t index = pos >> PAGE_SHIFT; |
2024 | struct page *page; | 2024 | struct page *page; |
2025 | int status; | 2025 | int status; |
2026 | 2026 | ||
@@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, | |||
2031 | status = __block_write_begin(page, pos, len, get_block); | 2031 | status = __block_write_begin(page, pos, len, get_block); |
2032 | if (unlikely(status)) { | 2032 | if (unlikely(status)) { |
2033 | unlock_page(page); | 2033 | unlock_page(page); |
2034 | page_cache_release(page); | 2034 | put_page(page); |
2035 | page = NULL; | 2035 | page = NULL; |
2036 | } | 2036 | } |
2037 | 2037 | ||
@@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping, | |||
2047 | struct inode *inode = mapping->host; | 2047 | struct inode *inode = mapping->host; |
2048 | unsigned start; | 2048 | unsigned start; |
2049 | 2049 | ||
2050 | start = pos & (PAGE_CACHE_SIZE - 1); | 2050 | start = pos & (PAGE_SIZE - 1); |
2051 | 2051 | ||
2052 | if (unlikely(copied < len)) { | 2052 | if (unlikely(copied < len)) { |
2053 | /* | 2053 | /* |
@@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping, | |||
2099 | } | 2099 | } |
2100 | 2100 | ||
2101 | unlock_page(page); | 2101 | unlock_page(page); |
2102 | page_cache_release(page); | 2102 | put_page(page); |
2103 | 2103 | ||
2104 | if (old_size < pos) | 2104 | if (old_size < pos) |
2105 | pagecache_isize_extended(inode, old_size, pos); | 2105 | pagecache_isize_extended(inode, old_size, pos); |
@@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from, | |||
2136 | 2136 | ||
2137 | head = page_buffers(page); | 2137 | head = page_buffers(page); |
2138 | blocksize = head->b_size; | 2138 | blocksize = head->b_size; |
2139 | to = min_t(unsigned, PAGE_CACHE_SIZE - from, count); | 2139 | to = min_t(unsigned, PAGE_SIZE - from, count); |
2140 | to = from + to; | 2140 | to = from + to; |
2141 | if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) | 2141 | if (from < blocksize && to > PAGE_SIZE - blocksize) |
2142 | return 0; | 2142 | return 0; |
2143 | 2143 | ||
2144 | bh = head; | 2144 | bh = head; |
@@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) | |||
2181 | blocksize = head->b_size; | 2181 | blocksize = head->b_size; |
2182 | bbits = block_size_bits(blocksize); | 2182 | bbits = block_size_bits(blocksize); |
2183 | 2183 | ||
2184 | iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); | 2184 | iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); |
2185 | lblock = (i_size_read(inode)+blocksize-1) >> bbits; | 2185 | lblock = (i_size_read(inode)+blocksize-1) >> bbits; |
2186 | bh = head; | 2186 | bh = head; |
2187 | nr = 0; | 2187 | nr = 0; |
@@ -2295,16 +2295,16 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, | |||
2295 | unsigned zerofrom, offset, len; | 2295 | unsigned zerofrom, offset, len; |
2296 | int err = 0; | 2296 | int err = 0; |
2297 | 2297 | ||
2298 | index = pos >> PAGE_CACHE_SHIFT; | 2298 | index = pos >> PAGE_SHIFT; |
2299 | offset = pos & ~PAGE_CACHE_MASK; | 2299 | offset = pos & ~PAGE_MASK; |
2300 | 2300 | ||
2301 | while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { | 2301 | while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { |
2302 | zerofrom = curpos & ~PAGE_CACHE_MASK; | 2302 | zerofrom = curpos & ~PAGE_MASK; |
2303 | if (zerofrom & (blocksize-1)) { | 2303 | if (zerofrom & (blocksize-1)) { |
2304 | *bytes |= (blocksize-1); | 2304 | *bytes |= (blocksize-1); |
2305 | (*bytes)++; | 2305 | (*bytes)++; |
2306 | } | 2306 | } |
2307 | len = PAGE_CACHE_SIZE - zerofrom; | 2307 | len = PAGE_SIZE - zerofrom; |
2308 | 2308 | ||
2309 | err = pagecache_write_begin(file, mapping, curpos, len, | 2309 | err = pagecache_write_begin(file, mapping, curpos, len, |
2310 | AOP_FLAG_UNINTERRUPTIBLE, | 2310 | AOP_FLAG_UNINTERRUPTIBLE, |
@@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, | |||
2329 | 2329 | ||
2330 | /* page covers the boundary, find the boundary offset */ | 2330 | /* page covers the boundary, find the boundary offset */ |
2331 | if (index == curidx) { | 2331 | if (index == curidx) { |
2332 | zerofrom = curpos & ~PAGE_CACHE_MASK; | 2332 | zerofrom = curpos & ~PAGE_MASK; |
2333 | /* if we will expand the thing last block will be filled */ | 2333 | /* if we will expand the thing last block will be filled */ |
2334 | if (offset <= zerofrom) { | 2334 | if (offset <= zerofrom) { |
2335 | goto out; | 2335 | goto out; |
@@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping, | |||
2375 | if (err) | 2375 | if (err) |
2376 | return err; | 2376 | return err; |
2377 | 2377 | ||
2378 | zerofrom = *bytes & ~PAGE_CACHE_MASK; | 2378 | zerofrom = *bytes & ~PAGE_MASK; |
2379 | if (pos+len > *bytes && zerofrom & (blocksize-1)) { | 2379 | if (pos+len > *bytes && zerofrom & (blocksize-1)) { |
2380 | *bytes |= (blocksize-1); | 2380 | *bytes |= (blocksize-1); |
2381 | (*bytes)++; | 2381 | (*bytes)++; |
@@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
2430 | } | 2430 | } |
2431 | 2431 | ||
2432 | /* page is wholly or partially inside EOF */ | 2432 | /* page is wholly or partially inside EOF */ |
2433 | if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) | 2433 | if (((page->index + 1) << PAGE_SHIFT) > size) |
2434 | end = size & ~PAGE_CACHE_MASK; | 2434 | end = size & ~PAGE_MASK; |
2435 | else | 2435 | else |
2436 | end = PAGE_CACHE_SIZE; | 2436 | end = PAGE_SIZE; |
2437 | 2437 | ||
2438 | ret = __block_write_begin(page, 0, end, get_block); | 2438 | ret = __block_write_begin(page, 0, end, get_block); |
2439 | if (!ret) | 2439 | if (!ret) |
@@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping, | |||
2508 | int ret = 0; | 2508 | int ret = 0; |
2509 | int is_mapped_to_disk = 1; | 2509 | int is_mapped_to_disk = 1; |
2510 | 2510 | ||
2511 | index = pos >> PAGE_CACHE_SHIFT; | 2511 | index = pos >> PAGE_SHIFT; |
2512 | from = pos & (PAGE_CACHE_SIZE - 1); | 2512 | from = pos & (PAGE_SIZE - 1); |
2513 | to = from + len; | 2513 | to = from + len; |
2514 | 2514 | ||
2515 | page = grab_cache_page_write_begin(mapping, index, flags); | 2515 | page = grab_cache_page_write_begin(mapping, index, flags); |
@@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping, | |||
2543 | goto out_release; | 2543 | goto out_release; |
2544 | } | 2544 | } |
2545 | 2545 | ||
2546 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); | 2546 | block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); |
2547 | 2547 | ||
2548 | /* | 2548 | /* |
2549 | * We loop across all blocks in the page, whether or not they are | 2549 | * We loop across all blocks in the page, whether or not they are |
@@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping, | |||
2551 | * page is fully mapped-to-disk. | 2551 | * page is fully mapped-to-disk. |
2552 | */ | 2552 | */ |
2553 | for (block_start = 0, block_in_page = 0, bh = head; | 2553 | for (block_start = 0, block_in_page = 0, bh = head; |
2554 | block_start < PAGE_CACHE_SIZE; | 2554 | block_start < PAGE_SIZE; |
2555 | block_in_page++, block_start += blocksize, bh = bh->b_this_page) { | 2555 | block_in_page++, block_start += blocksize, bh = bh->b_this_page) { |
2556 | int create; | 2556 | int create; |
2557 | 2557 | ||
@@ -2623,7 +2623,7 @@ failed: | |||
2623 | 2623 | ||
2624 | out_release: | 2624 | out_release: |
2625 | unlock_page(page); | 2625 | unlock_page(page); |
2626 | page_cache_release(page); | 2626 | put_page(page); |
2627 | *pagep = NULL; | 2627 | *pagep = NULL; |
2628 | 2628 | ||
2629 | return ret; | 2629 | return ret; |
@@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping, | |||
2653 | } | 2653 | } |
2654 | 2654 | ||
2655 | unlock_page(page); | 2655 | unlock_page(page); |
2656 | page_cache_release(page); | 2656 | put_page(page); |
2657 | 2657 | ||
2658 | while (head) { | 2658 | while (head) { |
2659 | bh = head; | 2659 | bh = head; |
@@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, | |||
2675 | { | 2675 | { |
2676 | struct inode * const inode = page->mapping->host; | 2676 | struct inode * const inode = page->mapping->host; |
2677 | loff_t i_size = i_size_read(inode); | 2677 | loff_t i_size = i_size_read(inode); |
2678 | const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; | 2678 | const pgoff_t end_index = i_size >> PAGE_SHIFT; |
2679 | unsigned offset; | 2679 | unsigned offset; |
2680 | int ret; | 2680 | int ret; |
2681 | 2681 | ||
@@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, | |||
2684 | goto out; | 2684 | goto out; |
2685 | 2685 | ||
2686 | /* Is the page fully outside i_size? (truncate in progress) */ | 2686 | /* Is the page fully outside i_size? (truncate in progress) */ |
2687 | offset = i_size & (PAGE_CACHE_SIZE-1); | 2687 | offset = i_size & (PAGE_SIZE-1); |
2688 | if (page->index >= end_index+1 || !offset) { | 2688 | if (page->index >= end_index+1 || !offset) { |
2689 | /* | 2689 | /* |
2690 | * The page may have dirty, unmapped buffers. For example, | 2690 | * The page may have dirty, unmapped buffers. For example, |
@@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, | |||
2707 | * the page size, the remaining memory is zeroed when mapped, and | 2707 | * the page size, the remaining memory is zeroed when mapped, and |
2708 | * writes to that region are not written out to the file." | 2708 | * writes to that region are not written out to the file." |
2709 | */ | 2709 | */ |
2710 | zero_user_segment(page, offset, PAGE_CACHE_SIZE); | 2710 | zero_user_segment(page, offset, PAGE_SIZE); |
2711 | out: | 2711 | out: |
2712 | ret = mpage_writepage(page, get_block, wbc); | 2712 | ret = mpage_writepage(page, get_block, wbc); |
2713 | if (ret == -EAGAIN) | 2713 | if (ret == -EAGAIN) |
@@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage); | |||
2720 | int nobh_truncate_page(struct address_space *mapping, | 2720 | int nobh_truncate_page(struct address_space *mapping, |
2721 | loff_t from, get_block_t *get_block) | 2721 | loff_t from, get_block_t *get_block) |
2722 | { | 2722 | { |
2723 | pgoff_t index = from >> PAGE_CACHE_SHIFT; | 2723 | pgoff_t index = from >> PAGE_SHIFT; |
2724 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | 2724 | unsigned offset = from & (PAGE_SIZE-1); |
2725 | unsigned blocksize; | 2725 | unsigned blocksize; |
2726 | sector_t iblock; | 2726 | sector_t iblock; |
2727 | unsigned length, pos; | 2727 | unsigned length, pos; |
@@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping, | |||
2738 | return 0; | 2738 | return 0; |
2739 | 2739 | ||
2740 | length = blocksize - length; | 2740 | length = blocksize - length; |
2741 | iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); | 2741 | iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); |
2742 | 2742 | ||
2743 | page = grab_cache_page(mapping, index); | 2743 | page = grab_cache_page(mapping, index); |
2744 | err = -ENOMEM; | 2744 | err = -ENOMEM; |
@@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping, | |||
2748 | if (page_has_buffers(page)) { | 2748 | if (page_has_buffers(page)) { |
2749 | has_buffers: | 2749 | has_buffers: |
2750 | unlock_page(page); | 2750 | unlock_page(page); |
2751 | page_cache_release(page); | 2751 | put_page(page); |
2752 | return block_truncate_page(mapping, from, get_block); | 2752 | return block_truncate_page(mapping, from, get_block); |
2753 | } | 2753 | } |
2754 | 2754 | ||
@@ -2772,7 +2772,7 @@ has_buffers: | |||
2772 | if (!PageUptodate(page)) { | 2772 | if (!PageUptodate(page)) { |
2773 | err = mapping->a_ops->readpage(NULL, page); | 2773 | err = mapping->a_ops->readpage(NULL, page); |
2774 | if (err) { | 2774 | if (err) { |
2775 | page_cache_release(page); | 2775 | put_page(page); |
2776 | goto out; | 2776 | goto out; |
2777 | } | 2777 | } |
2778 | lock_page(page); | 2778 | lock_page(page); |
@@ -2789,7 +2789,7 @@ has_buffers: | |||
2789 | 2789 | ||
2790 | unlock: | 2790 | unlock: |
2791 | unlock_page(page); | 2791 | unlock_page(page); |
2792 | page_cache_release(page); | 2792 | put_page(page); |
2793 | out: | 2793 | out: |
2794 | return err; | 2794 | return err; |
2795 | } | 2795 | } |
@@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page); | |||
2798 | int block_truncate_page(struct address_space *mapping, | 2798 | int block_truncate_page(struct address_space *mapping, |
2799 | loff_t from, get_block_t *get_block) | 2799 | loff_t from, get_block_t *get_block) |
2800 | { | 2800 | { |
2801 | pgoff_t index = from >> PAGE_CACHE_SHIFT; | 2801 | pgoff_t index = from >> PAGE_SHIFT; |
2802 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | 2802 | unsigned offset = from & (PAGE_SIZE-1); |
2803 | unsigned blocksize; | 2803 | unsigned blocksize; |
2804 | sector_t iblock; | 2804 | sector_t iblock; |
2805 | unsigned length, pos; | 2805 | unsigned length, pos; |
@@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping, | |||
2816 | return 0; | 2816 | return 0; |
2817 | 2817 | ||
2818 | length = blocksize - length; | 2818 | length = blocksize - length; |
2819 | iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); | 2819 | iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); |
2820 | 2820 | ||
2821 | page = grab_cache_page(mapping, index); | 2821 | page = grab_cache_page(mapping, index); |
2822 | err = -ENOMEM; | 2822 | err = -ENOMEM; |
@@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping, | |||
2865 | 2865 | ||
2866 | unlock: | 2866 | unlock: |
2867 | unlock_page(page); | 2867 | unlock_page(page); |
2868 | page_cache_release(page); | 2868 | put_page(page); |
2869 | out: | 2869 | out: |
2870 | return err; | 2870 | return err; |
2871 | } | 2871 | } |
@@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, | |||
2879 | { | 2879 | { |
2880 | struct inode * const inode = page->mapping->host; | 2880 | struct inode * const inode = page->mapping->host; |
2881 | loff_t i_size = i_size_read(inode); | 2881 | loff_t i_size = i_size_read(inode); |
2882 | const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; | 2882 | const pgoff_t end_index = i_size >> PAGE_SHIFT; |
2883 | unsigned offset; | 2883 | unsigned offset; |
2884 | 2884 | ||
2885 | /* Is the page fully inside i_size? */ | 2885 | /* Is the page fully inside i_size? */ |
@@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block, | |||
2888 | end_buffer_async_write); | 2888 | end_buffer_async_write); |
2889 | 2889 | ||
2890 | /* Is the page fully outside i_size? (truncate in progress) */ | 2890 | /* Is the page fully outside i_size? (truncate in progress) */ |
2891 | offset = i_size & (PAGE_CACHE_SIZE-1); | 2891 | offset = i_size & (PAGE_SIZE-1); |
2892 | if (page->index >= end_index+1 || !offset) { | 2892 | if (page->index >= end_index+1 || !offset) { |
2893 | /* | 2893 | /* |
2894 | * The page may have dirty, unmapped buffers. For example, | 2894 | * The page may have dirty, unmapped buffers. For example, |
2895 | * they may have been added in ext3_writepage(). Make them | 2895 | * they may have been added in ext3_writepage(). Make them |
2896 | * freeable here, so the page does not leak. | 2896 | * freeable here, so the page does not leak. |
2897 | */ | 2897 | */ |
2898 | do_invalidatepage(page, 0, PAGE_CACHE_SIZE); | 2898 | do_invalidatepage(page, 0, PAGE_SIZE); |
2899 | unlock_page(page); | 2899 | unlock_page(page); |
2900 | return 0; /* don't care */ | 2900 | return 0; /* don't care */ |
2901 | } | 2901 | } |
@@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, | |||
2907 | * the page size, the remaining memory is zeroed when mapped, and | 2907 | * the page size, the remaining memory is zeroed when mapped, and |
2908 | * writes to that region are not written out to the file." | 2908 | * writes to that region are not written out to the file." |
2909 | */ | 2909 | */ |
2910 | zero_user_segment(page, offset, PAGE_CACHE_SIZE); | 2910 | zero_user_segment(page, offset, PAGE_SIZE); |
2911 | return __block_write_full_page(inode, page, get_block, wbc, | 2911 | return __block_write_full_page(inode, page, get_block, wbc, |
2912 | end_buffer_async_write); | 2912 | end_buffer_async_write); |
2913 | } | 2913 | } |