Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c  104
1 file changed, 52 insertions(+), 52 deletions(-)
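
The hunks below apply one mechanical substitution throughout fs/btrfs/inode.c: PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK become PAGE_SIZE, PAGE_SHIFT and PAGE_MASK, and page_cache_get()/page_cache_release() become get_page()/put_page(). The old names were plain aliases defined in include/linux/pagemap.h, so every hunk is a rename with no functional change; a short sketch of the equivalence follows the diff.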
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41a5688ffdfe..2aaba58b4856 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
 		while (compressed_size > 0) {
 			cpage = compressed_pages[i];
 			cur_size = min_t(unsigned long, compressed_size,
-				       PAGE_CACHE_SIZE);
+				       PAGE_SIZE);
 
 			kaddr = kmap_atomic(cpage);
 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
@@ -208,13 +208,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
 						  compress_type);
 	} else {
 		page = find_get_page(inode->i_mapping,
-				     start >> PAGE_CACHE_SHIFT);
+				     start >> PAGE_SHIFT);
 		btrfs_set_file_extent_compression(leaf, ei, 0);
 		kaddr = kmap_atomic(page);
-		offset = start & (PAGE_CACHE_SIZE - 1);
+		offset = start & (PAGE_SIZE - 1);
 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
 		kunmap_atomic(kaddr);
-		page_cache_release(page);
+		put_page(page);
 	}
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
@@ -322,7 +322,7 @@ out:
 	 * And at reserve time, it's always aligned to page size, so
 	 * just free one page here.
 	 */
-	btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
+	btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
 	btrfs_free_path(path);
 	btrfs_end_transaction(trans, root);
 	return ret;
@@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode,
 	actual_end = min_t(u64, isize, end + 1);
 again:
 	will_compress = 0;
-	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
-	nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE);
+	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+	nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);
 
 	/*
 	 * we don't want to send crud past the end of i_size through
@@ -514,7 +514,7 @@ again:
 
 		if (!ret) {
 			unsigned long offset = total_compressed &
-				(PAGE_CACHE_SIZE - 1);
+				(PAGE_SIZE - 1);
 			struct page *page = pages[nr_pages_ret - 1];
 			char *kaddr;
 
@@ -524,7 +524,7 @@ again:
 			if (offset) {
 				kaddr = kmap_atomic(page);
 				memset(kaddr + offset, 0,
-				       PAGE_CACHE_SIZE - offset);
+				       PAGE_SIZE - offset);
 				kunmap_atomic(kaddr);
 			}
 			will_compress = 1;
@@ -580,7 +580,7 @@ cont:
 		 * one last check to make sure the compression is really a
 		 * win, compare the page count read with the blocks on disk
 		 */
-		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
+		total_in = ALIGN(total_in, PAGE_SIZE);
 		if (total_compressed >= total_in) {
 			will_compress = 0;
 		} else {
@@ -594,7 +594,7 @@ cont:
 		 */
 		for (i = 0; i < nr_pages_ret; i++) {
 			WARN_ON(pages[i]->mapping);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		kfree(pages);
 		pages = NULL;
@@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed:
 free_pages_out:
 	for (i = 0; i < nr_pages_ret; i++) {
 		WARN_ON(pages[i]->mapping);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	kfree(pages);
 }
@@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
 
 	for (i = 0; i < async_extent->nr_pages; i++) {
 		WARN_ON(async_extent->pages[i]->mapping);
-		page_cache_release(async_extent->pages[i]);
+		put_page(async_extent->pages[i]);
 	}
 	kfree(async_extent->pages);
 	async_extent->nr_pages = 0;
@@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode,
 						     PAGE_END_WRITEBACK);
 
 			*nr_written = *nr_written +
-			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
+			     (end - start + PAGE_SIZE) / PAGE_SIZE;
 			*page_started = 1;
 			goto out;
 		} else if (ret < 0) {
@@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
 	async_cow = container_of(work, struct async_cow, work);
 
 	root = async_cow->root;
-	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
-		PAGE_CACHE_SHIFT;
+	nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
+		PAGE_SHIFT;
 
 	/*
 	 * atomic_sub_return implies a barrier for waitqueue_active
@@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 				  async_cow_start, async_cow_submit,
 				  async_cow_free);
 
-		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
-			PAGE_CACHE_SHIFT;
+		nr_pages = (cur_end - start + PAGE_SIZE) >>
+			PAGE_SHIFT;
 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
 
 		btrfs_queue_work(root->fs_info->delalloc_workers,
@@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      struct extent_state **cached_state)
 {
-	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
+	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
 				   cached_state, GFP_NOFS);
 }
@@ -1993,7 +1993,7 @@ again:
 
 	inode = page->mapping->host;
 	page_start = page_offset(page);
-	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
+	page_end = page_offset(page) + PAGE_SIZE - 1;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
 			 &cached_state);
@@ -2003,7 +2003,7 @@ again:
 		goto out;
 
 	ordered = btrfs_lookup_ordered_range(inode, page_start,
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 	if (ordered) {
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
 				     page_end, &cached_state, GFP_NOFS);
@@ -2014,7 +2014,7 @@ again:
 	}
 
 	ret = btrfs_delalloc_reserve_space(inode, page_start,
-					   PAGE_CACHE_SIZE);
+					   PAGE_SIZE);
 	if (ret) {
 		mapping_set_error(page->mapping, ret);
 		end_extent_writepage(page, ret, page_start, page_end);
@@ -2030,7 +2030,7 @@ out:
 			     &cached_state, GFP_NOFS);
 out_page:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	kfree(fixup);
 }
 
@@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 		return -EAGAIN;
 
 	SetPageChecked(page);
-	page_cache_get(page);
+	get_page(page);
 	btrfs_init_work(&fixup->work, btrfs_fixup_helper,
 			btrfs_writepage_fixup_worker, NULL, NULL);
 	fixup->page = page;
@@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode,
 
 	if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
 		loff_t offset = new_size;
-		loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
+		loff_t page_end = ALIGN(offset, PAGE_SIZE);
 
 		/*
 		 * Zero out the remaining of the last page of our inline extent,
@@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 	struct extent_state *cached_state = NULL;
 	char *kaddr;
 	u32 blocksize = root->sectorsize;
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	pgoff_t index = from >> PAGE_SHIFT;
 	unsigned offset = from & (blocksize - 1);
 	struct page *page;
 	gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -4668,7 +4668,7 @@ again:
 		lock_page(page);
 		if (page->mapping != mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto again;
 		}
 		if (!PageUptodate(page)) {
@@ -4686,7 +4686,7 @@ again:
 		unlock_extent_cached(io_tree, block_start, block_end,
 				     &cached_state, GFP_NOFS);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 		goto again;
@@ -4728,7 +4728,7 @@ out_unlock:
 		btrfs_delalloc_release_space(inode, block_start,
 					     blocksize);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return ret;
 }
@@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 
 	read_extent_buffer(leaf, tmp, ptr, inline_size);
 
-	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
+	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
 	kfree(tmp);
@@ -6879,8 +6879,8 @@ next:
 
 		size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
 		extent_offset = page_offset(page) + pg_offset - extent_start;
-		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
-				size - extent_offset);
+		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
+				size - extent_offset);
 		em->start = extent_start + extent_offset;
 		em->len = ALIGN(copy_size, root->sectorsize);
 		em->orig_block_len = em->len;
@@ -6899,9 +6899,9 @@ next:
 			map = kmap(page);
 			read_extent_buffer(leaf, map + pg_offset, ptr,
 					   copy_size);
-			if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+			if (pg_offset + copy_size < PAGE_SIZE) {
 				memset(map + pg_offset + copy_size, 0,
-				       PAGE_CACHE_SIZE - pg_offset -
+				       PAGE_SIZE - pg_offset -
 				       copy_size);
 			}
 			kunmap(page);
@@ -7336,12 +7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
 	int start_idx;
 	int end_idx;
 
-	start_idx = start >> PAGE_CACHE_SHIFT;
+	start_idx = start >> PAGE_SHIFT;
 
 	/*
 	 * end is the last byte in the last page. end == start is legal
 	 */
-	end_idx = end >> PAGE_CACHE_SHIFT;
+	end_idx = end >> PAGE_SHIFT;
 
 	rcu_read_lock();
 
@@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
 		 * include/linux/pagemap.h for details.
 		 */
 		if (unlikely(page != *pagep)) {
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 		}
 	}
@@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
 	if (page) {
 		if (page->index <= end_idx)
 			found = true;
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	rcu_read_unlock();
@@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 	if (ret == 1) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return ret;
 }
@@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	u64 page_start = page_offset(page);
-	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = page_start + PAGE_SIZE - 1;
 	u64 start;
 	u64 end;
 	int inode_evicting = inode->i_state & I_FREEING;
@@ -8822,7 +8822,7 @@ again:
 	 *    2) Not written to disk
 	 *       This means the reserved space should be freed here.
 	 */
-	btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE);
+	btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
 	if (!inode_evicting) {
 		clear_extent_bit(tree, page_start, page_end,
 				 EXTENT_LOCKED | EXTENT_DIRTY |
@@ -8837,7 +8837,7 @@ again:
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	u64 page_end;
 	u64 end;
 
-	reserved_space = PAGE_CACHE_SIZE;
+	reserved_space = PAGE_SIZE;
 
 	sb_start_pagefault(inode->i_sb);
 	page_start = page_offset(page);
-	page_end = page_start + PAGE_CACHE_SIZE - 1;
+	page_end = page_start + PAGE_SIZE - 1;
 	end = page_end;
 
 	/*
@@ -8934,15 +8934,15 @@ again:
 		goto again;
 	}
 
-	if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
 		reserved_space = round_up(size - page_start, root->sectorsize);
-		if (reserved_space < PAGE_CACHE_SIZE) {
+		if (reserved_space < PAGE_SIZE) {
 			end = page_start + reserved_space - 1;
 			spin_lock(&BTRFS_I(inode)->lock);
 			BTRFS_I(inode)->outstanding_extents++;
 			spin_unlock(&BTRFS_I(inode)->lock);
 			btrfs_delalloc_release_space(inode, page_start,
-						PAGE_CACHE_SIZE - reserved_space);
+						PAGE_SIZE - reserved_space);
 		}
 	}
 
@@ -8969,14 +8969,14 @@ again:
 	ret = 0;
 
 	/* page is wholly or partially inside EOF */
-	if (page_start + PAGE_CACHE_SIZE > size)
-		zero_start = size & ~PAGE_CACHE_MASK;
+	if (page_start + PAGE_SIZE > size)
+		zero_start = size & ~PAGE_MASK;
 	else
-		zero_start = PAGE_CACHE_SIZE;
+		zero_start = PAGE_SIZE;
 
-	if (zero_start != PAGE_CACHE_SIZE) {
+	if (zero_start != PAGE_SIZE) {
 		kaddr = kmap(page);
-		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
+		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
 		flush_dcache_page(page);
 		kunmap(page);
 	}