aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2008-02-05 01:28:29 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-05 12:44:13 -0500
commiteebd2aa355692afaf9906f62118620f1a1c19dbb (patch)
tree207eead3a736963c3e50942038c463f2f611ccce /fs
parentb98348bdd08dc4ec11828aa98a78edde15c53cfa (diff)
Pagecache zeroing: zero_user_segment, zero_user_segments and zero_user
Simplify page cache zeroing of segments of pages through 3 functions zero_user_segments(page, start1, end1, start2, end2) Zeros two segments of the page. It takes the position where to start and end the zeroing, which avoids length calculations and makes code clearer. zero_user_segment(page, start, end) Same for a single segment. zero_user(page, start, length) Length variant for the case where we know the length. We remove the zero_user_page macro. Issues: 1. It's a macro. Inline functions are preferable. 2. The KM_USER0 macro is only defined for HIGHMEM. Having to treat this special case everywhere makes the code needlessly complex. The parameter for zeroing is always KM_USER0 except in one single case that we open code. Avoiding KM_USER0 means a lot of code no longer has to deal with the special casing for HIGHMEM. Dealing with kmap is only necessary for HIGHMEM configurations. In those configurations we use KM_USER0 like we do for a series of other functions defined in highmem.h. Since KM_USER0 depends on HIGHMEM, the existing zero_user_page function could not be an inline function. The zero_user_* functions introduced here can be inline because that constant is not used when these functions are called. Also extract the flushing of the caches to be outside of the kmap. [akpm@linux-foundation.org: fix nfs and ntfs build] [akpm@linux-foundation.org: fix ntfs build some more] Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Steven French <sfrench@us.ibm.com> Cc: Michael Halcrow <mhalcrow@us.ibm.com> Cc: <linux-ext4@vger.kernel.org> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Trond Myklebust <trond.myklebust@fys.uio.no> Cc: "J. 
Bruce Fields" <bfields@fieldses.org> Cc: Anton Altaparmakov <aia21@cantab.net> Cc: Mark Fasheh <mark.fasheh@oracle.com> Cc: David Chinner <dgc@sgi.com> Cc: Michael Halcrow <mhalcrow@us.ibm.com> Cc: Steven French <sfrench@us.ibm.com> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Trond Myklebust <trond.myklebust@fys.uio.no> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--fs/buffer.c44
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/direct-io.c4
-rw-r--r--fs/ecryptfs/mmap.c5
-rw-r--r--fs/ext3/inode.c4
-rw-r--r--fs/ext4/inode.c4
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/ops_address.c2
-rw-r--r--fs/libfs.c11
-rw-r--r--fs/mpage.c7
-rw-r--r--fs/nfs/read.c10
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/ntfs/aops.c20
-rw-r--r--fs/ntfs/compress.c2
-rw-r--r--fs/ntfs/file.c32
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c6
-rw-r--r--fs/reiserfs/inode.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c2
19 files changed, 71 insertions, 96 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 456c9ab7705b..1de921484eac 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1798,7 +1798,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1798 start = max(from, block_start); 1798 start = max(from, block_start);
1799 size = min(to, block_end) - start; 1799 size = min(to, block_end) - start;
1800 1800
1801 zero_user_page(page, start, size, KM_USER0); 1801 zero_user(page, start, size);
1802 set_buffer_uptodate(bh); 1802 set_buffer_uptodate(bh);
1803 } 1803 }
1804 1804
@@ -1861,19 +1861,10 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
1861 mark_buffer_dirty(bh); 1861 mark_buffer_dirty(bh);
1862 continue; 1862 continue;
1863 } 1863 }
1864 if (block_end > to || block_start < from) { 1864 if (block_end > to || block_start < from)
1865 void *kaddr; 1865 zero_user_segments(page,
1866 1866 to, block_end,
1867 kaddr = kmap_atomic(page, KM_USER0); 1867 block_start, from);
1868 if (block_end > to)
1869 memset(kaddr+to, 0,
1870 block_end-to);
1871 if (block_start < from)
1872 memset(kaddr+block_start,
1873 0, from-block_start);
1874 flush_dcache_page(page);
1875 kunmap_atomic(kaddr, KM_USER0);
1876 }
1877 continue; 1868 continue;
1878 } 1869 }
1879 } 1870 }
@@ -2104,8 +2095,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
2104 SetPageError(page); 2095 SetPageError(page);
2105 } 2096 }
2106 if (!buffer_mapped(bh)) { 2097 if (!buffer_mapped(bh)) {
2107 zero_user_page(page, i * blocksize, blocksize, 2098 zero_user(page, i * blocksize, blocksize);
2108 KM_USER0);
2109 if (!err) 2099 if (!err)
2110 set_buffer_uptodate(bh); 2100 set_buffer_uptodate(bh);
2111 continue; 2101 continue;
@@ -2218,7 +2208,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
2218 &page, &fsdata); 2208 &page, &fsdata);
2219 if (err) 2209 if (err)
2220 goto out; 2210 goto out;
2221 zero_user_page(page, zerofrom, len, KM_USER0); 2211 zero_user(page, zerofrom, len);
2222 err = pagecache_write_end(file, mapping, curpos, len, len, 2212 err = pagecache_write_end(file, mapping, curpos, len, len,
2223 page, fsdata); 2213 page, fsdata);
2224 if (err < 0) 2214 if (err < 0)
@@ -2245,7 +2235,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
2245 &page, &fsdata); 2235 &page, &fsdata);
2246 if (err) 2236 if (err)
2247 goto out; 2237 goto out;
2248 zero_user_page(page, zerofrom, len, KM_USER0); 2238 zero_user(page, zerofrom, len);
2249 err = pagecache_write_end(file, mapping, curpos, len, len, 2239 err = pagecache_write_end(file, mapping, curpos, len, len,
2250 page, fsdata); 2240 page, fsdata);
2251 if (err < 0) 2241 if (err < 0)
@@ -2422,7 +2412,6 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
2422 unsigned block_in_page; 2412 unsigned block_in_page;
2423 unsigned block_start, block_end; 2413 unsigned block_start, block_end;
2424 sector_t block_in_file; 2414 sector_t block_in_file;
2425 char *kaddr;
2426 int nr_reads = 0; 2415 int nr_reads = 0;
2427 int ret = 0; 2416 int ret = 0;
2428 int is_mapped_to_disk = 1; 2417 int is_mapped_to_disk = 1;
@@ -2493,13 +2482,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
2493 continue; 2482 continue;
2494 } 2483 }
2495 if (buffer_new(bh) || !buffer_mapped(bh)) { 2484 if (buffer_new(bh) || !buffer_mapped(bh)) {
2496 kaddr = kmap_atomic(page, KM_USER0); 2485 zero_user_segments(page, block_start, from,
2497 if (block_start < from) 2486 to, block_end);
2498 memset(kaddr+block_start, 0, from-block_start);
2499 if (block_end > to)
2500 memset(kaddr + to, 0, block_end - to);
2501 flush_dcache_page(page);
2502 kunmap_atomic(kaddr, KM_USER0);
2503 continue; 2487 continue;
2504 } 2488 }
2505 if (buffer_uptodate(bh)) 2489 if (buffer_uptodate(bh))
@@ -2636,7 +2620,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
2636 * the page size, the remaining memory is zeroed when mapped, and 2620 * the page size, the remaining memory is zeroed when mapped, and
2637 * writes to that region are not written out to the file." 2621 * writes to that region are not written out to the file."
2638 */ 2622 */
2639 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 2623 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2640out: 2624out:
2641 ret = mpage_writepage(page, get_block, wbc); 2625 ret = mpage_writepage(page, get_block, wbc);
2642 if (ret == -EAGAIN) 2626 if (ret == -EAGAIN)
@@ -2709,7 +2693,7 @@ has_buffers:
2709 if (page_has_buffers(page)) 2693 if (page_has_buffers(page))
2710 goto has_buffers; 2694 goto has_buffers;
2711 } 2695 }
2712 zero_user_page(page, offset, length, KM_USER0); 2696 zero_user(page, offset, length);
2713 set_page_dirty(page); 2697 set_page_dirty(page);
2714 err = 0; 2698 err = 0;
2715 2699
@@ -2785,7 +2769,7 @@ int block_truncate_page(struct address_space *mapping,
2785 goto unlock; 2769 goto unlock;
2786 } 2770 }
2787 2771
2788 zero_user_page(page, offset, length, KM_USER0); 2772 zero_user(page, offset, length);
2789 mark_buffer_dirty(bh); 2773 mark_buffer_dirty(bh);
2790 err = 0; 2774 err = 0;
2791 2775
@@ -2831,7 +2815,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2831 * the page size, the remaining memory is zeroed when mapped, and 2815 * the page size, the remaining memory is zeroed when mapped, and
2832 * writes to that region are not written out to the file." 2816 * writes to that region are not written out to the file."
2833 */ 2817 */
2834 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 2818 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2835 return __block_write_full_page(inode, page, get_block, wbc); 2819 return __block_write_full_page(inode, page, get_block, wbc);
2836} 2820}
2837 2821
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d9567ba2960b..47f2621001e4 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1386,7 +1386,7 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
1386 if (!page) 1386 if (!page)
1387 return -ENOMEM; 1387 return -ENOMEM;
1388 1388
1389 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 1389 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1390 unlock_page(page); 1390 unlock_page(page);
1391 page_cache_release(page); 1391 page_cache_release(page);
1392 return rc; 1392 return rc;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index acf0da1bd257..9e81addbd6ea 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -878,8 +878,8 @@ do_holes:
878 page_cache_release(page); 878 page_cache_release(page);
879 goto out; 879 goto out;
880 } 880 }
881 zero_user_page(page, block_in_page << blkbits, 881 zero_user(page, block_in_page << blkbits,
882 1 << blkbits, KM_USER0); 882 1 << blkbits);
883 dio->block_in_file++; 883 dio->block_in_file++;
884 block_in_page++; 884 block_in_page++;
885 goto next_block; 885 goto next_block;
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 32c5711d79a3..0535412d8c64 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -257,8 +257,7 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
257 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; 257 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
258 if (to > end_byte_in_page) 258 if (to > end_byte_in_page)
259 end_byte_in_page = to; 259 end_byte_in_page = to;
260 zero_user_page(page, end_byte_in_page, 260 zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
261 PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
262out: 261out:
263 return 0; 262 return 0;
264} 263}
@@ -307,7 +306,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
307 */ 306 */
308 if ((i_size_read(page->mapping->host) == prev_page_end_size) && 307 if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
309 (from != 0)) { 308 (from != 0)) {
310 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 309 zero_user(page, 0, PAGE_CACHE_SIZE);
311 } 310 }
312out: 311out:
313 return rc; 312 return rc;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9b162cd6c16c..077535439288 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1845,7 +1845,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1845 */ 1845 */
1846 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 1846 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1847 ext3_should_writeback_data(inode) && PageUptodate(page)) { 1847 ext3_should_writeback_data(inode) && PageUptodate(page)) {
1848 zero_user_page(page, offset, length, KM_USER0); 1848 zero_user(page, offset, length);
1849 set_page_dirty(page); 1849 set_page_dirty(page);
1850 goto unlock; 1850 goto unlock;
1851 } 1851 }
@@ -1898,7 +1898,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1898 goto unlock; 1898 goto unlock;
1899 } 1899 }
1900 1900
1901 zero_user_page(page, offset, length, KM_USER0); 1901 zero_user(page, offset, length);
1902 BUFFER_TRACE(bh, "zeroed end of block"); 1902 BUFFER_TRACE(bh, "zeroed end of block");
1903 1903
1904 err = 0; 1904 err = 0;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bb717cbb749c..05c4145dd27d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1840,7 +1840,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
1840 */ 1840 */
1841 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 1841 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1842 ext4_should_writeback_data(inode) && PageUptodate(page)) { 1842 ext4_should_writeback_data(inode) && PageUptodate(page)) {
1843 zero_user_page(page, offset, length, KM_USER0); 1843 zero_user(page, offset, length);
1844 set_page_dirty(page); 1844 set_page_dirty(page);
1845 goto unlock; 1845 goto unlock;
1846 } 1846 }
@@ -1893,7 +1893,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
1893 goto unlock; 1893 goto unlock;
1894 } 1894 }
1895 1895
1896 zero_user_page(page, offset, length, KM_USER0); 1896 zero_user(page, offset, length);
1897 1897
1898 BUFFER_TRACE(bh, "zeroed end of block"); 1898 BUFFER_TRACE(bh, "zeroed end of block");
1899 1899
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e4effc47abfc..e9456ebd3bb6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -932,7 +932,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping)
932 if (!gfs2_is_writeback(ip)) 932 if (!gfs2_is_writeback(ip))
933 gfs2_trans_add_bh(ip->i_gl, bh, 0); 933 gfs2_trans_add_bh(ip->i_gl, bh, 0);
934 934
935 zero_user_page(page, offset, length, KM_USER0); 935 zero_user(page, offset, length);
936 936
937unlock: 937unlock:
938 unlock_page(page); 938 unlock_page(page);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 38dbe99a30ed..ac772b6d9dbb 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -446,7 +446,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
446 * so we need to supply one here. It doesn't happen often. 446 * so we need to supply one here. It doesn't happen often.
447 */ 447 */
448 if (unlikely(page->index)) { 448 if (unlikely(page->index)) {
449 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 449 zero_user(page, 0, PAGE_CACHE_SIZE);
450 return 0; 450 return 0;
451 } 451 }
452 452
diff --git a/fs/libfs.c b/fs/libfs.c
index 6e68b700958d..5523bde96387 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -341,13 +341,10 @@ int simple_prepare_write(struct file *file, struct page *page,
341 unsigned from, unsigned to) 341 unsigned from, unsigned to)
342{ 342{
343 if (!PageUptodate(page)) { 343 if (!PageUptodate(page)) {
344 if (to - from != PAGE_CACHE_SIZE) { 344 if (to - from != PAGE_CACHE_SIZE)
345 void *kaddr = kmap_atomic(page, KM_USER0); 345 zero_user_segments(page,
346 memset(kaddr, 0, from); 346 0, from,
347 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); 347 to, PAGE_CACHE_SIZE);
348 flush_dcache_page(page);
349 kunmap_atomic(kaddr, KM_USER0);
350 }
351 } 348 }
352 return 0; 349 return 0;
353} 350}
diff --git a/fs/mpage.c b/fs/mpage.c
index d54f8f897224..5df564366f36 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -276,9 +276,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
276 } 276 }
277 277
278 if (first_hole != blocks_per_page) { 278 if (first_hole != blocks_per_page) {
279 zero_user_page(page, first_hole << blkbits, 279 zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
280 PAGE_CACHE_SIZE - (first_hole << blkbits),
281 KM_USER0);
282 if (first_hole == 0) { 280 if (first_hole == 0) {
283 SetPageUptodate(page); 281 SetPageUptodate(page);
284 unlock_page(page); 282 unlock_page(page);
@@ -571,8 +569,7 @@ page_is_mapped:
571 569
572 if (page->index > end_index || !offset) 570 if (page->index > end_index || !offset)
573 goto confused; 571 goto confused;
574 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, 572 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
575 KM_USER0);
576 } 573 }
577 574
578 /* 575 /*
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8fd6dfbe1bc3..3d7d9631e125 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
79static 79static
80int nfs_return_empty_page(struct page *page) 80int nfs_return_empty_page(struct page *page)
81{ 81{
82 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 82 zero_user(page, 0, PAGE_CACHE_SIZE);
83 SetPageUptodate(page); 83 SetPageUptodate(page);
84 unlock_page(page); 84 unlock_page(page);
85 return 0; 85 return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
103 pglen = PAGE_CACHE_SIZE - base; 103 pglen = PAGE_CACHE_SIZE - base;
104 for (;;) { 104 for (;;) {
105 if (remainder <= pglen) { 105 if (remainder <= pglen) {
106 zero_user_page(*pages, base, remainder, KM_USER0); 106 zero_user(*pages, base, remainder);
107 break; 107 break;
108 } 108 }
109 zero_user_page(*pages, base, pglen, KM_USER0); 109 zero_user(*pages, base, pglen);
110 pages++; 110 pages++;
111 remainder -= pglen; 111 remainder -= pglen;
112 pglen = PAGE_CACHE_SIZE; 112 pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
130 return PTR_ERR(new); 130 return PTR_ERR(new);
131 } 131 }
132 if (len < PAGE_CACHE_SIZE) 132 if (len < PAGE_CACHE_SIZE)
133 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); 133 zero_user_segment(page, len, PAGE_CACHE_SIZE);
134 134
135 nfs_list_add_request(new, &one_request); 135 nfs_list_add_request(new, &one_request);
136 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) 136 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
532 goto out_error; 532 goto out_error;
533 533
534 if (len < PAGE_CACHE_SIZE) 534 if (len < PAGE_CACHE_SIZE)
535 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); 535 zero_user_segment(page, len, PAGE_CACHE_SIZE);
536 nfs_pageio_add_request(desc->pgio, new); 536 nfs_pageio_add_request(desc->pgio, new);
537 return 0; 537 return 0;
538out_error: 538out_error:
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 522efff3e2c5..b144b1957dd9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -665,9 +665,7 @@ zero_page:
665 * then we need to zero any uninitalised data. */ 665 * then we need to zero any uninitalised data. */
666 if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE 666 if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
667 && !PageUptodate(req->wb_page)) 667 && !PageUptodate(req->wb_page))
668 zero_user_page(req->wb_page, req->wb_bytes, 668 zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
669 PAGE_CACHE_SIZE - req->wb_bytes,
670 KM_USER0);
671 return req; 669 return req;
672} 670}
673 671
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index ad87cb01299b..00e9ccde8e42 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -87,13 +87,17 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
87 /* Check for the current buffer head overflowing. */ 87 /* Check for the current buffer head overflowing. */
88 if (unlikely(file_ofs + bh->b_size > init_size)) { 88 if (unlikely(file_ofs + bh->b_size > init_size)) {
89 int ofs; 89 int ofs;
90 void *kaddr;
90 91
91 ofs = 0; 92 ofs = 0;
92 if (file_ofs < init_size) 93 if (file_ofs < init_size)
93 ofs = init_size - file_ofs; 94 ofs = init_size - file_ofs;
94 local_irq_save(flags); 95 local_irq_save(flags);
95 zero_user_page(page, bh_offset(bh) + ofs, 96 kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
96 bh->b_size - ofs, KM_BIO_SRC_IRQ); 97 memset(kaddr + bh_offset(bh) + ofs, 0,
98 bh->b_size - ofs);
99 flush_dcache_page(page);
100 kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
97 local_irq_restore(flags); 101 local_irq_restore(flags);
98 } 102 }
99 } else { 103 } else {
@@ -334,7 +338,7 @@ handle_hole:
334 bh->b_blocknr = -1UL; 338 bh->b_blocknr = -1UL;
335 clear_buffer_mapped(bh); 339 clear_buffer_mapped(bh);
336handle_zblock: 340handle_zblock:
337 zero_user_page(page, i * blocksize, blocksize, KM_USER0); 341 zero_user(page, i * blocksize, blocksize);
338 if (likely(!err)) 342 if (likely(!err))
339 set_buffer_uptodate(bh); 343 set_buffer_uptodate(bh);
340 } while (i++, iblock++, (bh = bh->b_this_page) != head); 344 } while (i++, iblock++, (bh = bh->b_this_page) != head);
@@ -410,7 +414,7 @@ retry_readpage:
410 /* Is the page fully outside i_size? (truncate in progress) */ 414 /* Is the page fully outside i_size? (truncate in progress) */
411 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> 415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
412 PAGE_CACHE_SHIFT)) { 416 PAGE_CACHE_SHIFT)) {
413 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 417 zero_user(page, 0, PAGE_CACHE_SIZE);
414 ntfs_debug("Read outside i_size - truncated?"); 418 ntfs_debug("Read outside i_size - truncated?");
415 goto done; 419 goto done;
416 } 420 }
@@ -459,7 +463,7 @@ retry_readpage:
459 * ok to ignore the compressed flag here. 463 * ok to ignore the compressed flag here.
460 */ 464 */
461 if (unlikely(page->index > 0)) { 465 if (unlikely(page->index > 0)) {
462 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 466 zero_user(page, 0, PAGE_CACHE_SIZE);
463 goto done; 467 goto done;
464 } 468 }
465 if (!NInoAttr(ni)) 469 if (!NInoAttr(ni))
@@ -788,8 +792,7 @@ lock_retry_remap:
788 if (err == -ENOENT || lcn == LCN_ENOENT) { 792 if (err == -ENOENT || lcn == LCN_ENOENT) {
789 bh->b_blocknr = -1; 793 bh->b_blocknr = -1;
790 clear_buffer_dirty(bh); 794 clear_buffer_dirty(bh);
791 zero_user_page(page, bh_offset(bh), blocksize, 795 zero_user(page, bh_offset(bh), blocksize);
792 KM_USER0);
793 set_buffer_uptodate(bh); 796 set_buffer_uptodate(bh);
794 err = 0; 797 err = 0;
795 continue; 798 continue;
@@ -1414,8 +1417,7 @@ retry_writepage:
1414 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { 1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
1415 /* The page straddles i_size. */ 1418 /* The page straddles i_size. */
1416 unsigned int ofs = i_size & ~PAGE_CACHE_MASK; 1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
1417 zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs, 1420 zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
1418 KM_USER0);
1419 } 1421 }
1420 /* Handle mst protected attributes. */ 1422 /* Handle mst protected attributes. */
1421 if (NInoMstProtected(ni)) 1423 if (NInoMstProtected(ni))
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index d1619d05eb23..33ff314cc507 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -565,7 +565,7 @@ int ntfs_read_compressed_block(struct page *page)
565 if (xpage >= max_page) { 565 if (xpage >= max_page) {
566 kfree(bhs); 566 kfree(bhs);
567 kfree(pages); 567 kfree(pages);
568 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 568 zero_user(page, 0, PAGE_CACHE_SIZE);
569 ntfs_debug("Compressed read outside i_size - truncated?"); 569 ntfs_debug("Compressed read outside i_size - truncated?");
570 SetPageUptodate(page); 570 SetPageUptodate(page);
571 unlock_page(page); 571 unlock_page(page);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 6cd08dfdc2ed..3c5550cd11d6 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -607,8 +607,8 @@ do_next_page:
607 ntfs_submit_bh_for_read(bh); 607 ntfs_submit_bh_for_read(bh);
608 *wait_bh++ = bh; 608 *wait_bh++ = bh;
609 } else { 609 } else {
610 zero_user_page(page, bh_offset(bh), 610 zero_user(page, bh_offset(bh),
611 blocksize, KM_USER0); 611 blocksize);
612 set_buffer_uptodate(bh); 612 set_buffer_uptodate(bh);
613 } 613 }
614 } 614 }
@@ -683,9 +683,8 @@ map_buffer_cached:
683 ntfs_submit_bh_for_read(bh); 683 ntfs_submit_bh_for_read(bh);
684 *wait_bh++ = bh; 684 *wait_bh++ = bh;
685 } else { 685 } else {
686 zero_user_page(page, 686 zero_user(page, bh_offset(bh),
687 bh_offset(bh), 687 blocksize);
688 blocksize, KM_USER0);
689 set_buffer_uptodate(bh); 688 set_buffer_uptodate(bh);
690 } 689 }
691 } 690 }
@@ -703,8 +702,8 @@ map_buffer_cached:
703 */ 702 */
704 if (bh_end <= pos || bh_pos >= end) { 703 if (bh_end <= pos || bh_pos >= end) {
705 if (!buffer_uptodate(bh)) { 704 if (!buffer_uptodate(bh)) {
706 zero_user_page(page, bh_offset(bh), 705 zero_user(page, bh_offset(bh),
707 blocksize, KM_USER0); 706 blocksize);
708 set_buffer_uptodate(bh); 707 set_buffer_uptodate(bh);
709 } 708 }
710 mark_buffer_dirty(bh); 709 mark_buffer_dirty(bh);
@@ -743,8 +742,7 @@ map_buffer_cached:
743 if (!buffer_uptodate(bh)) 742 if (!buffer_uptodate(bh))
744 set_buffer_uptodate(bh); 743 set_buffer_uptodate(bh);
745 } else if (!buffer_uptodate(bh)) { 744 } else if (!buffer_uptodate(bh)) {
746 zero_user_page(page, bh_offset(bh), blocksize, 745 zero_user(page, bh_offset(bh), blocksize);
747 KM_USER0);
748 set_buffer_uptodate(bh); 746 set_buffer_uptodate(bh);
749 } 747 }
750 continue; 748 continue;
@@ -868,8 +866,8 @@ rl_not_mapped_enoent:
868 if (!buffer_uptodate(bh)) 866 if (!buffer_uptodate(bh))
869 set_buffer_uptodate(bh); 867 set_buffer_uptodate(bh);
870 } else if (!buffer_uptodate(bh)) { 868 } else if (!buffer_uptodate(bh)) {
871 zero_user_page(page, bh_offset(bh), 869 zero_user(page, bh_offset(bh),
872 blocksize, KM_USER0); 870 blocksize);
873 set_buffer_uptodate(bh); 871 set_buffer_uptodate(bh);
874 } 872 }
875 continue; 873 continue;
@@ -1128,8 +1126,8 @@ rl_not_mapped_enoent:
1128 1126
1129 if (likely(bh_pos < initialized_size)) 1127 if (likely(bh_pos < initialized_size))
1130 ofs = initialized_size - bh_pos; 1128 ofs = initialized_size - bh_pos;
1131 zero_user_page(page, bh_offset(bh) + ofs, 1129 zero_user_segment(page, bh_offset(bh) + ofs,
1132 blocksize - ofs, KM_USER0); 1130 blocksize);
1133 } 1131 }
1134 } else /* if (unlikely(!buffer_uptodate(bh))) */ 1132 } else /* if (unlikely(!buffer_uptodate(bh))) */
1135 err = -EIO; 1133 err = -EIO;
@@ -1269,8 +1267,8 @@ rl_not_mapped_enoent:
1269 if (PageUptodate(page)) 1267 if (PageUptodate(page))
1270 set_buffer_uptodate(bh); 1268 set_buffer_uptodate(bh);
1271 else { 1269 else {
1272 zero_user_page(page, bh_offset(bh), 1270 zero_user(page, bh_offset(bh),
1273 blocksize, KM_USER0); 1271 blocksize);
1274 set_buffer_uptodate(bh); 1272 set_buffer_uptodate(bh);
1275 } 1273 }
1276 } 1274 }
@@ -1330,7 +1328,7 @@ err_out:
1330 len = PAGE_CACHE_SIZE; 1328 len = PAGE_CACHE_SIZE;
1331 if (len > bytes) 1329 if (len > bytes)
1332 len = bytes; 1330 len = bytes;
1333 zero_user_page(*pages, 0, len, KM_USER0); 1331 zero_user(*pages, 0, len);
1334 } 1332 }
1335 goto out; 1333 goto out;
1336} 1334}
@@ -1451,7 +1449,7 @@ err_out:
1451 len = PAGE_CACHE_SIZE; 1449 len = PAGE_CACHE_SIZE;
1452 if (len > bytes) 1450 if (len > bytes)
1453 len = bytes; 1451 len = bytes;
1454 zero_user_page(*pages, 0, len, KM_USER0); 1452 zero_user(*pages, 0, len);
1455 } 1453 }
1456 goto out; 1454 goto out;
1457} 1455}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 64713e149e46..447206eb5c2e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5670,7 +5670,7 @@ static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
5670 mlog_errno(ret); 5670 mlog_errno(ret);
5671 5671
5672 if (zero) 5672 if (zero)
5673 zero_user_page(page, from, to - from, KM_USER0); 5673 zero_user_segment(page, from, to);
5674 5674
5675 /* 5675 /*
5676 * Need to set the buffers we zero'd into uptodate 5676 * Need to set the buffers we zero'd into uptodate
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bc7b4cbbe8ec..82243127eebf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -307,7 +307,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
307 * XXX sys_readahead() seems to get that wrong? 307 * XXX sys_readahead() seems to get that wrong?
308 */ 308 */
309 if (start >= i_size_read(inode)) { 309 if (start >= i_size_read(inode)) {
310 zero_user_page(page, 0, PAGE_SIZE, KM_USER0); 310 zero_user(page, 0, PAGE_SIZE);
311 SetPageUptodate(page); 311 SetPageUptodate(page);
312 ret = 0; 312 ret = 0;
313 goto out_alloc; 313 goto out_alloc;
@@ -869,7 +869,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
869 if (block_start >= to) 869 if (block_start >= to)
870 break; 870 break;
871 871
872 zero_user_page(page, block_start, bh->b_size, KM_USER0); 872 zero_user(page, block_start, bh->b_size);
873 set_buffer_uptodate(bh); 873 set_buffer_uptodate(bh);
874 mark_buffer_dirty(bh); 874 mark_buffer_dirty(bh);
875 875
@@ -1034,7 +1034,7 @@ static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to
1034 start = max(from, block_start); 1034 start = max(from, block_start);
1035 end = min(to, block_end); 1035 end = min(to, block_end);
1036 1036
1037 zero_user_page(page, start, end - start, KM_USER0); 1037 zero_user_segment(page, start, end);
1038 set_buffer_uptodate(bh); 1038 set_buffer_uptodate(bh);
1039 } 1039 }
1040 1040
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 231fd5ccadc5..195309857e63 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2143,7 +2143,7 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
2143 /* if we are not on a block boundary */ 2143 /* if we are not on a block boundary */
2144 if (length) { 2144 if (length) {
2145 length = blocksize - length; 2145 length = blocksize - length;
2146 zero_user_page(page, offset, length, KM_USER0); 2146 zero_user(page, offset, length);
2147 if (buffer_mapped(bh) && bh->b_blocknr != 0) { 2147 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2148 mark_buffer_dirty(bh); 2148 mark_buffer_dirty(bh);
2149 } 2149 }
@@ -2367,7 +2367,7 @@ static int reiserfs_write_full_page(struct page *page,
2367 unlock_page(page); 2367 unlock_page(page);
2368 return 0; 2368 return 0;
2369 } 2369 }
2370 zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0); 2370 zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
2371 } 2371 }
2372 bh = head; 2372 bh = head;
2373 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); 2373 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index d6a8dddb2268..6f614f35f650 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -155,7 +155,7 @@ xfs_iozero(
155 if (status) 155 if (status)
156 break; 156 break;
157 157
158 zero_user_page(page, offset, bytes, KM_USER0); 158 zero_user(page, offset, bytes);
159 159
160 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, 160 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
161 page, fsdata); 161 page, fsdata);