diff options
author | Nate Diller <nate.diller@gmail.com> | 2007-05-12 13:36:54 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-12 13:55:39 -0400 |
commit | e3bf460f3eb86cdbc76725a0dac1f191e796676c (patch) | |
tree | 7f11e16b4196735f62d4e3848f9084000b5e6b55 /fs/ntfs/aops.c | |
parent | 6d690dcac92a84f98fd774862628ff871b713660 (diff) |
ntfs: use zero_user_page
Use zero_user_page() instead of open-coding it.
[akpm@linux-foundation.org: kmap-type fixes]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Acked-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/ntfs/aops.c')
-rw-r--r-- | fs/ntfs/aops.c | 36 |
1 file changed, 9 insertions, 27 deletions
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 629e7abdd840..6e5c2534f4bc 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c | |||
@@ -86,19 +86,15 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
86 | } | 86 | } |
87 | /* Check for the current buffer head overflowing. */ | 87 | /* Check for the current buffer head overflowing. */ |
88 | if (unlikely(file_ofs + bh->b_size > init_size)) { | 88 | if (unlikely(file_ofs + bh->b_size > init_size)) { |
89 | u8 *kaddr; | ||
90 | int ofs; | 89 | int ofs; |
91 | 90 | ||
92 | ofs = 0; | 91 | ofs = 0; |
93 | if (file_ofs < init_size) | 92 | if (file_ofs < init_size) |
94 | ofs = init_size - file_ofs; | 93 | ofs = init_size - file_ofs; |
95 | local_irq_save(flags); | 94 | local_irq_save(flags); |
96 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); | 95 | zero_user_page(page, bh_offset(bh) + ofs, |
97 | memset(kaddr + bh_offset(bh) + ofs, 0, | 96 | bh->b_size - ofs, KM_BIO_SRC_IRQ); |
98 | bh->b_size - ofs); | ||
99 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | ||
100 | local_irq_restore(flags); | 97 | local_irq_restore(flags); |
101 | flush_dcache_page(page); | ||
102 | } | 98 | } |
103 | } else { | 99 | } else { |
104 | clear_buffer_uptodate(bh); | 100 | clear_buffer_uptodate(bh); |
@@ -245,8 +241,7 @@ static int ntfs_read_block(struct page *page) | |||
245 | rl = NULL; | 241 | rl = NULL; |
246 | nr = i = 0; | 242 | nr = i = 0; |
247 | do { | 243 | do { |
248 | u8 *kaddr; | 244 | int err = 0; |
249 | int err; | ||
250 | 245 | ||
251 | if (unlikely(buffer_uptodate(bh))) | 246 | if (unlikely(buffer_uptodate(bh))) |
252 | continue; | 247 | continue; |
@@ -254,7 +249,6 @@ static int ntfs_read_block(struct page *page) | |||
254 | arr[nr++] = bh; | 249 | arr[nr++] = bh; |
255 | continue; | 250 | continue; |
256 | } | 251 | } |
257 | err = 0; | ||
258 | bh->b_bdev = vol->sb->s_bdev; | 252 | bh->b_bdev = vol->sb->s_bdev; |
259 | /* Is the block within the allowed limits? */ | 253 | /* Is the block within the allowed limits? */ |
260 | if (iblock < lblock) { | 254 | if (iblock < lblock) { |
@@ -340,10 +334,7 @@ handle_hole: | |||
340 | bh->b_blocknr = -1UL; | 334 | bh->b_blocknr = -1UL; |
341 | clear_buffer_mapped(bh); | 335 | clear_buffer_mapped(bh); |
342 | handle_zblock: | 336 | handle_zblock: |
343 | kaddr = kmap_atomic(page, KM_USER0); | 337 | zero_user_page(page, i * blocksize, blocksize, KM_USER0); |
344 | memset(kaddr + i * blocksize, 0, blocksize); | ||
345 | kunmap_atomic(kaddr, KM_USER0); | ||
346 | flush_dcache_page(page); | ||
347 | if (likely(!err)) | 338 | if (likely(!err)) |
348 | set_buffer_uptodate(bh); | 339 | set_buffer_uptodate(bh); |
349 | } while (i++, iblock++, (bh = bh->b_this_page) != head); | 340 | } while (i++, iblock++, (bh = bh->b_this_page) != head); |
@@ -460,10 +451,7 @@ retry_readpage: | |||
460 | * ok to ignore the compressed flag here. | 451 | * ok to ignore the compressed flag here. |
461 | */ | 452 | */ |
462 | if (unlikely(page->index > 0)) { | 453 | if (unlikely(page->index > 0)) { |
463 | kaddr = kmap_atomic(page, KM_USER0); | 454 | zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); |
464 | memset(kaddr, 0, PAGE_CACHE_SIZE); | ||
465 | flush_dcache_page(page); | ||
466 | kunmap_atomic(kaddr, KM_USER0); | ||
467 | goto done; | 455 | goto done; |
468 | } | 456 | } |
469 | if (!NInoAttr(ni)) | 457 | if (!NInoAttr(ni)) |
@@ -790,14 +778,10 @@ lock_retry_remap: | |||
790 | * uptodate so it can get discarded by the VM. | 778 | * uptodate so it can get discarded by the VM. |
791 | */ | 779 | */ |
792 | if (err == -ENOENT || lcn == LCN_ENOENT) { | 780 | if (err == -ENOENT || lcn == LCN_ENOENT) { |
793 | u8 *kaddr; | ||
794 | |||
795 | bh->b_blocknr = -1; | 781 | bh->b_blocknr = -1; |
796 | clear_buffer_dirty(bh); | 782 | clear_buffer_dirty(bh); |
797 | kaddr = kmap_atomic(page, KM_USER0); | 783 | zero_user_page(page, bh_offset(bh), blocksize, |
798 | memset(kaddr + bh_offset(bh), 0, blocksize); | 784 | KM_USER0); |
799 | kunmap_atomic(kaddr, KM_USER0); | ||
800 | flush_dcache_page(page); | ||
801 | set_buffer_uptodate(bh); | 785 | set_buffer_uptodate(bh); |
802 | err = 0; | 786 | err = 0; |
803 | continue; | 787 | continue; |
@@ -1422,10 +1406,8 @@ retry_writepage: | |||
1422 | if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { | 1406 | if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { |
1423 | /* The page straddles i_size. */ | 1407 | /* The page straddles i_size. */ |
1424 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; | 1408 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; |
1425 | kaddr = kmap_atomic(page, KM_USER0); | 1409 | zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs, |
1426 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); | 1410 | KM_USER0); |
1427 | kunmap_atomic(kaddr, KM_USER0); | ||
1428 | flush_dcache_page(page); | ||
1429 | } | 1411 | } |
1430 | /* Handle mst protected attributes. */ | 1412 | /* Handle mst protected attributes. */ |
1431 | if (NInoMstProtected(ni)) | 1413 | if (NInoMstProtected(ni)) |