diff options
author | Nate Diller <nate.diller@gmail.com> | 2007-05-09 05:35:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 15:30:55 -0400 |
commit | 01f2705daf5a36208e69d7cf95db9c330f843af6 (patch) | |
tree | 2d2c7a042c2466ed985f6e0950450c099f02725f | |
parent | 38a23e311b6cd389b9d8af2ea6c28c8cffbe581c (diff) |
fs: convert core functions to zero_user_page
It's very common for file systems to need to zero part or all of a page;
the simplest way is just to use kmap_atomic() and memset(). There's
actually a library function in include/linux/highmem.h that does exactly
that, but it's confusingly named memclear_highpage_flush(), which is
descriptive of *how* it does the work rather than what the *purpose* is.
So this patchset renames the function to zero_user_page(), and calls it
from the various places that currently open code it.
This first patch introduces the new function call, and converts all the
core kernel callsites, both the open-coded ones and the old
memclear_highpage_flush() ones. Following this patch is a series of
conversions for each file system individually, per AKPM, and finally a
patch deprecating the old call. The diffstat below shows the entire
patchset.
[akpm@linux-foundation.org: fix a few things]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | drivers/block/loop.c | 6 | ||||
-rw-r--r-- | fs/buffer.c | 56 | ||||
-rw-r--r-- | fs/direct-io.c | 8 | ||||
-rw-r--r-- | fs/mpage.c | 15 | ||||
-rw-r--r-- | include/linux/highmem.h | 28 | ||||
-rw-r--r-- | mm/filemap_xip.c | 7 | ||||
-rw-r--r-- | mm/truncate.c | 3 |
7 files changed, 42 insertions, 81 deletions
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index af6d7274a7cc..18cdd8c77626 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, | |||
243 | transfer_result = lo_do_transfer(lo, WRITE, page, offset, | 243 | transfer_result = lo_do_transfer(lo, WRITE, page, offset, |
244 | bvec->bv_page, bv_offs, size, IV); | 244 | bvec->bv_page, bv_offs, size, IV); |
245 | if (unlikely(transfer_result)) { | 245 | if (unlikely(transfer_result)) { |
246 | char *kaddr; | ||
247 | |||
248 | /* | 246 | /* |
249 | * The transfer failed, but we still write the data to | 247 | * The transfer failed, but we still write the data to |
250 | * keep prepare/commit calls balanced. | 248 | * keep prepare/commit calls balanced. |
251 | */ | 249 | */ |
252 | printk(KERN_ERR "loop: transfer error block %llu\n", | 250 | printk(KERN_ERR "loop: transfer error block %llu\n", |
253 | (unsigned long long)index); | 251 | (unsigned long long)index); |
254 | kaddr = kmap_atomic(page, KM_USER0); | 252 | zero_user_page(page, offset, size, KM_USER0); |
255 | memset(kaddr + offset, 0, size); | ||
256 | kunmap_atomic(kaddr, KM_USER0); | ||
257 | } | 253 | } |
258 | flush_dcache_page(page); | 254 | flush_dcache_page(page); |
259 | ret = aops->commit_write(file, page, offset, | 255 | ret = aops->commit_write(file, page, offset, |
diff --git a/fs/buffer.c b/fs/buffer.c index eb820b82a636..fc2d763a8d78 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1846,13 +1846,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page, | |||
1846 | if (block_start >= to) | 1846 | if (block_start >= to) |
1847 | break; | 1847 | break; |
1848 | if (buffer_new(bh)) { | 1848 | if (buffer_new(bh)) { |
1849 | void *kaddr; | ||
1850 | |||
1851 | clear_buffer_new(bh); | 1849 | clear_buffer_new(bh); |
1852 | kaddr = kmap_atomic(page, KM_USER0); | 1850 | zero_user_page(page, block_start, bh->b_size, KM_USER0); |
1853 | memset(kaddr+block_start, 0, bh->b_size); | ||
1854 | flush_dcache_page(page); | ||
1855 | kunmap_atomic(kaddr, KM_USER0); | ||
1856 | set_buffer_uptodate(bh); | 1851 | set_buffer_uptodate(bh); |
1857 | mark_buffer_dirty(bh); | 1852 | mark_buffer_dirty(bh); |
1858 | } | 1853 | } |
@@ -1940,10 +1935,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block) | |||
1940 | SetPageError(page); | 1935 | SetPageError(page); |
1941 | } | 1936 | } |
1942 | if (!buffer_mapped(bh)) { | 1937 | if (!buffer_mapped(bh)) { |
1943 | void *kaddr = kmap_atomic(page, KM_USER0); | 1938 | zero_user_page(page, i * blocksize, blocksize, |
1944 | memset(kaddr + i * blocksize, 0, blocksize); | 1939 | KM_USER0); |
1945 | flush_dcache_page(page); | ||
1946 | kunmap_atomic(kaddr, KM_USER0); | ||
1947 | if (!err) | 1940 | if (!err) |
1948 | set_buffer_uptodate(bh); | 1941 | set_buffer_uptodate(bh); |
1949 | continue; | 1942 | continue; |
@@ -2086,7 +2079,6 @@ int cont_prepare_write(struct page *page, unsigned offset, | |||
2086 | long status; | 2079 | long status; |
2087 | unsigned zerofrom; | 2080 | unsigned zerofrom; |
2088 | unsigned blocksize = 1 << inode->i_blkbits; | 2081 | unsigned blocksize = 1 << inode->i_blkbits; |
2089 | void *kaddr; | ||
2090 | 2082 | ||
2091 | while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { | 2083 | while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { |
2092 | status = -ENOMEM; | 2084 | status = -ENOMEM; |
@@ -2108,10 +2100,8 @@ int cont_prepare_write(struct page *page, unsigned offset, | |||
2108 | PAGE_CACHE_SIZE, get_block); | 2100 | PAGE_CACHE_SIZE, get_block); |
2109 | if (status) | 2101 | if (status) |
2110 | goto out_unmap; | 2102 | goto out_unmap; |
2111 | kaddr = kmap_atomic(new_page, KM_USER0); | 2103 | zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom, |
2112 | memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom); | 2104 | KM_USER0); |
2113 | flush_dcache_page(new_page); | ||
2114 | kunmap_atomic(kaddr, KM_USER0); | ||
2115 | generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); | 2105 | generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); |
2116 | unlock_page(new_page); | 2106 | unlock_page(new_page); |
2117 | page_cache_release(new_page); | 2107 | page_cache_release(new_page); |
@@ -2138,10 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset, | |||
2138 | if (status) | 2128 | if (status) |
2139 | goto out1; | 2129 | goto out1; |
2140 | if (zerofrom < offset) { | 2130 | if (zerofrom < offset) { |
2141 | kaddr = kmap_atomic(page, KM_USER0); | 2131 | zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0); |
2142 | memset(kaddr+zerofrom, 0, offset-zerofrom); | ||
2143 | flush_dcache_page(page); | ||
2144 | kunmap_atomic(kaddr, KM_USER0); | ||
2145 | __block_commit_write(inode, page, zerofrom, offset); | 2132 | __block_commit_write(inode, page, zerofrom, offset); |
2146 | } | 2133 | } |
2147 | return 0; | 2134 | return 0; |
@@ -2340,10 +2327,7 @@ failed: | |||
2340 | * Error recovery is pretty slack. Clear the page and mark it dirty | 2327 | * Error recovery is pretty slack. Clear the page and mark it dirty |
2341 | * so we'll later zero out any blocks which _were_ allocated. | 2328 | * so we'll later zero out any blocks which _were_ allocated. |
2342 | */ | 2329 | */ |
2343 | kaddr = kmap_atomic(page, KM_USER0); | 2330 | zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); |
2344 | memset(kaddr, 0, PAGE_CACHE_SIZE); | ||
2345 | flush_dcache_page(page); | ||
2346 | kunmap_atomic(kaddr, KM_USER0); | ||
2347 | SetPageUptodate(page); | 2331 | SetPageUptodate(page); |
2348 | set_page_dirty(page); | 2332 | set_page_dirty(page); |
2349 | return ret; | 2333 | return ret; |
@@ -2382,7 +2366,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block, | |||
2382 | loff_t i_size = i_size_read(inode); | 2366 | loff_t i_size = i_size_read(inode); |
2383 | const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; | 2367 | const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; |
2384 | unsigned offset; | 2368 | unsigned offset; |
2385 | void *kaddr; | ||
2386 | int ret; | 2369 | int ret; |
2387 | 2370 | ||
2388 | /* Is the page fully inside i_size? */ | 2371 | /* Is the page fully inside i_size? */ |
@@ -2413,10 +2396,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, | |||
2413 | * the page size, the remaining memory is zeroed when mapped, and | 2396 | * the page size, the remaining memory is zeroed when mapped, and |
2414 | * writes to that region are not written out to the file." | 2397 | * writes to that region are not written out to the file." |
2415 | */ | 2398 | */ |
2416 | kaddr = kmap_atomic(page, KM_USER0); | 2399 | zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); |
2417 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | ||
2418 | flush_dcache_page(page); | ||
2419 | kunmap_atomic(kaddr, KM_USER0); | ||
2420 | out: | 2400 | out: |
2421 | ret = mpage_writepage(page, get_block, wbc); | 2401 | ret = mpage_writepage(page, get_block, wbc); |
2422 | if (ret == -EAGAIN) | 2402 | if (ret == -EAGAIN) |
@@ -2437,7 +2417,6 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from) | |||
2437 | unsigned to; | 2417 | unsigned to; |
2438 | struct page *page; | 2418 | struct page *page; |
2439 | const struct address_space_operations *a_ops = mapping->a_ops; | 2419 | const struct address_space_operations *a_ops = mapping->a_ops; |
2440 | char *kaddr; | ||
2441 | int ret = 0; | 2420 | int ret = 0; |
2442 | 2421 | ||
2443 | if ((offset & (blocksize - 1)) == 0) | 2422 | if ((offset & (blocksize - 1)) == 0) |
@@ -2451,10 +2430,8 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from) | |||
2451 | to = (offset + blocksize) & ~(blocksize - 1); | 2430 | to = (offset + blocksize) & ~(blocksize - 1); |
2452 | ret = a_ops->prepare_write(NULL, page, offset, to); | 2431 | ret = a_ops->prepare_write(NULL, page, offset, to); |
2453 | if (ret == 0) { | 2432 | if (ret == 0) { |
2454 | kaddr = kmap_atomic(page, KM_USER0); | 2433 | zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, |
2455 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | 2434 | KM_USER0); |
2456 | flush_dcache_page(page); | ||
2457 | kunmap_atomic(kaddr, KM_USER0); | ||
2458 | /* | 2435 | /* |
2459 | * It would be more correct to call aops->commit_write() | 2436 | * It would be more correct to call aops->commit_write() |
2460 | * here, but this is more efficient. | 2437 | * here, but this is more efficient. |
@@ -2480,7 +2457,6 @@ int block_truncate_page(struct address_space *mapping, | |||
2480 | struct inode *inode = mapping->host; | 2457 | struct inode *inode = mapping->host; |
2481 | struct page *page; | 2458 | struct page *page; |
2482 | struct buffer_head *bh; | 2459 | struct buffer_head *bh; |
2483 | void *kaddr; | ||
2484 | int err; | 2460 | int err; |
2485 | 2461 | ||
2486 | blocksize = 1 << inode->i_blkbits; | 2462 | blocksize = 1 << inode->i_blkbits; |
@@ -2534,11 +2510,7 @@ int block_truncate_page(struct address_space *mapping, | |||
2534 | goto unlock; | 2510 | goto unlock; |
2535 | } | 2511 | } |
2536 | 2512 | ||
2537 | kaddr = kmap_atomic(page, KM_USER0); | 2513 | zero_user_page(page, offset, length, KM_USER0); |
2538 | memset(kaddr + offset, 0, length); | ||
2539 | flush_dcache_page(page); | ||
2540 | kunmap_atomic(kaddr, KM_USER0); | ||
2541 | |||
2542 | mark_buffer_dirty(bh); | 2514 | mark_buffer_dirty(bh); |
2543 | err = 0; | 2515 | err = 0; |
2544 | 2516 | ||
@@ -2559,7 +2531,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block, | |||
2559 | loff_t i_size = i_size_read(inode); | 2531 | loff_t i_size = i_size_read(inode); |
2560 | const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; | 2532 | const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; |
2561 | unsigned offset; | 2533 | unsigned offset; |
2562 | void *kaddr; | ||
2563 | 2534 | ||
2564 | /* Is the page fully inside i_size? */ | 2535 | /* Is the page fully inside i_size? */ |
2565 | if (page->index < end_index) | 2536 | if (page->index < end_index) |
@@ -2585,10 +2556,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, | |||
2585 | * the page size, the remaining memory is zeroed when mapped, and | 2556 | * the page size, the remaining memory is zeroed when mapped, and |
2586 | * writes to that region are not written out to the file." | 2557 | * writes to that region are not written out to the file." |
2587 | */ | 2558 | */ |
2588 | kaddr = kmap_atomic(page, KM_USER0); | 2559 | zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); |
2589 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | ||
2590 | flush_dcache_page(page); | ||
2591 | kunmap_atomic(kaddr, KM_USER0); | ||
2592 | return __block_write_full_page(inode, page, get_block, wbc); | 2560 | return __block_write_full_page(inode, page, get_block, wbc); |
2593 | } | 2561 | } |
2594 | 2562 | ||
diff --git a/fs/direct-io.c b/fs/direct-io.c index d9d0833444f5..8aa2d8b04ef1 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -867,7 +867,6 @@ static int do_direct_IO(struct dio *dio) | |||
867 | do_holes: | 867 | do_holes: |
868 | /* Handle holes */ | 868 | /* Handle holes */ |
869 | if (!buffer_mapped(map_bh)) { | 869 | if (!buffer_mapped(map_bh)) { |
870 | char *kaddr; | ||
871 | loff_t i_size_aligned; | 870 | loff_t i_size_aligned; |
872 | 871 | ||
873 | /* AKPM: eargh, -ENOTBLK is a hack */ | 872 | /* AKPM: eargh, -ENOTBLK is a hack */ |
@@ -888,11 +887,8 @@ do_holes: | |||
888 | page_cache_release(page); | 887 | page_cache_release(page); |
889 | goto out; | 888 | goto out; |
890 | } | 889 | } |
891 | kaddr = kmap_atomic(page, KM_USER0); | 890 | zero_user_page(page, block_in_page << blkbits, |
892 | memset(kaddr + (block_in_page << blkbits), | 891 | 1 << blkbits, KM_USER0); |
893 | 0, 1 << blkbits); | ||
894 | flush_dcache_page(page); | ||
895 | kunmap_atomic(kaddr, KM_USER0); | ||
896 | dio->block_in_file++; | 892 | dio->block_in_file++; |
897 | block_in_page++; | 893 | block_in_page++; |
898 | goto next_block; | 894 | goto next_block; |
diff --git a/fs/mpage.c b/fs/mpage.c index fa2441f57b41..0fb914fc2ee0 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -284,11 +284,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
284 | } | 284 | } |
285 | 285 | ||
286 | if (first_hole != blocks_per_page) { | 286 | if (first_hole != blocks_per_page) { |
287 | char *kaddr = kmap_atomic(page, KM_USER0); | 287 | zero_user_page(page, first_hole << blkbits, |
288 | memset(kaddr + (first_hole << blkbits), 0, | 288 | PAGE_CACHE_SIZE - (first_hole << blkbits), |
289 | PAGE_CACHE_SIZE - (first_hole << blkbits)); | 289 | KM_USER0); |
290 | flush_dcache_page(page); | ||
291 | kunmap_atomic(kaddr, KM_USER0); | ||
292 | if (first_hole == 0) { | 290 | if (first_hole == 0) { |
293 | SetPageUptodate(page); | 291 | SetPageUptodate(page); |
294 | unlock_page(page); | 292 | unlock_page(page); |
@@ -576,14 +574,11 @@ page_is_mapped: | |||
576 | * written out to the file." | 574 | * written out to the file." |
577 | */ | 575 | */ |
578 | unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); | 576 | unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); |
579 | char *kaddr; | ||
580 | 577 | ||
581 | if (page->index > end_index || !offset) | 578 | if (page->index > end_index || !offset) |
582 | goto confused; | 579 | goto confused; |
583 | kaddr = kmap_atomic(page, KM_USER0); | 580 | zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, |
584 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | 581 | KM_USER0); |
585 | flush_dcache_page(page); | ||
586 | kunmap_atomic(kaddr, KM_USER0); | ||
587 | } | 582 | } |
588 | 583 | ||
589 | /* | 584 | /* |
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index a515eb0afdfb..b5f2ab42d984 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -94,17 +94,27 @@ static inline void clear_highpage(struct page *page) | |||
94 | 94 | ||
95 | /* | 95 | /* |
96 | * Same but also flushes aliased cache contents to RAM. | 96 | * Same but also flushes aliased cache contents to RAM. |
97 | * | ||
98 | * This must be a macro because KM_USER0 and friends aren't defined if | ||
99 | * !CONFIG_HIGHMEM | ||
97 | */ | 100 | */ |
98 | static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) | 101 | #define zero_user_page(page, offset, size, km_type) \ |
102 | do { \ | ||
103 | void *kaddr; \ | ||
104 | \ | ||
105 | BUG_ON((offset) + (size) > PAGE_SIZE); \ | ||
106 | \ | ||
107 | kaddr = kmap_atomic(page, km_type); \ | ||
108 | memset((char *)kaddr + (offset), 0, (size)); \ | ||
109 | flush_dcache_page(page); \ | ||
110 | kunmap_atomic(kaddr, (km_type)); \ | ||
111 | } while (0) | ||
112 | |||
113 | |||
114 | static inline void memclear_highpage_flush(struct page *page, | ||
115 | unsigned int offset, unsigned int size) | ||
99 | { | 116 | { |
100 | void *kaddr; | 117 | zero_user_page(page, offset, size, KM_USER0); |
101 | |||
102 | BUG_ON(offset + size > PAGE_SIZE); | ||
103 | |||
104 | kaddr = kmap_atomic(page, KM_USER0); | ||
105 | memset((char *)kaddr + offset, 0, size); | ||
106 | flush_dcache_page(page); | ||
107 | kunmap_atomic(kaddr, KM_USER0); | ||
108 | } | 118 | } |
109 | 119 | ||
110 | #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE | 120 | #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE |
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index cbb335813ec0..1b49dab9b25d 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c | |||
@@ -434,7 +434,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from) | |||
434 | unsigned blocksize; | 434 | unsigned blocksize; |
435 | unsigned length; | 435 | unsigned length; |
436 | struct page *page; | 436 | struct page *page; |
437 | void *kaddr; | ||
438 | 437 | ||
439 | BUG_ON(!mapping->a_ops->get_xip_page); | 438 | BUG_ON(!mapping->a_ops->get_xip_page); |
440 | 439 | ||
@@ -458,11 +457,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from) | |||
458 | else | 457 | else |
459 | return PTR_ERR(page); | 458 | return PTR_ERR(page); |
460 | } | 459 | } |
461 | kaddr = kmap_atomic(page, KM_USER0); | 460 | zero_user_page(page, offset, length, KM_USER0); |
462 | memset(kaddr + offset, 0, length); | ||
463 | kunmap_atomic(kaddr, KM_USER0); | ||
464 | |||
465 | flush_dcache_page(page); | ||
466 | return 0; | 461 | return 0; |
467 | } | 462 | } |
468 | EXPORT_SYMBOL_GPL(xip_truncate_page); | 463 | EXPORT_SYMBOL_GPL(xip_truncate_page); |
diff --git a/mm/truncate.c b/mm/truncate.c index 0f4b6d18ab0e..4fbe1a2da5fb 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/swap.h> | 12 | #include <linux/swap.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
15 | #include <linux/highmem.h> | ||
15 | #include <linux/pagevec.h> | 16 | #include <linux/pagevec.h> |
16 | #include <linux/task_io_accounting_ops.h> | 17 | #include <linux/task_io_accounting_ops.h> |
17 | #include <linux/buffer_head.h> /* grr. try_to_release_page, | 18 | #include <linux/buffer_head.h> /* grr. try_to_release_page, |
@@ -46,7 +47,7 @@ void do_invalidatepage(struct page *page, unsigned long offset) | |||
46 | 47 | ||
47 | static inline void truncate_partial_page(struct page *page, unsigned partial) | 48 | static inline void truncate_partial_page(struct page *page, unsigned partial) |
48 | { | 49 | { |
49 | memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial); | 50 | zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0); |
50 | if (PagePrivate(page)) | 51 | if (PagePrivate(page)) |
51 | do_invalidatepage(page, partial); | 52 | do_invalidatepage(page, partial); |
52 | } | 53 | } |