commit     83f786680aec8d030184f7ced1a0a3dd8ac81764 (patch)
tree       c519509ce47883936bc4759343b1b3a01749153f
parent     750b4987b0cd4d408e54cb83a80a067cbe690feb (diff)
author     Wu Fengguang <fengguang.wu@intel.com>  2009-09-16 05:50:13 -0400
committer  Andi Kleen <ak@linux.intel.com>        2009-09-16 05:50:13 -0400
HWPOISON: Add invalidate_inode_page
Add a simple way to invalidate a single page.
This is just a refactoring of the truncate.c code.
Originally from Fengguang, modified by Andi Kleen.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
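
[Usage sketch, not part of this patch: the caller and the helper name try_drop_page below are hypothetical. It only illustrates the expected calling convention, namely that the page must be locked around the call, mirroring the invalidate_mapping_pages() loop refactored below.]

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical caller: try to drop one clean, unused pagecache page. */
static int try_drop_page(struct page *page)
{
	int ret;

	if (!trylock_page(page))
		return 0;	/* page lock contended; give up */

	/* invalidate_inode_page() returns 1 on success, 0 otherwise. */
	ret = invalidate_inode_page(page);

	unlock_page(page);
	return ret;
}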
-rw-r--r--  include/linux/mm.h |  2
-rw-r--r--  mm/truncate.c      | 26
2 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8cbc0aafd5bd..b05bbde0296d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -796,6 +796,8 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
 int truncate_inode_page(struct address_space *mapping, struct page *page);
 
+int invalidate_inode_page(struct page *page);
+
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags);
diff --git a/mm/truncate.c b/mm/truncate.c
index 2519a7c92873..ea132f7ea2d2 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -146,6 +146,24 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
 	return truncate_complete_page(mapping, page);
 }
 
+/*
+ * Safely invalidate one page from its pagecache mapping.
+ * It only drops clean, unused pages. The page must be locked.
+ *
+ * Returns 1 if the page is successfully invalidated, otherwise 0.
+ */
+int invalidate_inode_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	if (!mapping)
+		return 0;
+	if (PageDirty(page) || PageWriteback(page))
+		return 0;
+	if (page_mapped(page))
+		return 0;
+	return invalidate_complete_page(mapping, page);
+}
+
 /**
  * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
@@ -312,12 +330,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 			if (lock_failed)
 				continue;
 
-			if (PageDirty(page) || PageWriteback(page))
-				goto unlock;
-			if (page_mapped(page))
-				goto unlock;
-			ret += invalidate_complete_page(mapping, page);
-unlock:
+			ret += invalidate_inode_page(page);
+
 			unlock_page(page);
 			if (next > end)
 				break;