diff options
author | Nick Piggin <npiggin@suse.de> | 2009-09-16 05:50:12 -0400 |
---|---|---|
committer | Andi Kleen <ak@linux.intel.com> | 2009-09-16 05:50:12 -0400 |
commit | 750b4987b0cd4d408e54cb83a80a067cbe690feb (patch) | |
tree | 1372d0f64da3575eb93bac284c73e548e23b9ea6 | |
parent | 2a7684a23e9c263c2a1e8b2c0027ad1836a0f9df (diff) |
HWPOISON: Refactor truncate to allow direct truncating of page v2
Extract truncate_inode_page() out of the truncate path so that
it can be used by memory-failure.c
[AK: description, headers, fix typos]
v2: Some white space changes from Fengguang Wu
Signed-off-by: Andi Kleen <ak@linux.intel.com>
-rw-r--r-- | include/linux/mm.h | 2 | ||||
-rw-r--r-- | mm/truncate.c | 29 |
2 files changed, 17 insertions, 14 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 082b68cb5ffe..8cbc0aafd5bd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -794,6 +794,8 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, | |||
794 | extern int vmtruncate(struct inode * inode, loff_t offset); | 794 | extern int vmtruncate(struct inode * inode, loff_t offset); |
795 | extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); | 795 | extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); |
796 | 796 | ||
797 | int truncate_inode_page(struct address_space *mapping, struct page *page); | ||
798 | |||
797 | #ifdef CONFIG_MMU | 799 | #ifdef CONFIG_MMU |
798 | extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | 800 | extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
799 | unsigned long address, unsigned int flags); | 801 | unsigned long address, unsigned int flags); |
diff --git a/mm/truncate.c b/mm/truncate.c index ccc3ecf7cb98..2519a7c92873 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page); | |||
93 | * its lock, b) when a concurrent invalidate_mapping_pages got there first and | 93 | * its lock, b) when a concurrent invalidate_mapping_pages got there first and |
94 | * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space. | 94 | * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space. |
95 | */ | 95 | */ |
96 | static void | 96 | static int |
97 | truncate_complete_page(struct address_space *mapping, struct page *page) | 97 | truncate_complete_page(struct address_space *mapping, struct page *page) |
98 | { | 98 | { |
99 | if (page->mapping != mapping) | 99 | if (page->mapping != mapping) |
100 | return; | 100 | return -EIO; |
101 | 101 | ||
102 | if (page_has_private(page)) | 102 | if (page_has_private(page)) |
103 | do_invalidatepage(page, 0); | 103 | do_invalidatepage(page, 0); |
@@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) | |||
108 | remove_from_page_cache(page); | 108 | remove_from_page_cache(page); |
109 | ClearPageMappedToDisk(page); | 109 | ClearPageMappedToDisk(page); |
110 | page_cache_release(page); /* pagecache ref */ | 110 | page_cache_release(page); /* pagecache ref */ |
111 | return 0; | ||
111 | } | 112 | } |
112 | 113 | ||
113 | /* | 114 | /* |
@@ -135,6 +136,16 @@ invalidate_complete_page(struct address_space *mapping, struct page *page) | |||
135 | return ret; | 136 | return ret; |
136 | } | 137 | } |
137 | 138 | ||
139 | int truncate_inode_page(struct address_space *mapping, struct page *page) | ||
140 | { | ||
141 | if (page_mapped(page)) { | ||
142 | unmap_mapping_range(mapping, | ||
143 | (loff_t)page->index << PAGE_CACHE_SHIFT, | ||
144 | PAGE_CACHE_SIZE, 0); | ||
145 | } | ||
146 | return truncate_complete_page(mapping, page); | ||
147 | } | ||
148 | |||
138 | /** | 149 | /** |
139 | * truncate_inode_pages - truncate range of pages specified by start & end byte offsets | 150 | * truncate_inode_pages - truncate range of pages specified by start & end byte offsets |
140 | * @mapping: mapping to truncate | 151 | * @mapping: mapping to truncate |
@@ -196,12 +207,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
196 | unlock_page(page); | 207 | unlock_page(page); |
197 | continue; | 208 | continue; |
198 | } | 209 | } |
199 | if (page_mapped(page)) { | 210 | truncate_inode_page(mapping, page); |
200 | unmap_mapping_range(mapping, | ||
201 | (loff_t)page_index<<PAGE_CACHE_SHIFT, | ||
202 | PAGE_CACHE_SIZE, 0); | ||
203 | } | ||
204 | truncate_complete_page(mapping, page); | ||
205 | unlock_page(page); | 211 | unlock_page(page); |
206 | } | 212 | } |
207 | pagevec_release(&pvec); | 213 | pagevec_release(&pvec); |
@@ -238,15 +244,10 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
238 | break; | 244 | break; |
239 | lock_page(page); | 245 | lock_page(page); |
240 | wait_on_page_writeback(page); | 246 | wait_on_page_writeback(page); |
241 | if (page_mapped(page)) { | 247 | truncate_inode_page(mapping, page); |
242 | unmap_mapping_range(mapping, | ||
243 | (loff_t)page->index<<PAGE_CACHE_SHIFT, | ||
244 | PAGE_CACHE_SIZE, 0); | ||
245 | } | ||
246 | if (page->index > next) | 248 | if (page->index > next) |
247 | next = page->index; | 249 | next = page->index; |
248 | next++; | 250 | next++; |
249 | truncate_complete_page(mapping, page); | ||
250 | unlock_page(page); | 251 | unlock_page(page); |
251 | } | 252 | } |
252 | pagevec_release(&pvec); | 253 | pagevec_release(&pvec); |