Diffstat (limited to 'mm/truncate.c')
-rw-r--r--  mm/truncate.c | 60
1 file changed, 58 insertions(+), 2 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index a654928323d..f4edbc179d1 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -17,6 +17,32 @@
 				   do_invalidatepage */
 
 
+/**
+ * do_invalidatepage - invalidate part or all of a page
+ * @page: the page which is affected
+ * @offset: the index of the truncation point
+ *
+ * do_invalidatepage() is called when all or part of the page has become
+ * invalidated by a truncate operation.
+ *
+ * do_invalidatepage() does not have to release all buffers, but it must
+ * ensure that no dirty buffer is left outside @offset and that no I/O
+ * is underway against any of the blocks which are outside the truncation
+ * point, because the caller is about to free (and possibly reuse) those
+ * blocks on-disk.
+ */
+void do_invalidatepage(struct page *page, unsigned long offset)
+{
+	void (*invalidatepage)(struct page *, unsigned long);
+	invalidatepage = page->mapping->a_ops->invalidatepage;
+#ifdef CONFIG_BLOCK
+	if (!invalidatepage)
+		invalidatepage = block_invalidatepage;
+#endif
+	if (invalidatepage)
+		(*invalidatepage)(page, offset);
+}
+
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
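
The do_invalidatepage() helper added above looks up the filesystem's ->invalidatepage method in the page's address_space_operations and, under CONFIG_BLOCK, falls back to block_invalidatepage() when no method is provided. Below is a minimal user-space sketch of that optional-hook-with-fallback dispatch pattern; the names (page_stub, ops_stub, do_invalidatepage_stub, default_invalidatepage) are illustrative stand-ins, not kernel API.

#include <stdio.h>

/* Illustrative stand-ins for struct page and the a_ops table. */
struct page_stub {
	const char *name;
};

struct ops_stub {
	/* optional hook, may be NULL -- like a_ops->invalidatepage */
	void (*invalidatepage)(struct page_stub *, unsigned long);
};

/* Plays the role of block_invalidatepage(), the CONFIG_BLOCK fallback. */
static void default_invalidatepage(struct page_stub *p, unsigned long offset)
{
	printf("fallback invalidate of %s from offset %lu\n", p->name, offset);
}

static void do_invalidatepage_stub(struct ops_stub *ops, struct page_stub *p,
				   unsigned long offset)
{
	void (*invalidatepage)(struct page_stub *, unsigned long);

	invalidatepage = ops->invalidatepage;
	if (!invalidatepage)
		invalidatepage = default_invalidatepage;	/* fallback */
	(*invalidatepage)(p, offset);
}

int main(void)
{
	struct page_stub page = { "page0" };
	struct ops_stub no_hook = { NULL };

	/* no hook installed, so the fallback runs */
	do_invalidatepage_stub(&no_hook, &page, 512);
	return 0;
}
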
@@ -261,9 +287,39 @@ unsigned long invalidate_inode_pages(struct address_space *mapping)
 {
 	return invalidate_mapping_pages(mapping, 0, ~0UL);
 }
-
 EXPORT_SYMBOL(invalidate_inode_pages);
 
+/*
+ * This is like invalidate_complete_page(), except it ignores the page's
+ * refcount.  We do this because invalidate_inode_pages2() needs stronger
+ * invalidation guarantees, and cannot afford to leave pages behind because
+ * shrink_list() has a temp ref on them, or because they're transiently sitting
+ * in the lru_cache_add() pagevecs.
+ */
+static int
+invalidate_complete_page2(struct address_space *mapping, struct page *page)
+{
+	if (page->mapping != mapping)
+		return 0;
+
+	if (PagePrivate(page) && !try_to_release_page(page, 0))
+		return 0;
+
+	write_lock_irq(&mapping->tree_lock);
+	if (PageDirty(page))
+		goto failed;
+
+	BUG_ON(PagePrivate(page));
+	__remove_from_page_cache(page);
+	write_unlock_irq(&mapping->tree_lock);
+	ClearPageUptodate(page);
+	page_cache_release(page);	/* pagecache ref */
+	return 1;
+failed:
+	write_unlock_irq(&mapping->tree_lock);
+	return 0;
+}
+
267/** 323/**
268 * invalidate_inode_pages2_range - remove range of pages from an address_space 324 * invalidate_inode_pages2_range - remove range of pages from an address_space
269 * @mapping: the address_space 325 * @mapping: the address_space
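
The new invalidate_complete_page2() follows a take-lock, check, commit-or-bail shape: it takes mapping->tree_lock with interrupts disabled, refuses to touch a dirty page, and otherwise removes the page from the page cache, clears PG_uptodate and drops the pagecache reference, returning 1 on success and 0 on failure. A rough user-space sketch of that same shape, using a plain mutex in place of the tree lock; cache_entry and invalidate_entry are made-up names for illustration only.

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for a page-cache slot; not kernel API. */
struct cache_entry {
	pthread_mutex_t lock;	/* plays the role of mapping->tree_lock */
	bool dirty;		/* PageDirty() analogue */
	bool present;		/* "still in the page cache" analogue */
};

/*
 * Same shape as invalidate_complete_page2(): take the lock, refuse to drop
 * a dirty entry, otherwise remove it.  Returns 1 on success, 0 on failure,
 * matching the kernel helper's convention.
 */
static int invalidate_entry(struct cache_entry *e)
{
	pthread_mutex_lock(&e->lock);
	if (e->dirty) {
		pthread_mutex_unlock(&e->lock);
		return 0;		/* failed: caller decides what to do */
	}
	e->present = false;		/* __remove_from_page_cache() analogue */
	pthread_mutex_unlock(&e->lock);
	return 1;
}
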
@@ -330,7 +386,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				}
 			}
 			was_dirty = test_clear_page_dirty(page);
-			if (!invalidate_complete_page(mapping, page)) {
+			if (!invalidate_complete_page2(mapping, page)) {
 				if (was_dirty)
 					set_page_dirty(page);
 				ret = -EIO;
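
The final hunk switches the caller from invalidate_complete_page() to the stronger invalidate_complete_page2(), and the surrounding logic matters: the dirty bit is cleared up front with test_clear_page_dirty(), and if the invalidation fails the bit is restored and the range operation reports -EIO. Continuing the user-space sketch above (it compiles together with the previous snippet; try_invalidate is another made-up name), the caller side looks roughly like this:

#include <errno.h>

/*
 * Caller-side pattern from the hunk above: clear the dirty flag first,
 * attempt the strong invalidation, and on failure put the dirty flag back
 * and report -EIO.
 */
static int try_invalidate(struct cache_entry *e)
{
	bool was_dirty = e->dirty;	/* test_clear_page_dirty() analogue */
	int ret = 0;

	e->dirty = false;
	if (!invalidate_entry(e)) {
		if (was_dirty)
			e->dirty = true;	/* set_page_dirty() analogue */
		ret = -EIO;
	}
	return ret;
}

int main(void)
{
	static struct cache_entry e = { PTHREAD_MUTEX_INITIALIZER, true, true };

	/* a dirty entry cannot be invalidated, so this reports -EIO */
	return try_invalidate(&e) == -EIO ? 0 : 1;
}
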