 include/linux/mm.h |  2 +-
 mm/truncate.c      | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf7982336103..630068184265 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -954,7 +954,7 @@ extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 extern int vmtruncate(struct inode *inode, loff_t offset);
 extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
-
+void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 
diff --git a/mm/truncate.c b/mm/truncate.c
index 18aded3a89fc..61a183b89df6 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -626,3 +626,43 @@ int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 
 	return 0;
 }
+
+/**
+ * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
+ * @inode: inode
+ * @lstart: offset of beginning of hole
+ * @lend: offset of last byte of hole
+ *
+ * This function should typically be called before the filesystem
+ * releases resources associated with the freed range (eg. deallocates
+ * blocks). This way, pagecache will always stay logically coherent
+ * with on-disk format, and the filesystem would not have to deal with
+ * situations such as writepage being called for a page that has already
+ * had its underlying blocks deallocated.
+ */
+void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+	struct address_space *mapping = inode->i_mapping;
+	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
+	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
+	/*
+	 * This rounding is currently just for example: unmap_mapping_range
+	 * expands its hole outwards, whereas we want it to contract the hole
+	 * inwards.  However, existing callers of truncate_pagecache_range are
+	 * doing their own page rounding first; and truncate_inode_pages_range
+	 * currently BUGs if lend is not pagealigned-1 (it handles partial
+	 * page at start of hole, but not partial page at end of hole).  Note
+	 * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
+	 */
+
+	/*
+	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
+	 * once (before truncating pagecache), and without "even_cows" flag:
+	 * hole-punching should not remove private COWed pages from the hole.
+	 */
+	if ((u64)unmap_end > (u64)unmap_start)
+		unmap_mapping_range(mapping, unmap_start,
+				    1 + unmap_end - unmap_start, 0);
+	truncate_inode_pages_range(mapping, lstart, lend);
+}
+EXPORT_SYMBOL(truncate_pagecache_range);
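
For illustration only, not part of this patch: a minimal sketch of how a filesystem's hole-punch path might call the new helper, following the kernel-doc above (remove pagecache before deallocating blocks) and the comment about callers doing their own page rounding. example_punch_hole() and example_free_blocks() are hypothetical names standing in for filesystem-specific code; truncate_pagecache_range(), round_up(), round_down() and PAGE_SIZE are the real kernel interfaces used in the patch.

/* Hypothetical caller sketch; not from this commit. */
#include <linux/fs.h>
#include <linux/mm.h>

/* Stand-in for the filesystem's block deallocation for [lstart, lend]. */
static int example_free_blocks(struct inode *inode, loff_t lstart, loff_t lend)
{
	return 0;	/* real fs work elided */
}

static int example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t lstart = offset;
	loff_t lend = offset + len - 1;

	/*
	 * Per the comment in truncate_pagecache_range(), callers round to
	 * page boundaries themselves, and lend must be pagealigned-1 for
	 * truncate_inode_pages_range(): shrink the range to the whole pages
	 * that lie fully inside the hole.
	 */
	lstart = round_up(lstart, PAGE_SIZE);
	lend = round_down(lend + 1, PAGE_SIZE) - 1;
	if (lend < lstart)
		return 0;	/* no whole page inside the hole */

	/*
	 * Unmap and remove the pagecache first, so writepage is never
	 * called for a page whose underlying blocks have been freed.
	 */
	truncate_pagecache_range(inode, lstart, lend);

	/* Only now release the on-disk blocks backing the hole. */
	return example_free_blocks(inode, lstart, lend);
}

The ordering mirrors the kernel-doc: pagecache is dropped before block deallocation so the cache stays logically coherent with the on-disk format.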