Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 32
1 file changed, 27 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d8dde07a3656..7197f9bcd384 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1498,7 +1498,7 @@ gotten:
 		update_mmu_cache(vma, address, entry);
 		lazy_mmu_prot_update(entry);
 		lru_cache_add_active(new_page);
-		page_add_anon_rmap(new_page, vma, address);
+		page_add_new_anon_rmap(new_page, vma, address);
 
 		/* Free the old page.. */
 		new_page = old_page;
@@ -1770,9 +1770,32 @@ out_big:
 out_busy:
 	return -ETXTBSY;
 }
-
 EXPORT_SYMBOL(vmtruncate);
 
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * If the underlying filesystem is not going to provide
+	 * a way to truncate a range of blocks (punch a hole) -
+	 * we should return failure right now.
+	 */
+	if (!inode->i_op || !inode->i_op->truncate_range)
+		return -ENOSYS;
+
+	down(&inode->i_sem);
+	down_write(&inode->i_alloc_sem);
+	unmap_mapping_range(mapping, offset, (end - offset), 1);
+	truncate_inode_pages_range(mapping, offset, end);
+	inode->i_op->truncate_range(inode, offset, end);
+	up_write(&inode->i_alloc_sem);
+	up(&inode->i_sem);
+
+	return 0;
+}
+EXPORT_SYMBOL(vmtruncate_range);
+
 /*
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
@@ -1954,8 +1977,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto release;
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(page);
-		SetPageReferenced(page);
-		page_add_anon_rmap(page, vma, address);
+		page_add_new_anon_rmap(page, vma, address);
 	} else {
 		/* Map the ZERO_PAGE - vm_page_prot is readonly */
 		page = ZERO_PAGE(address);
@@ -2086,7 +2108,7 @@ retry:
 	if (anon) {
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(new_page);
-		page_add_anon_rmap(new_page, vma, address);
+		page_add_new_anon_rmap(new_page, vma, address);
 	} else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(new_page);
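
For reference, a minimal sketch of how a caller might use the vmtruncate_range() helper introduced above to punch a hole in a file. The wrapper function and its name are hypothetical illustrations and are not part of this patch; only vmtruncate_range() itself comes from the diff.

#include <linux/fs.h>		/* struct inode, loff_t */
#include <linux/kernel.h>	/* printk */

/*
 * Illustrative only (not from this patch): ask the filesystem to drop
 * the byte range offset..end of the file behind 'inode'.  This simply
 * wraps the new vmtruncate_range(); its prototype is assumed to be
 * visible from a header once the full patch declares it there.
 */
static int example_punch_hole(struct inode *inode, loff_t offset, loff_t end)
{
	int err;

	err = vmtruncate_range(inode, offset, end);
	if (err == -ENOSYS)
		/* Filesystem provides no ->truncate_range operation. */
		printk(KERN_INFO "hole punching not supported on this filesystem\n");

	return err;
}

A real caller would compute offset and end from the affected mapping before calling in; on filesystems that do not implement ->truncate_range, the -ENOSYS return lets it fail cleanly, exactly as the comment inside vmtruncate_range() states.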