about summary refs log tree commit diff stats
path: root/mm/rmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  51
1 files changed, 42 insertions, 9 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index d85a99d28c03..df2c41c2a9a2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -52,6 +52,7 @@
52#include <linux/init.h> 52#include <linux/init.h>
53#include <linux/rmap.h> 53#include <linux/rmap.h>
54#include <linux/rcupdate.h> 54#include <linux/rcupdate.h>
55#include <linux/module.h>
55 56
56#include <asm/tlbflush.h> 57#include <asm/tlbflush.h>
57 58
@@ -205,6 +206,36 @@ out:
205 return anon_vma; 206 return anon_vma;
206} 207}
207 208
209#ifdef CONFIG_MIGRATION
210/*
211 * Remove an anonymous page from swap replacing the swap pte's
212 * through real pte's pointing to valid pages and then releasing
213 * the page from the swap cache.
214 *
215 * Must hold page lock on page.
216 */
217void remove_from_swap(struct page *page)
218{
219 struct anon_vma *anon_vma;
220 struct vm_area_struct *vma;
221
222 if (!PageAnon(page) || !PageSwapCache(page))
223 return;
224
225 anon_vma = page_lock_anon_vma(page);
226 if (!anon_vma)
227 return;
228
229 list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
230 remove_vma_swap(vma, page);
231
232 spin_unlock(&anon_vma->lock);
233
234 delete_from_swap_cache(page);
235}
236EXPORT_SYMBOL(remove_from_swap);
237#endif
238
208/* 239/*
209 * At what user virtual address is page expected in vma? 240 * At what user virtual address is page expected in vma?
210 */ 241 */
@@ -541,7 +572,8 @@ void page_remove_rmap(struct page *page)
541 * Subfunctions of try_to_unmap: try_to_unmap_one called 572 * Subfunctions of try_to_unmap: try_to_unmap_one called
542 * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 573 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
543 */ 574 */
544static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) 575static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
576 int ignore_refs)
545{ 577{
546 struct mm_struct *mm = vma->vm_mm; 578 struct mm_struct *mm = vma->vm_mm;
547 unsigned long address; 579 unsigned long address;
@@ -564,7 +596,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
564 * skipped over this mm) then we should reactivate it. 596 * skipped over this mm) then we should reactivate it.
565 */ 597 */
566 if ((vma->vm_flags & VM_LOCKED) || 598 if ((vma->vm_flags & VM_LOCKED) ||
567 ptep_clear_flush_young(vma, address, pte)) { 599 (ptep_clear_flush_young(vma, address, pte)
600 && !ignore_refs)) {
568 ret = SWAP_FAIL; 601 ret = SWAP_FAIL;
569 goto out_unmap; 602 goto out_unmap;
570 } 603 }
@@ -698,7 +731,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
698 pte_unmap_unlock(pte - 1, ptl); 731 pte_unmap_unlock(pte - 1, ptl);
699} 732}
700 733
701static int try_to_unmap_anon(struct page *page) 734static int try_to_unmap_anon(struct page *page, int ignore_refs)
702{ 735{
703 struct anon_vma *anon_vma; 736 struct anon_vma *anon_vma;
704 struct vm_area_struct *vma; 737 struct vm_area_struct *vma;
@@ -709,7 +742,7 @@ static int try_to_unmap_anon(struct page *page)
709 return ret; 742 return ret;
710 743
711 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 744 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
712 ret = try_to_unmap_one(page, vma); 745 ret = try_to_unmap_one(page, vma, ignore_refs);
713 if (ret == SWAP_FAIL || !page_mapped(page)) 746 if (ret == SWAP_FAIL || !page_mapped(page))
714 break; 747 break;
715 } 748 }
@@ -726,7 +759,7 @@ static int try_to_unmap_anon(struct page *page)
726 * 759 *
727 * This function is only called from try_to_unmap for object-based pages. 760 * This function is only called from try_to_unmap for object-based pages.
728 */ 761 */
729static int try_to_unmap_file(struct page *page) 762static int try_to_unmap_file(struct page *page, int ignore_refs)
730{ 763{
731 struct address_space *mapping = page->mapping; 764 struct address_space *mapping = page->mapping;
732 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 765 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -740,7 +773,7 @@ static int try_to_unmap_file(struct page *page)
740 773
741 spin_lock(&mapping->i_mmap_lock); 774 spin_lock(&mapping->i_mmap_lock);
742 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 775 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
743 ret = try_to_unmap_one(page, vma); 776 ret = try_to_unmap_one(page, vma, ignore_refs);
744 if (ret == SWAP_FAIL || !page_mapped(page)) 777 if (ret == SWAP_FAIL || !page_mapped(page))
745 goto out; 778 goto out;
746 } 779 }
@@ -825,16 +858,16 @@ out:
825 * SWAP_AGAIN - we missed a mapping, try again later 858 * SWAP_AGAIN - we missed a mapping, try again later
826 * SWAP_FAIL - the page is unswappable 859 * SWAP_FAIL - the page is unswappable
827 */ 860 */
828int try_to_unmap(struct page *page) 861int try_to_unmap(struct page *page, int ignore_refs)
829{ 862{
830 int ret; 863 int ret;
831 864
832 BUG_ON(!PageLocked(page)); 865 BUG_ON(!PageLocked(page));
833 866
834 if (PageAnon(page)) 867 if (PageAnon(page))
835 ret = try_to_unmap_anon(page); 868 ret = try_to_unmap_anon(page, ignore_refs);
836 else 869 else
837 ret = try_to_unmap_file(page); 870 ret = try_to_unmap_file(page, ignore_refs);
838 871
839 if (!page_mapped(page)) 872 if (!page_mapped(page))
840 ret = SWAP_SUCCESS; 873 ret = SWAP_SUCCESS;