diff options
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 62 |
1 file changed, 50 insertions, 12 deletions
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/init.h> | 52 | #include <linux/init.h> |
53 | #include <linux/rmap.h> | 53 | #include <linux/rmap.h> |
54 | #include <linux/rcupdate.h> | 54 | #include <linux/rcupdate.h> |
55 | #include <linux/module.h> | ||
55 | 56 | ||
56 | #include <asm/tlbflush.h> | 57 | #include <asm/tlbflush.h> |
57 | 58 | ||
@@ -205,6 +206,44 @@ out: | |||
205 | return anon_vma; | 206 | return anon_vma; |
206 | } | 207 | } |
207 | 208 | ||
209 | #ifdef CONFIG_MIGRATION | ||
210 | /* | ||
211 | * Remove an anonymous page from swap replacing the swap pte's | ||
212 | * through real pte's pointing to valid pages and then releasing | ||
213 | * the page from the swap cache. | ||
214 | * | ||
215 | * Must hold page lock on page and mmap_sem of one vma that contains | ||
216 | * the page. | ||
217 | */ | ||
218 | void remove_from_swap(struct page *page) | ||
219 | { | ||
220 | struct anon_vma *anon_vma; | ||
221 | struct vm_area_struct *vma; | ||
222 | unsigned long mapping; | ||
223 | |||
224 | if (!PageSwapCache(page)) | ||
225 | return; | ||
226 | |||
227 | mapping = (unsigned long)page->mapping; | ||
228 | |||
229 | if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0) | ||
230 | return; | ||
231 | |||
232 | /* | ||
233 | * We hold the mmap_sem lock, so there is no need to call page_lock_anon_vma. | ||
234 | */ | ||
235 | anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON); | ||
236 | spin_lock(&anon_vma->lock); | ||
237 | |||
238 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) | ||
239 | remove_vma_swap(vma, page); | ||
240 | |||
241 | spin_unlock(&anon_vma->lock); | ||
242 | delete_from_swap_cache(page); | ||
243 | } | ||
244 | EXPORT_SYMBOL(remove_from_swap); | ||
245 | #endif | ||
246 | |||
208 | /* | 247 | /* |
209 | * At what user virtual address is page expected in vma? | 248 | * At what user virtual address is page expected in vma? |
210 | */ | 249 | */ |
@@ -498,9 +537,6 @@ void page_add_new_anon_rmap(struct page *page, | |||
498 | */ | 537 | */ |
499 | void page_add_file_rmap(struct page *page) | 538 | void page_add_file_rmap(struct page *page) |
500 | { | 539 | { |
501 | BUG_ON(PageAnon(page)); | ||
502 | BUG_ON(!pfn_valid(page_to_pfn(page))); | ||
503 | |||
504 | if (atomic_inc_and_test(&page->_mapcount)) | 540 | if (atomic_inc_and_test(&page->_mapcount)) |
505 | __inc_page_state(nr_mapped); | 541 | __inc_page_state(nr_mapped); |
506 | } | 542 | } |
@@ -541,7 +577,8 @@ void page_remove_rmap(struct page *page) | |||
541 | * Subfunctions of try_to_unmap: try_to_unmap_one called | 577 | * Subfunctions of try_to_unmap: try_to_unmap_one called |
542 | * repeatedly from either try_to_unmap_anon or try_to_unmap_file. | 578 | * repeatedly from either try_to_unmap_anon or try_to_unmap_file. |
543 | */ | 579 | */ |
544 | static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) | 580 | static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, |
581 | int ignore_refs) | ||
545 | { | 582 | { |
546 | struct mm_struct *mm = vma->vm_mm; | 583 | struct mm_struct *mm = vma->vm_mm; |
547 | unsigned long address; | 584 | unsigned long address; |
@@ -564,7 +601,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) | |||
564 | * skipped over this mm) then we should reactivate it. | 601 | * skipped over this mm) then we should reactivate it. |
565 | */ | 602 | */ |
566 | if ((vma->vm_flags & VM_LOCKED) || | 603 | if ((vma->vm_flags & VM_LOCKED) || |
567 | ptep_clear_flush_young(vma, address, pte)) { | 604 | (ptep_clear_flush_young(vma, address, pte) |
605 | && !ignore_refs)) { | ||
568 | ret = SWAP_FAIL; | 606 | ret = SWAP_FAIL; |
569 | goto out_unmap; | 607 | goto out_unmap; |
570 | } | 608 | } |
@@ -698,7 +736,7 @@ static void try_to_unmap_cluster(unsigned long cursor, | |||
698 | pte_unmap_unlock(pte - 1, ptl); | 736 | pte_unmap_unlock(pte - 1, ptl); |
699 | } | 737 | } |
700 | 738 | ||
701 | static int try_to_unmap_anon(struct page *page) | 739 | static int try_to_unmap_anon(struct page *page, int ignore_refs) |
702 | { | 740 | { |
703 | struct anon_vma *anon_vma; | 741 | struct anon_vma *anon_vma; |
704 | struct vm_area_struct *vma; | 742 | struct vm_area_struct *vma; |
@@ -709,7 +747,7 @@ static int try_to_unmap_anon(struct page *page) | |||
709 | return ret; | 747 | return ret; |
710 | 748 | ||
711 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { | 749 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { |
712 | ret = try_to_unmap_one(page, vma); | 750 | ret = try_to_unmap_one(page, vma, ignore_refs); |
713 | if (ret == SWAP_FAIL || !page_mapped(page)) | 751 | if (ret == SWAP_FAIL || !page_mapped(page)) |
714 | break; | 752 | break; |
715 | } | 753 | } |
@@ -726,7 +764,7 @@ static int try_to_unmap_anon(struct page *page) | |||
726 | * | 764 | * |
727 | * This function is only called from try_to_unmap for object-based pages. | 765 | * This function is only called from try_to_unmap for object-based pages. |
728 | */ | 766 | */ |
729 | static int try_to_unmap_file(struct page *page) | 767 | static int try_to_unmap_file(struct page *page, int ignore_refs) |
730 | { | 768 | { |
731 | struct address_space *mapping = page->mapping; | 769 | struct address_space *mapping = page->mapping; |
732 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 770 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
@@ -740,7 +778,7 @@ static int try_to_unmap_file(struct page *page) | |||
740 | 778 | ||
741 | spin_lock(&mapping->i_mmap_lock); | 779 | spin_lock(&mapping->i_mmap_lock); |
742 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 780 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { |
743 | ret = try_to_unmap_one(page, vma); | 781 | ret = try_to_unmap_one(page, vma, ignore_refs); |
744 | if (ret == SWAP_FAIL || !page_mapped(page)) | 782 | if (ret == SWAP_FAIL || !page_mapped(page)) |
745 | goto out; | 783 | goto out; |
746 | } | 784 | } |
@@ -825,16 +863,16 @@ out: | |||
825 | * SWAP_AGAIN - we missed a mapping, try again later | 863 | * SWAP_AGAIN - we missed a mapping, try again later |
826 | * SWAP_FAIL - the page is unswappable | 864 | * SWAP_FAIL - the page is unswappable |
827 | */ | 865 | */ |
828 | int try_to_unmap(struct page *page) | 866 | int try_to_unmap(struct page *page, int ignore_refs) |
829 | { | 867 | { |
830 | int ret; | 868 | int ret; |
831 | 869 | ||
832 | BUG_ON(!PageLocked(page)); | 870 | BUG_ON(!PageLocked(page)); |
833 | 871 | ||
834 | if (PageAnon(page)) | 872 | if (PageAnon(page)) |
835 | ret = try_to_unmap_anon(page); | 873 | ret = try_to_unmap_anon(page, ignore_refs); |
836 | else | 874 | else |
837 | ret = try_to_unmap_file(page); | 875 | ret = try_to_unmap_file(page, ignore_refs); |
838 | 876 | ||
839 | if (!page_mapped(page)) | 877 | if (!page_mapped(page)) |
840 | ret = SWAP_SUCCESS; | 878 | ret = SWAP_SUCCESS; |