about summary refs log tree commit diff stats
path: root/mm/rmap.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-02-01 06:05:38 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-02-01 11:53:16 -0500
commita48d07afdf18212de22b959715b16793c5a6e57a (patch)
tree36d5963c29ceb5c2f6df53036cef5c0d30383dbf /mm/rmap.c
parentb16664e44c54525be89dc07ad15a13b4eeec5634 (diff)
[PATCH] Direct Migration V9: migrate_pages() extension
Add direct migration support with fall back to swap. Direct migration support on top of the swap based page migration facility. This allows the direct migration of anonymous pages and the migration of file backed pages by dropping the associated buffers (requires writeout). Fall back to swap out if necessary. The patch is based on lots of patches from the hotplug project but the code was restructured, documented and simplified as much as possible. Note that an additional patch that defines the migrate_page() method for filesystems is necessary in order to avoid writeback for anonymous and file backed pages. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Signed-off-by: Mike Kravetz <kravetz@us.ibm.com> Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- mm/rmap.c | 21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index d85a99d28c03..13fad5fcdf79 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -52,6 +52,7 @@
52#include <linux/init.h> 52#include <linux/init.h>
53#include <linux/rmap.h> 53#include <linux/rmap.h>
54#include <linux/rcupdate.h> 54#include <linux/rcupdate.h>
55#include <linux/module.h>
55 56
56#include <asm/tlbflush.h> 57#include <asm/tlbflush.h>
57 58
@@ -541,7 +542,8 @@ void page_remove_rmap(struct page *page)
541 * Subfunctions of try_to_unmap: try_to_unmap_one called 542 * Subfunctions of try_to_unmap: try_to_unmap_one called
542 * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 543 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
543 */ 544 */
544static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) 545static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
546 int ignore_refs)
545{ 547{
546 struct mm_struct *mm = vma->vm_mm; 548 struct mm_struct *mm = vma->vm_mm;
547 unsigned long address; 549 unsigned long address;
@@ -564,7 +566,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
564 * skipped over this mm) then we should reactivate it. 566 * skipped over this mm) then we should reactivate it.
565 */ 567 */
566 if ((vma->vm_flags & VM_LOCKED) || 568 if ((vma->vm_flags & VM_LOCKED) ||
567 ptep_clear_flush_young(vma, address, pte)) { 569 (ptep_clear_flush_young(vma, address, pte)
570 && !ignore_refs)) {
568 ret = SWAP_FAIL; 571 ret = SWAP_FAIL;
569 goto out_unmap; 572 goto out_unmap;
570 } 573 }
@@ -698,7 +701,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
698 pte_unmap_unlock(pte - 1, ptl); 701 pte_unmap_unlock(pte - 1, ptl);
699} 702}
700 703
701static int try_to_unmap_anon(struct page *page) 704static int try_to_unmap_anon(struct page *page, int ignore_refs)
702{ 705{
703 struct anon_vma *anon_vma; 706 struct anon_vma *anon_vma;
704 struct vm_area_struct *vma; 707 struct vm_area_struct *vma;
@@ -709,7 +712,7 @@ static int try_to_unmap_anon(struct page *page)
709 return ret; 712 return ret;
710 713
711 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 714 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
712 ret = try_to_unmap_one(page, vma); 715 ret = try_to_unmap_one(page, vma, ignore_refs);
713 if (ret == SWAP_FAIL || !page_mapped(page)) 716 if (ret == SWAP_FAIL || !page_mapped(page))
714 break; 717 break;
715 } 718 }
@@ -726,7 +729,7 @@ static int try_to_unmap_anon(struct page *page)
726 * 729 *
727 * This function is only called from try_to_unmap for object-based pages. 730 * This function is only called from try_to_unmap for object-based pages.
728 */ 731 */
729static int try_to_unmap_file(struct page *page) 732static int try_to_unmap_file(struct page *page, int ignore_refs)
730{ 733{
731 struct address_space *mapping = page->mapping; 734 struct address_space *mapping = page->mapping;
732 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 735 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -740,7 +743,7 @@ static int try_to_unmap_file(struct page *page)
740 743
741 spin_lock(&mapping->i_mmap_lock); 744 spin_lock(&mapping->i_mmap_lock);
742 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 745 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
743 ret = try_to_unmap_one(page, vma); 746 ret = try_to_unmap_one(page, vma, ignore_refs);
744 if (ret == SWAP_FAIL || !page_mapped(page)) 747 if (ret == SWAP_FAIL || !page_mapped(page))
745 goto out; 748 goto out;
746 } 749 }
@@ -825,16 +828,16 @@ out:
825 * SWAP_AGAIN - we missed a mapping, try again later 828 * SWAP_AGAIN - we missed a mapping, try again later
826 * SWAP_FAIL - the page is unswappable 829 * SWAP_FAIL - the page is unswappable
827 */ 830 */
828int try_to_unmap(struct page *page) 831int try_to_unmap(struct page *page, int ignore_refs)
829{ 832{
830 int ret; 833 int ret;
831 834
832 BUG_ON(!PageLocked(page)); 835 BUG_ON(!PageLocked(page));
833 836
834 if (PageAnon(page)) 837 if (PageAnon(page))
835 ret = try_to_unmap_anon(page); 838 ret = try_to_unmap_anon(page, ignore_refs);
836 else 839 else
837 ret = try_to_unmap_file(page); 840 ret = try_to_unmap_file(page, ignore_refs);
838 841
839 if (!page_mapped(page)) 842 if (!page_mapped(page))
840 ret = SWAP_SUCCESS; 843 ret = SWAP_SUCCESS;