author     Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-29 17:01:40 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-29 17:01:40 -0400
commit     87c96270aa53f5c1c67fca941a2a6061178cb0a0
tree       ba429314529431541ec894961dd5b224f28d1a22
parent     0e6a44a09800be09924707025646b3f3e3700306
try_to_unmap() - try to remove all page table mappings to a page
-rw-r--r--  mm/migrate.c  131
1 file changed, 69 insertions(+), 62 deletions(-)
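
The patch below reroutes the page-replication path onto its own copy helper and leans on try_to_unmap() to clear all mappings before the copy. For orientation only, here is a condensed sketch of the unmap/copy/remap sequence that __unmap_and_copy() follows; it is not taken from the patch, unmap_copy_remap() is an illustrative name, helpers such as remove_migration_ptes() are file-local to mm/migrate.c, and locking, reference counting, and error handling are omitted.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/migrate.h>

static int unmap_copy_remap(struct page *page, struct page *newpage)
{
        int page_was_mapped = 0;

        if (page_mapped(page)) {
                /* Replace every PTE mapping 'page' with a migration entry. */
                try_to_unmap(page,
                             TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
                page_was_mapped = 1;
        }

        /* Copy the data and page state to the new location. */
        migrate_page_copy(newpage, page);

        /* Rewrite the migration entries to point at the new page. */
        if (page_was_mapped)
                remove_migration_ptes(page, newpage);

        return MIGRATEPAGE_SUCCESS;
}

With TTU_MIGRATION, try_to_unmap() installs migration entries instead of simply clearing the PTEs, so threads that fault on the page wait for the copy to finish rather than re-mapping the old page.
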
diff --git a/mm/migrate.c b/mm/migrate.c
index ee6732e1d590..d135547b3a3f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -646,6 +646,63 @@ void migrate_page_copy(struct page *newpage, struct page *page)
                 end_page_writeback(newpage);
 }
 
+/*
+ * Copy the page to its new location
+ */
+void replicate_page_copy(struct page *newpage, struct page *page)
+{
+        if (PageHuge(page) || PageTransHuge(page))
+                copy_huge_page(newpage, page);
+        else
+                copy_highpage(newpage, page);
+
+        if (PageError(page))
+                SetPageError(newpage);
+        if (PageReferenced(page))
+                SetPageReferenced(newpage);
+        if (PageUptodate(page))
+                SetPageUptodate(newpage);
+        if (PageActive(page)) {
+                VM_BUG_ON_PAGE(PageUnevictable(page), page);
+                SetPageActive(newpage);
+        } else if (PageUnevictable(page))
+                SetPageUnevictable(newpage);
+        if (PageChecked(page))
+                SetPageChecked(newpage);
+        if (PageMappedToDisk(page))
+                SetPageMappedToDisk(newpage);
+
+        if (PageDirty(page)) {
+                BUG();
+        }
+
+        /*
+         * Copy NUMA information to the new page, to prevent over-eager
+         * future migrations of this same page.
+         */
+#ifdef CONFIG_NUMA_BALANCING
+        BUG();
+#endif
+
+        if (PageMlocked(page)) {
+                unsigned long flags;
+                int nr_pages = hpage_nr_pages(page);
+
+                local_irq_save(flags);
+                SetPageMlocked(newpage);
+                __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
+                local_irq_restore(flags);
+        }
+
+        /*
+         * If any waiters have accumulated on the new page then
+         * wake them up.
+         */
+        if (PageWriteback(newpage))
+                end_page_writeback(newpage);
+        TRACE_TASK(current, "replicate_page_copy done!\n");
+}
+
 /************************************************************
  * Migration functions
  ***********************************************************/
@@ -688,7 +745,7 @@ int replicate_page(struct address_space *mapping,
                 return rc;
 
         if (has_replica == 0)
-                migrate_page_copy(newpage, page);
+                replicate_page_copy(newpage, page);
         return MIGRATEPAGE_SUCCESS;
 }
 
@@ -754,62 +811,6 @@ EXPORT_SYMBOL(buffer_migrate_page);
 
 extern struct list_head shared_lib_pages;
 
-int replicate_buffer_page(struct address_space *mapping,
-                struct page *newpage, struct page *page, enum migrate_mode mode,
-                int has_replica)
-{
-        struct buffer_head *bh, *head;
-        int rc;
-
-        if (!page_has_buffers(page)) {
-                TRACE_TASK(current, "page does not have buffers\n");
-                return replicate_page(mapping, newpage, page, mode, has_replica);
-        }
-
-        head = page_buffers(page);
-
-        rc = replicate_page_move_mapping(mapping, newpage, page, head, mode, 0);
-
-        if (rc != MIGRATEPAGE_SUCCESS)
-                return rc;
-
-        /*
-         * In the async case, migrate_page_move_mapping locked the buffers
-         * with an IRQ-safe spinlock held. In the sync case, the buffers
-         * need to be locked now
-         */
-        if (mode != MIGRATE_ASYNC)
-                BUG_ON(!buffer_migrate_lock_buffers(head, mode));
-
-        ClearPagePrivate(page);
-        set_page_private(newpage, page_private(page));
-        set_page_private(page, 0);
-        put_page(page);
-        get_page(newpage);
-
-        bh = head;
-        do {
-                set_bh_page(bh, newpage, bh_offset(bh));
-                bh = bh->b_this_page;
-
-        } while (bh != head);
-
-        SetPagePrivate(newpage);
-
-        if (has_replica == 0)
-                migrate_page_copy(newpage, page);
-
-        bh = head;
-        do {
-                unlock_buffer(bh);
-                put_bh(bh);
-                bh = bh->b_this_page;
-
-        } while (bh != head);
-
-        return MIGRATEPAGE_SUCCESS;
-}
-
 /*
  * Writeback a page to clean the dirty state
  */
@@ -991,14 +992,11 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
         if (rc != MIGRATEPAGE_SUCCESS) {
                 newpage->mapping = NULL;
         } else {
-                if (mem_cgroup_disabled())
-                        TRACE_TASK(current, "mem_cgroup_disabled()\n");
-                mem_cgroup_migrate(page, newpage, false);
                 if (page_was_mapped) {
                         TRACE_TASK(current, "PAGE_WAS_MAPPED = 1\n");
                         remove_migration_ptes(page, newpage);
                 }
-                page->mapping = NULL;
+                //page->mapping = NULL;
         }
 
         unlock_page(newpage);
@@ -1202,16 +1200,22 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
          * just care Anon page here.
          */
         if (PageAnon(page) && !PageKsm(page)) {
+                printk(KERN_INFO "ANON but not KSM\n");
+                BUG();
                 /*
                  * Only page_lock_anon_vma_read() understands the subtleties of
                  * getting a hold on an anon_vma from outside one of its mms.
                  */
+                /*
                 anon_vma = page_get_anon_vma(page);
                 if (anon_vma) {
+                */
                         /*
                          * Anon page
                          */
+                /*
                 } else if (PageSwapCache(page)) {
+                */
                         /*
                          * We cannot be sure that the anon_vma of an unmapped
                          * swapcache page is safe to use because we don't
@@ -1224,12 +1228,14 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
                          * migrated but are not remapped when migration
                          * completes
                          */
-                } else {
+                /* } else {
                         goto out_unlock;
                 }
+                */
         }
 
         if (unlikely(isolated_balloon_page(page))) {
+                BUG();
                 /*
                  * A ballooned page does not need any special attention from
                  * physical to virtual reverse mapping procedures.
@@ -1267,6 +1273,7 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
                 try_to_unmap(page,
                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                 page_was_mapped = 1;
+                TRACE_TASK(current, "Page %d unmapped from all PTEs\n", page_to_pfn(page));
         }
 
 skip_unmap:
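
The final hunk is where the commit subject applies: try_to_unmap() with TTU_MIGRATION removes every page table mapping to the page, leaving migration entries behind so faulting tasks block until the copy completes. The following sketch is illustrative only; page_behind_migration_pte() is a hypothetical helper name, while the swapops.h accessors it uses are existing kernel APIs.

#include <linux/mm.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>

static struct page *page_behind_migration_pte(pte_t pte)
{
        swp_entry_t entry;

        if (pte_present(pte))
                return NULL;            /* an ordinary, present mapping */

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                return NULL;            /* a normal swap entry */

        /* The page currently being unmapped and copied. */
        return migration_entry_to_page(entry);
}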