Diffstat (limited to 'mm')
-rw-r--r--  mm/migrate.c    | 24 ++++++++++++++++++++----
-rw-r--r--  mm/page_alloc.c |  6 +++---
2 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 34d8ada053e4..37c73b902008 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -49,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
         struct zone *zone = page_zone(page);
 
         spin_lock_irq(&zone->lru_lock);
-        if (PageLRU(page)) {
+        if (PageLRU(page) && get_page_unless_zero(page)) {
                 ret = 0;
-                get_page(page);
                 ClearPageLRU(page);
                 if (PageActive(page))
                         del_page_from_active_list(zone, page);
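
The isolate_lru_page() change closes a reference-count race: between the
PageLRU() test and taking a reference, the last put_page() can drop the
count to zero, after which the page is on its way back to the buddy
allocator and a plain get_page() would wrongly resurrect it.
get_page_unless_zero() only succeeds while the count is still positive.
A minimal userspace sketch of the same inc-unless-zero idiom (struct obj
and try_grab() are illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the page's reference count. */
    struct obj { atomic_int refcount; };

    /*
     * Take a reference only if the object is still live, mirroring
     * get_page_unless_zero(): the CAS loop refuses to move the count
     * from 0 to 1, so an object already being freed stays freed.
     */
    static bool try_grab(struct obj *o)
    {
        int c = atomic_load(&o->refcount);
        while (c != 0) {
            if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                return true;   /* won the race, reference held */
            /* CAS failure reloaded c; retry with the fresh value */
        }
        return false;          /* count hit zero first: do not touch */
    }

    int main(void)
    {
        struct obj o = { .refcount = 1 };
        printf("live object:  %s\n", try_grab(&o) ? "grabbed" : "refused");
        atomic_store(&o.refcount, 0);   /* simulate the last put */
        printf("dying object: %s\n", try_grab(&o) ? "grabbed" : "refused");
        return 0;
    }

The weak CAS may fail spuriously, but it reloads the observed count on
each failure, so the loop either wins a reference on a live object or
proves the count reached zero.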
@@ -632,18 +631,35 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                         goto unlock;
                 wait_on_page_writeback(page);
         }
-
         /*
-         * Establish migration ptes or remove ptes
+         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+         * we cannot notice that anon_vma is freed while we migrates a page.
+         * This rcu_read_lock() delays freeing anon_vma pointer until the end
+         * of migration. File cache pages are no problem because of page_lock()
+         */
+        rcu_read_lock();
+        /*
+         * This is a corner case handling.
+         * When a new swap-cache is read into, it is linked to LRU
+         * and treated as swapcache but has no rmap yet.
+         * Calling try_to_unmap() against a page->mapping==NULL page is
+         * BUG. So handle it here.
          */
+        if (!page->mapping)
+                goto rcu_unlock;
+        /* Establish migration ptes or remove ptes */
         try_to_unmap(page, 1);
+
         if (!page_mapped(page))
                 rc = move_to_new_page(newpage, page);
 
         if (rc)
                 remove_migration_ptes(page, page);
+rcu_unlock:
+        rcu_read_unlock();
 
 unlock:
+
         unlock_page(page);
 
         if (rc != -EAGAIN) {
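
Two fixes share this hunk. First, once try_to_unmap() drops an anonymous
page's mapcount to zero, the anon_vma that page->mapping points into can
be freed under us; holding rcu_read_lock() from before try_to_unmap()
until after remove_migration_ptes() delays that free, because anon_vma
teardown is RCU-deferred (file-backed pages are already protected by the
page lock). Second, a freshly read swapcache page can sit on the LRU
with page->mapping == NULL, and calling try_to_unmap() on it would BUG,
hence the early goto rcu_unlock. The lifetime pattern, sketched in
userspace with the liburcu library (struct anon_vma_like and the mapping
pointer are illustrative, not the kernel's types; build with
-lurcu -lpthread):

    #include <urcu.h>       /* userspace RCU (liburcu), default flavor */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct anon_vma_like { int id; };

    static struct anon_vma_like *mapping;   /* RCU-protected pointer */

    static void *reader(void *arg)
    {
        rcu_register_thread();
        rcu_read_lock();    /* pins the object, like the migrate.c fix */
        struct anon_vma_like *av = rcu_dereference(mapping);
        if (av)             /* mirrors the !page->mapping bail-out */
            printf("reader sees anon_vma %d\n", av->id);
        rcu_read_unlock();
        rcu_unregister_thread();
        return NULL;
    }

    int main(void)
    {
        struct anon_vma_like *av = malloc(sizeof(*av));
        av->id = 1;
        rcu_register_thread();
        rcu_assign_pointer(mapping, av);

        pthread_t t;
        pthread_create(&t, NULL, reader, NULL);

        /* Writer tears the mapping down ... */
        rcu_assign_pointer(mapping, NULL);
        synchronize_rcu();  /* waits out any in-flight read-side section */
        free(av);           /* safe: no reader still holds the pointer */

        pthread_join(t, NULL);
        rcu_unregister_thread();
        return 0;
    }

If the reader's critical section starts before the writer's NULL store,
synchronize_rcu() blocks until it ends; if it starts after, the reader
sees NULL and bails out, which is exactly the role the !page->mapping
check plays above.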
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40954fb81598..6d3550ca0282 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2775,11 +2775,11 @@ unsigned long __meminit __absent_pages_in_range(int nid,
         if (i == -1)
                 return 0;
 
+        prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
+
         /* Account for ranges before physical memory on this node */
         if (early_node_map[i].start_pfn > range_start_pfn)
-                hole_pages = early_node_map[i].start_pfn - range_start_pfn;
-
-        prev_end_pfn = early_node_map[i].start_pfn;
+                hole_pages = prev_end_pfn - range_start_pfn;
 
         /* Find all holes for the zone within the node */
         for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
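
The page_alloc.c change clamps prev_end_pfn to range_end_pfn before it
feeds the hole accounting: when the node's first active region starts
beyond the queried range, the old code counted PFNs past range_end_pfn
as part of the hole. A toy recomputation with made-up numbers (querying
the range [0, 100) against a first region starting at PFN 150):

    #include <stdio.h>

    int main(void)
    {
        unsigned long range_start_pfn = 0, range_end_pfn = 100;
        unsigned long first_region_start = 150;   /* illustrative */

        /* Old code: the hole runs past the queried range. */
        unsigned long old_hole = first_region_start - range_start_pfn;

        /* New code: clamp to range_end_pfn first, as the patch does. */
        unsigned long prev_end_pfn =
                first_region_start < range_end_pfn ? first_region_start
                                                   : range_end_pfn;
        unsigned long new_hole = prev_end_pfn - range_start_pfn;

        printf("old hole_pages = %lu (overcounts by %lu)\n",
               old_hole, old_hole - new_hole);
        printf("new hole_pages = %lu (whole range is a hole)\n",
               new_hole);
        return 0;
    }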