about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  23
-rw-r--r--  mm/migrate.c  10
2 files changed, 31 insertions, 2 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06315560bd23..e51723866fb1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -48,7 +48,8 @@ static unsigned long __initdata default_hstate_max_huge_pages;
48static unsigned long __initdata default_hstate_size; 48static unsigned long __initdata default_hstate_size;
49 49
50/* 50/*
51 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 51 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
52 * free_huge_pages, and surplus_huge_pages.
52 */ 53 */
53DEFINE_SPINLOCK(hugetlb_lock); 54DEFINE_SPINLOCK(hugetlb_lock);
54 55
@@ -3422,3 +3423,23 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
3422 return ret; 3423 return ret;
3423} 3424}
3424#endif 3425#endif
3426
3427bool isolate_huge_page(struct page *page, struct list_head *list)
3428{
3429 VM_BUG_ON(!PageHead(page));
3430 if (!get_page_unless_zero(page))
3431 return false;
3432 spin_lock(&hugetlb_lock);
3433 list_move_tail(&page->lru, list);
3434 spin_unlock(&hugetlb_lock);
3435 return true;
3436}
3437
3438void putback_active_hugepage(struct page *page)
3439{
3440 VM_BUG_ON(!PageHead(page));
3441 spin_lock(&hugetlb_lock);
3442 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3443 spin_unlock(&hugetlb_lock);
3444 put_page(page);
3445}
diff --git a/mm/migrate.c b/mm/migrate.c
index 6f0c24438bba..b44a067fee10 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,6 +100,10 @@ void putback_movable_pages(struct list_head *l)
100 struct page *page2; 100 struct page *page2;
101 101
102 list_for_each_entry_safe(page, page2, l, lru) { 102 list_for_each_entry_safe(page, page2, l, lru) {
103 if (unlikely(PageHuge(page))) {
104 putback_active_hugepage(page);
105 continue;
106 }
103 list_del(&page->lru); 107 list_del(&page->lru);
104 dec_zone_page_state(page, NR_ISOLATED_ANON + 108 dec_zone_page_state(page, NR_ISOLATED_ANON +
105 page_is_file_cache(page)); 109 page_is_file_cache(page));
@@ -1025,7 +1029,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
1025 list_for_each_entry_safe(page, page2, from, lru) { 1029 list_for_each_entry_safe(page, page2, from, lru) {
1026 cond_resched(); 1030 cond_resched();
1027 1031
1028 rc = unmap_and_move(get_new_page, private, 1032 if (PageHuge(page))
1033 rc = unmap_and_move_huge_page(get_new_page,
1034 private, page, pass > 2, mode);
1035 else
1036 rc = unmap_and_move(get_new_page, private,
1029 page, pass > 2, mode); 1037 page, pass > 2, mode);
1030 1038
1031 switch(rc) { 1039 switch(rc) {