 include/linux/hugetlb.h |  4 ++++
 mm/hugetlb.c            | 23 ++++++++++++++++++++++-
 mm/migrate.c            | 10 +++++++++-
 3 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c2b1801a160b..bc8d8370cd0d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -66,6 +66,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						vm_flags_t vm_flags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
+bool isolate_huge_page(struct page *page, struct list_head *list);
+void putback_active_hugepage(struct page *page);
 void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -134,6 +136,8 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
 	return 0;
 }
 
+#define isolate_huge_page(p, l) false
+#define putback_active_hugepage(p)	do {} while (0)
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
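
The !CONFIG_HUGETLB_PAGE stubs above let generic code call these helpers
unconditionally. A minimal sketch of the calling pattern; isolate_any_page()
is a hypothetical helper, not part of this patch:

	/*
	 * Isolate one page of either kind onto a private list.  Builds
	 * with or without CONFIG_HUGETLB_PAGE: the stub PageHuge()
	 * returns 0, so the hugetlb branch becomes dead code.
	 */
	static int isolate_any_page(struct page *page, struct list_head *pagelist)
	{
		if (PageHuge(page))
			return isolate_huge_page(page, pagelist) ? 0 : -EBUSY;
		if (isolate_lru_page(page))
			return -EBUSY;
		/* isolate_lru_page() took its own reference; queue the page. */
		list_add_tail(&page->lru, pagelist);
		return 0;
	}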
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06315560bd23..e51723866fb1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -48,7 +48,8 @@ static unsigned long __initdata default_hstate_max_huge_pages;
 static unsigned long __initdata default_hstate_size;
 
 /*
- * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
+ * free_huge_pages, and surplus_huge_pages.
  */
 DEFINE_SPINLOCK(hugetlb_lock);
 
@@ -3422,3 +3423,23 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 	return ret;
 }
 #endif
+
+bool isolate_huge_page(struct page *page, struct list_head *list)
+{
+	VM_BUG_ON(!PageHead(page));
+	if (!get_page_unless_zero(page))
+		return false;
+	spin_lock(&hugetlb_lock);
+	list_move_tail(&page->lru, list);
+	spin_unlock(&hugetlb_lock);
+	return true;
+}
+
+void putback_active_hugepage(struct page *page)
+{
+	VM_BUG_ON(!PageHead(page));
+	spin_lock(&hugetlb_lock);
+	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
+	spin_unlock(&hugetlb_lock);
+	put_page(page);
+}
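
The two functions form an isolate/putback pair analogous to
isolate_lru_page()/putback_lru_page(): isolate_huge_page() pins the head
page and moves it off the hstate's hugepage_activelist onto a
caller-supplied list; putback_active_hugepage() moves it back and drops
that pin. A minimal lifecycle sketch, assuming a hypothetical caller that
is not part of this patch:

	static void isolate_putback_sketch(struct page *hpage)
	{
		LIST_HEAD(pagelist);

		/* Fails only if the refcount already hit zero, i.e. the
		 * page is on its way to being freed. */
		if (!isolate_huge_page(hpage, &pagelist))
			return;

		/* ... normally &pagelist would be fed to migrate_pages() ... */

		/* Undo: back onto hugepage_activelist, reference dropped. */
		putback_active_hugepage(hpage);
	}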
diff --git a/mm/migrate.c b/mm/migrate.c
index 6f0c24438bba..b44a067fee10 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,6 +100,10 @@ void putback_movable_pages(struct list_head *l)
 	struct page *page2;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
+		if (unlikely(PageHuge(page))) {
+			putback_active_hugepage(page);
+			continue;
+		}
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
@@ -1025,7 +1029,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 		list_for_each_entry_safe(page, page2, from, lru) {
 			cond_resched();
 
-			rc = unmap_and_move(get_new_page, private,
+			if (PageHuge(page))
+				rc = unmap_and_move_huge_page(get_new_page,
+						private, page, pass > 2, mode);
+			else
+				rc = unmap_and_move(get_new_page, private,
 					page, pass > 2, mode);
 
 			switch(rc) {
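
With both hooks in place, one list holding a mix of LRU pages and huge
pages can be pushed through the ordinary entry points. A hedged
end-to-end sketch: new_page_sketch() and migrate_mixed_list() are
hypothetical names, and the five-argument migrate_pages() call assumes
the signature of kernels of this vintage:

	static struct page *new_page_sketch(struct page *page,
					    unsigned long private, int **result)
	{
		int nid = (int)private;		/* hypothetical target node */

		/* A huge source page needs a huge destination page. */
		if (PageHuge(page))
			return alloc_huge_page_node(
					page_hstate(compound_head(page)), nid);
		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
	}

	static int migrate_mixed_list(struct list_head *pagelist, int nid)
	{
		int err;

		err = migrate_pages(pagelist, new_page_sketch,
				    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			/* Failed huge pages return to hugepage_activelist
			 * via putback_active_hugepage(); the rest go back
			 * to the LRU. */
			putback_movable_pages(pagelist);
		return err;
	}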