author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2012-07-31 19:42:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2012-07-31 21:42:41 -0400
commit     79dbb2368ae3515fad9c8b7c8f831cd86be59b1d (patch)
tree       c25369353f439d2af5b78899cb91c43520ea48d3 /mm
parent     585e27ea6d2e71d0091443c39a00a35e6a5c5e8f (diff)
hugetlb: move all the in use pages to active list
When we fail to allocate pages from the reserve pool, hugetlb tries to
allocate huge pages using alloc_buddy_huge_page.  Add these pages to the
active list.  The huge page we allocate when soft-offlining the old page
also needs to be added to the active list.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
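
For context, the first hunk of the diff below leaves alloc_huge_page_node()
looking roughly as follows.  This is a sketch reassembled from the hunk
itself; the function's opening lines (the struct page declaration and the
initial spin_lock(&hugetlb_lock) that pairs with the spin_unlock shown in
the hunk) are assumed from context rather than taken from this patch:

struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	/* Try the reserved pool first (assumed preamble, not part of this patch). */
	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		/* Reserve pool is empty: fall back to the buddy allocator. */
		page = alloc_buddy_huge_page(h, nid);
		if (page) {
			/* New in this patch: account the fresh page as in use. */
			spin_lock(&hugetlb_lock);
			list_move(&page->lru, &h->hugepage_activelist);
			spin_unlock(&hugetlb_lock);
		}
	}

	return page;
}

Taking hugetlb_lock only around the list_move keeps the buddy allocation
itself outside the lock, matching the existing pattern around
dequeue_huge_page_node().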
Diffstat (limited to 'mm')
-rw-r--r--   mm/hugetlb.c   11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c57740bb203a..ec7b86ebf9d9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -928,8 +928,14 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 	page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
-	if (!page)
+	if (!page) {
 		page = alloc_buddy_huge_page(h, nid);
+		if (page) {
+			spin_lock(&hugetlb_lock);
+			list_move(&page->lru, &h->hugepage_activelist);
+			spin_unlock(&hugetlb_lock);
+		}
+	}
 
 	return page;
 }
@@ -1155,6 +1161,9 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			hugepage_subpool_put_pages(spool, chg);
 			return ERR_PTR(-ENOSPC);
 		}
+		spin_lock(&hugetlb_lock);
+		list_move(&page->lru, &h->hugepage_activelist);
+		spin_unlock(&hugetlb_lock);
 	}
 
 	set_page_private(page, (unsigned long)spool);