author		Gerald Schaefer <gerald.schaefer@de.ibm.com>	2008-08-12 18:08:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-12 19:07:27 -0400
commit		caff3a2c333e11a794308bd9a875a09b94fee24a
tree		3cc787b8b61913bc6b93b28eafd963f786d96b53	/mm/hugetlb.c
parent		02eb7eeb8990b9cbd32c7bc1ef2d431ca390e44a
hugetlb: call arch_prepare_hugepage() for surplus pages
The s390 software large page emulation implements shared page tables by using
page->index of the first tail page from a compound large page to store page
table information. This is set up in arch_prepare_hugepage(), which is called
from alloc_fresh_huge_page_node().

A similar call to arch_prepare_hugepage() is missing for surplus large pages
that are allocated in alloc_buddy_huge_page(), which breaks the software
emulation mode for (surplus) large pages on s390. This patch adds the missing
call to arch_prepare_hugepage(). It will have no effect on other architectures
where arch_prepare_hugepage() is a nop.

Also, use the correct order in the error path in alloc_fresh_huge_page_node().

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Acked-by: Nick Piggin <npiggin@suse.de>
Acked-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 757ca983fd9..92155db888b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -565,7 +565,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 			huge_page_order(h));
 	if (page) {
 		if (arch_prepare_hugepage(page)) {
-			__free_pages(page, HUGETLB_PAGE_ORDER);
+			__free_pages(page, huge_page_order(h));
 			return NULL;
 		}
 		prep_new_huge_page(h, page, nid);
@@ -665,6 +665,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 					__GFP_REPEAT|__GFP_NOWARN,
 					huge_page_order(h));
 
+	if (page && arch_prepare_hugepage(page)) {
+		__free_pages(page, huge_page_order(h));
+		return NULL;
+	}
+
 	spin_lock(&hugetlb_lock);
 	if (page) {
 		/*
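
For readers skimming the change, the sketch below condenses the surplus-page path in alloc_buddy_huge_page() as it looks after this patch. It is illustrative only: the function name alloc_surplus_page_sketch() and the GFP mask are placeholders, only the arch_prepare_hugepage()/__free_pages() sequence is taken from the hunks above, and the real function's locking, node handling, and surplus accounting are omitted.

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Illustrative sketch, not the real alloc_buddy_huge_page(): allocate a
 * compound page from the buddy allocator, then let the architecture
 * prepare it (on s390 this stores page table information in page->index
 * of the first tail page).  If that preparation fails, free the page
 * again at the huge page order -- the step this patch adds for surplus
 * pages.
 */
static struct page *alloc_surplus_page_sketch(struct hstate *h)
{
	struct page *page;

	/* GFP flags simplified; the real code also passes __GFP_REPEAT
	 * and __GFP_NOWARN, as shown in the second hunk above. */
	page = alloc_pages(GFP_HIGHUSER_MOVABLE | __GFP_COMP,
			   huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		/* Free at the compound order, mirroring the fixed error
		 * path in alloc_fresh_huge_page_node(). */
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	return page;
}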