Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 106 ++++++++++++++++++++++++++++--------------
 1 file changed, 67 insertions(+), 39 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e83ad2c9228c..a56e6f3ce979 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -578,41 +578,6 @@ static void free_huge_page(struct page *page)
 	hugetlb_put_quota(mapping, 1);
 }
 
-/*
- * Increment or decrement surplus_huge_pages. Keep node-specific counters
- * balanced by operating on them in a round-robin fashion.
- * Returns 1 if an adjustment was made.
- */
-static int adjust_pool_surplus(struct hstate *h, int delta)
-{
-	static int prev_nid;
-	int nid = prev_nid;
-	int ret = 0;
-
-	VM_BUG_ON(delta != -1 && delta != 1);
-	do {
-		nid = next_node(nid, node_online_map);
-		if (nid == MAX_NUMNODES)
-			nid = first_node(node_online_map);
-
-		/* To shrink on this node, there must be a surplus page */
-		if (delta < 0 && !h->surplus_huge_pages_node[nid])
-			continue;
-		/* Surplus cannot exceed the total number of pages */
-		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
-						h->nr_huge_pages_node[nid])
-			continue;
-
-		h->surplus_huge_pages += delta;
-		h->surplus_huge_pages_node[nid] += delta;
-		ret = 1;
-		break;
-	} while (nid != prev_nid);
-
-	prev_nid = nid;
-	return ret;
-}
-
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
 	set_compound_page_dtor(page, free_huge_page);
@@ -623,6 +588,34 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	put_page(page); /* free it into the hugepage allocator */
 }
 
+static void prep_compound_gigantic_page(struct page *page, unsigned long order)
+{
+	int i;
+	int nr_pages = 1 << order;
+	struct page *p = page + 1;
+
+	/* we rely on prep_new_huge_page to set the destructor */
+	set_compound_order(page, order);
+	__SetPageHead(page);
+	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+		__SetPageTail(p);
+		p->first_page = page;
+	}
+}
+
+int PageHuge(struct page *page)
+{
+	compound_page_dtor *dtor;
+
+	if (!PageCompound(page))
+		return 0;
+
+	page = compound_head(page);
+	dtor = get_compound_page_dtor(page);
+
+	return dtor == free_huge_page;
+}
+
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
 	struct page *page;
@@ -630,7 +623,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 	if (h->order >= MAX_ORDER)
 		return NULL;
 
-	page = alloc_pages_node(nid,
+	page = alloc_pages_exact_node(nid,
 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
 		__GFP_REPEAT|__GFP_NOWARN,
 		huge_page_order(h));
@@ -649,7 +642,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 		 * Use a helper variable to find the next node and then
 		 * copy it back to hugetlb_next_nid afterwards:
 		 * otherwise there's a window in which a racer might
-		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+		 * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
 		 * But we don't need to use a spin_lock here: it really
 		 * doesn't matter if occasionally a racer chooses the
 		 * same nid as we do. Move nid forward in the mask even
@@ -875,7 +868,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	 * can no longer free unreserved surplus pages. This occurs when
 	 * the nodes with surplus pages have no free pages.
 	 */
-	unsigned long remaining_iterations = num_online_nodes();
+	unsigned long remaining_iterations = nr_online_nodes;
 
 	/* Uncommit the reservation */
 	h->resv_huge_pages -= unused_resv_pages;
@@ -904,7 +897,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 			h->surplus_huge_pages--;
 			h->surplus_huge_pages_node[nid]--;
 			nr_pages--;
-			remaining_iterations = num_online_nodes();
+			remaining_iterations = nr_online_nodes;
 		}
 	}
 }
@@ -1140,6 +1133,41 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
 }
 #endif
 
+/*
+ * Increment or decrement surplus_huge_pages. Keep node-specific counters
+ * balanced by operating on them in a round-robin fashion.
+ * Returns 1 if an adjustment was made.
+ */
+static int adjust_pool_surplus(struct hstate *h, int delta)
+{
+	static int prev_nid;
+	int nid = prev_nid;
+	int ret = 0;
+
+	VM_BUG_ON(delta != -1 && delta != 1);
+	do {
+		nid = next_node(nid, node_online_map);
+		if (nid == MAX_NUMNODES)
+			nid = first_node(node_online_map);
+
+		/* To shrink on this node, there must be a surplus page */
+		if (delta < 0 && !h->surplus_huge_pages_node[nid])
+			continue;
+		/* Surplus cannot exceed the total number of pages */
+		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
+						h->nr_huge_pages_node[nid])
+			continue;
+
+		h->surplus_huge_pages += delta;
+		h->surplus_huge_pages_node[nid] += delta;
+		ret = 1;
+		break;
+	} while (nid != prev_nid);
+
+	prev_nid = nid;
+	return ret;
+}
+
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
 {
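
For illustration only, the round-robin balancing that adjust_pool_surplus() performs (moved, not changed, by this patch) can be modelled as a small self-contained user-space program. Everything below -- MAX_NODES, the online[]/surplus[]/total[] arrays, next_online_node() and adjust_pool_surplus_model() -- is a stand-in for the kernel's node_online_map and per-node hstate counters, not kernel API.

/*
 * User-space sketch (not kernel code) of the round-robin surplus
 * adjustment done by adjust_pool_surplus() above.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_NODES 4

static int online[MAX_NODES]  = { 1, 1, 1, 1 };	/* which "nodes" are online */
static int surplus[MAX_NODES] = { 0, 2, 0, 1 };	/* surplus huge pages per node */
static int total[MAX_NODES]   = { 4, 4, 4, 4 };	/* total huge pages per node */

/* Advance to the next online node, wrapping like next_node()/first_node(). */
static int next_online_node(int nid)
{
	do {
		nid = (nid + 1) % MAX_NODES;
	} while (!online[nid]);
	return nid;
}

/* Returns 1 if some node's surplus counter could be adjusted by delta (+1 or -1). */
static int adjust_pool_surplus_model(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	assert(delta == -1 || delta == 1);
	do {
		nid = next_online_node(nid);

		/* To shrink on this node, there must be a surplus page. */
		if (delta < 0 && !surplus[nid])
			continue;
		/* Surplus cannot exceed the total number of pages. */
		if (delta > 0 && surplus[nid] >= total[nid])
			continue;

		surplus[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;	/* the next call resumes after the node used here */
	return ret;
}

int main(void)
{
	int i;

	/* Shrink twice, then grow once; successive calls rotate across nodes. */
	for (i = 0; i < 3; i++) {
		int delta = (i < 2) ? -1 : 1;
		printf("delta %+d -> adjusted=%d\n", delta, adjust_pool_surplus_model(delta));
	}
	return 0;
}

Because prev_nid is static and each call starts searching at the node after the one last touched, repeated shrinks or grows are spread across nodes rather than all landing on node 0, which is the balancing property the kernel comment describes.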