author		Lee Schermerhorn <lee.schermerhorn@hp.com>	2009-12-14 20:58:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:12 -0500
commit		9a76db099709388ae4126c4f441358b97c6ba20c (patch)
tree		16b4a616695d8c54af8da5b731c0d67e2f06bf4e /mm/hugetlb.c
parent		4e7b8a6cef64a4c1f1194f9926f794c2b75ebdd7 (diff)
hugetlb: rework hstate_next_node_* functions
Modify the hstate_next_node* functions so that they can be called to
obtain the "start_nid". Whereas prior to this patch
hstate_next_node_to_{alloc|free}() was called unconditionally, whether or
not a huge page was successfully allocated or freed on the node, these
functions are now called only on failure to alloc/free, to advance to the
next allowed node.

Factor out the next_node_allowed() function to handle the wrap at the end
of node_online_map. In this version, the allowed nodes include all of the
online nodes.
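To make the new calling convention concrete, here is a minimal userspace sketch of the pattern, not kernel code: obtain the start node from the helper once, attempt the operation, and advance only on failure. NNODES, next_allowed(), next_to_alloc() and try_alloc_on() are hypothetical stand-ins for the online node map, next_node_allowed(), hstate_next_node_to_alloc() and the real page allocator.

```c
#include <stdio.h>
#include <stdbool.h>

#define NNODES 4			/* stand-in for the online node map */

static int next_nid_to_alloc;		/* stand-in for h->next_nid_to_alloc */

/* wrap at the end, as next_node_allowed() does over node_online_map */
static int next_allowed(int nid)
{
	return (nid + 1) % NNODES;
}

/* return the node to use now, and store its successor for the next caller */
static int next_to_alloc(void)
{
	int nid = next_nid_to_alloc;

	next_nid_to_alloc = next_allowed(nid);
	return nid;
}

static bool try_alloc_on(int nid)
{
	return nid == 2;		/* pretend only node 2 can satisfy us */
}

int main(void)
{
	int start_nid = next_to_alloc();	/* obtain the "start_nid" */
	int nid = start_nid;
	bool ok = false;

	do {
		if (try_alloc_on(nid)) {
			ok = true;
			break;			/* success: do not advance again */
		}
		nid = next_to_alloc();		/* failure: advance to next node */
	} while (nid != start_nid);

	printf("node %d: %s\n", nid, ok ? "allocated" : "failed");
	return 0;
}
```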
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Reviewed-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Eric Whitney <eric.whitney@hp.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	70
1 file changed, 45 insertions(+), 25 deletions(-)
```diff
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5d7601b0287..bffcf774f60 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -622,6 +622,20 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 }
 
 /*
+ * common helper function for hstate_next_node_to_{alloc|free}.
+ * return next node in node_online_map, wrapping at end.
+ */
+static int next_node_allowed(int nid)
+{
+	nid = next_node(nid, node_online_map);
+	if (nid == MAX_NUMNODES)
+		nid = first_node(node_online_map);
+	VM_BUG_ON(nid >= MAX_NUMNODES);
+
+	return nid;
+}
+
+/*
  * Use a helper variable to find the next node and then
  * copy it back to next_nid_to_alloc afterwards:
  * otherwise there's a window in which a racer might
```
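The wrap behaviour that next_node_allowed() centralizes can be shown on an ordinary bitmask. Below is a minimal userspace sketch, not kernel code: online_mask, next_online(), first_online() and wrap_next() are hypothetical stand-ins for node_online_map, next_node(), first_node() and next_node_allowed().

```c
#include <stdio.h>

#define MAX_NODES 8

static const unsigned online_mask = 0x2b;	/* nodes 0, 1, 3 and 5 online */

/* next set bit strictly above nid, or MAX_NODES if none (like next_node) */
static int next_online(int nid)
{
	int n;

	for (n = nid + 1; n < MAX_NODES; n++)
		if (online_mask & (1u << n))
			return n;
	return MAX_NODES;
}

/* lowest set bit (like first_node) */
static int first_online(void)
{
	return next_online(-1);
}

/* the factored-out wrap: run off the end, start over at the first node */
static int wrap_next(int nid)
{
	nid = next_online(nid);
	if (nid == MAX_NODES)
		nid = first_online();
	return nid;
}

int main(void)
{
	int i, nid = first_online();

	for (i = 0; i < 6; i++) {	/* prints: 0 1 3 5 0 1 */
		printf("%d ", nid);
		nid = wrap_next(nid);
	}
	printf("\n");
	return 0;
}
```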
```diff
@@ -634,12 +648,12 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  */
 static int hstate_next_node_to_alloc(struct hstate *h)
 {
-	int next_nid;
-	next_nid = next_node(h->next_nid_to_alloc, node_online_map);
-	if (next_nid == MAX_NUMNODES)
-		next_nid = first_node(node_online_map);
+	int nid, next_nid;
+
+	nid = h->next_nid_to_alloc;
+	next_nid = next_node_allowed(nid);
 	h->next_nid_to_alloc = next_nid;
-	return next_nid;
+	return nid;
 }
 
 static int alloc_fresh_huge_page(struct hstate *h)
```
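The reworked function behaves like a post-increment: it returns the node the caller should use now and stores the following allowed node in h->next_nid_to_alloc for the next caller. Callers can therefore obtain start_nid and advance on failure through one function, instead of reading the field directly and open-coding the wrap.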
```diff
@@ -649,15 +663,17 @@ static int alloc_fresh_huge_page(struct hstate *h)
 	int next_nid;
 	int ret = 0;
 
-	start_nid = h->next_nid_to_alloc;
+	start_nid = hstate_next_node_to_alloc(h);
 	next_nid = start_nid;
 
 	do {
 		page = alloc_fresh_huge_page_node(h, next_nid);
-		if (page)
+		if (page) {
 			ret = 1;
+			break;
+		}
 		next_nid = hstate_next_node_to_alloc(h);
-	} while (!page && next_nid != start_nid);
+	} while (next_nid != start_nid);
 
 	if (ret)
 		count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -668,17 +684,19 @@ static int alloc_fresh_huge_page(struct hstate *h)
 }
 
 /*
- * helper for free_pool_huge_page() - find next node
- * from which to free a huge page
+ * helper for free_pool_huge_page() - return the next node
+ * from which to free a huge page.  Advance the next node id
+ * whether or not we find a free huge page to free so that the
+ * next attempt to free addresses the next node.
  */
 static int hstate_next_node_to_free(struct hstate *h)
 {
-	int next_nid;
-	next_nid = next_node(h->next_nid_to_free, node_online_map);
-	if (next_nid == MAX_NUMNODES)
-		next_nid = first_node(node_online_map);
+	int nid, next_nid;
+
+	nid = h->next_nid_to_free;
+	next_nid = next_node_allowed(nid);
 	h->next_nid_to_free = next_nid;
-	return next_nid;
+	return nid;
 }
 
 /*
```
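Advancing h->next_nid_to_free even when a node had nothing to free is what keeps pool shrinking interleaved across nodes: repeated calls to free_pool_huge_page() walk the whole map rather than repeatedly probing whichever node happens to come first.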
```diff
@@ -693,7 +711,7 @@ static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
 	int next_nid;
 	int ret = 0;
 
-	start_nid = h->next_nid_to_free;
+	start_nid = hstate_next_node_to_free(h);
 	next_nid = start_nid;
 
 	do {
@@ -715,9 +733,10 @@ static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
 			}
 			update_and_free_page(h, page);
 			ret = 1;
+			break;
 		}
 		next_nid = hstate_next_node_to_free(h);
-	} while (!ret && next_nid != start_nid);
+	} while (next_nid != start_nid);
 
 	return ret;
 }
```
```diff
@@ -1028,10 +1047,9 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
 		void *addr;
 
 		addr = __alloc_bootmem_node_nopanic(
-				NODE_DATA(h->next_nid_to_alloc),
+				NODE_DATA(hstate_next_node_to_alloc(h)),
 				huge_page_size(h), huge_page_size(h), 0);
 
-		hstate_next_node_to_alloc(h);
 		if (addr) {
 			/*
 			 * Use the beginning of the huge page to store the
```
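In the bootmem path the helper's new semantics let the separate advance be folded away: hstate_next_node_to_alloc(h) now yields the node to allocate from and moves the cursor as a side effect, so the standalone call that used to follow the allocation is simply deleted.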
```diff
@@ -1167,29 +1185,31 @@ static int adjust_pool_surplus(struct hstate *h, int delta)
 	VM_BUG_ON(delta != -1 && delta != 1);
 
 	if (delta < 0)
-		start_nid = h->next_nid_to_alloc;
+		start_nid = hstate_next_node_to_alloc(h);
 	else
-		start_nid = h->next_nid_to_free;
+		start_nid = hstate_next_node_to_free(h);
 	next_nid = start_nid;
 
 	do {
 		int nid = next_nid;
 		if (delta < 0) {
-			next_nid = hstate_next_node_to_alloc(h);
 			/*
 			 * To shrink on this node, there must be a surplus page
 			 */
-			if (!h->surplus_huge_pages_node[nid])
+			if (!h->surplus_huge_pages_node[nid]) {
+				next_nid = hstate_next_node_to_alloc(h);
 				continue;
+			}
 		}
 		if (delta > 0) {
-			next_nid = hstate_next_node_to_free(h);
 			/*
 			 * Surplus cannot exceed the total number of pages
 			 */
 			if (h->surplus_huge_pages_node[nid] >=
-						h->nr_huge_pages_node[nid])
+						h->nr_huge_pages_node[nid]) {
+				next_nid = hstate_next_node_to_free(h);
 				continue;
+			}
 		}
 
 		h->surplus_huge_pages += delta;
```
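adjust_pool_surplus() follows the same convention: start_nid now comes from the helper, which has already advanced the cursor once, and the explicit advances move from the top of each branch into the skip paths, so the cursor is pushed forward again only when a candidate node has to be passed over.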