author     Lee Schermerhorn <lee.schermerhorn@hp.com>    2009-09-21 20:01:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-09-22 10:17:26 -0400
commit     685f345708096ed21078aa44a6f4a6e6d1d1b580
tree       9ba51e5721d4749317be33623a382940c0299bec /mm/hugetlb.c
parent     e8c5c8249878fb6564125680a1d15e06adbd5639
hugetlb: use free_pool_huge_page() to return unused surplus pages
Use the [modified] free_pool_huge_page() function to return unused
surplus pages.  Because freeing of unused surplus pages and freeing of
persistent huge pages [from set_max_huge_pages] now share the same node
id "cursor" (sketched below), huge pages stay balanced across nodes
under both operations.  It also eliminates some code duplication.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Eric Whitney <eric.whitney@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
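
[Editor's note] The node id "cursor" referenced above is the per-hstate
next_nid_to_free field advanced by hstate_next_node_to_free(), whose
signature appears in the first hunk's context below.  A minimal sketch of
that helper, reconstructed from the 2.6.32-era source rather than quoted
verbatim from it:

    static int hstate_next_node_to_free(struct hstate *h)
    {
        int next_nid;

        /* advance the cursor round-robin over the on-line nodes */
        next_nid = next_node(h->next_nid_to_free, node_online_map);
        if (next_nid == MAX_NUMNODES)
            next_nid = first_node(node_online_map);
        h->next_nid_to_free = next_nid;
        return next_nid;
    }

With this patch both callers of free_pool_huge_page() share that single
cursor, so their frees interleave across nodes instead of each path
tracking its own position independently.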
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 57 ++++++++++++++++++++++++---------------------------------
1 file changed, 24 insertions(+), 33 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 38dab5586827..f10cc274a7d9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -687,7 +687,7 @@ static int hstate_next_node_to_free(struct hstate *h)
  * balanced over allowed nodes.
  * Called with hugetlb_lock locked.
  */
-static int free_pool_huge_page(struct hstate *h)
+static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
 {
 	int start_nid;
 	int next_nid;
@@ -697,13 +697,22 @@ static int free_pool_huge_page(struct hstate *h)
 	next_nid = start_nid;
 
 	do {
-		if (!list_empty(&h->hugepage_freelists[next_nid])) {
+		/*
+		 * If we're returning unused surplus pages, only examine
+		 * nodes with surplus pages.
+		 */
+		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
+		    !list_empty(&h->hugepage_freelists[next_nid])) {
 			struct page *page =
 				list_entry(h->hugepage_freelists[next_nid].next,
 					  struct page, lru);
 			list_del(&page->lru);
 			h->free_huge_pages--;
 			h->free_huge_pages_node[next_nid]--;
+			if (acct_surplus) {
+				h->surplus_huge_pages--;
+				h->surplus_huge_pages_node[next_nid]--;
+			}
 			update_and_free_page(h, page);
 			ret = 1;
 		}
@@ -884,22 +893,13 @@ free:
  * When releasing a hugetlb pool reservation, any surplus pages that were
  * allocated to satisfy the reservation must be explicitly freed if they were
  * never used.
+ * Called with hugetlb_lock held.
  */
 static void return_unused_surplus_pages(struct hstate *h,
 					unsigned long unused_resv_pages)
 {
-	static int nid = -1;
-	struct page *page;
 	unsigned long nr_pages;
 
-	/*
-	 * We want to release as many surplus pages as possible, spread
-	 * evenly across all nodes. Iterate across all nodes until we
-	 * can no longer free unreserved surplus pages. This occurs when
-	 * the nodes with surplus pages have no free pages.
-	 */
-	unsigned long remaining_iterations = nr_online_nodes;
-
 	/* Uncommit the reservation */
 	h->resv_huge_pages -= unused_resv_pages;
 
@@ -909,26 +909,17 @@ static void return_unused_surplus_pages(struct hstate *h,
 
 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 
-	while (remaining_iterations-- && nr_pages) {
-		nid = next_node(nid, node_online_map);
-		if (nid == MAX_NUMNODES)
-			nid = first_node(node_online_map);
-
-		if (!h->surplus_huge_pages_node[nid])
-			continue;
-
-		if (!list_empty(&h->hugepage_freelists[nid])) {
-			page = list_entry(h->hugepage_freelists[nid].next,
-					  struct page, lru);
-			list_del(&page->lru);
-			update_and_free_page(h, page);
-			h->free_huge_pages--;
-			h->free_huge_pages_node[nid]--;
-			h->surplus_huge_pages--;
-			h->surplus_huge_pages_node[nid]--;
-			nr_pages--;
-			remaining_iterations = nr_online_nodes;
-		}
+	/*
+	 * We want to release as many surplus pages as possible, spread
+	 * evenly across all nodes. Iterate across all nodes until we
+	 * can no longer free unreserved surplus pages. This occurs when
+	 * the nodes with surplus pages have no free pages.
+	 * free_pool_huge_page() will balance the frees across the
+	 * on-line nodes for us and will handle the hstate accounting.
+	 */
+	while (nr_pages--) {
+		if (!free_pool_huge_page(h, 1))
+			break;
 	}
 }
 
@@ -1268,7 +1259,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
 	min_count = max(count, min_count);
 	try_to_free_low(h, min_count);
 	while (min_count < persistent_huge_pages(h)) {
-		if (!free_pool_huge_page(h))
+		if (!free_pool_huge_page(h, 0))
 			break;
 	}
 	while (count < persistent_huge_pages(h)) {
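
[Editor's note] For intuition about the shared cursor, here is a small
userspace toy model (an illustration only, not kernel code; all names
below are invented for the example):

    #include <stdio.h>

    #define NR_NODES 4

    /* stands in for the per-hstate next-node-to-free cursor */
    static int next_nid_to_free;

    static int pick_node(void)
    {
        int nid = next_nid_to_free;

        next_nid_to_free = (next_nid_to_free + 1) % NR_NODES;
        return nid;
    }

    int main(void)
    {
        /* alternate "surplus return" and "pool shrink" frees */
        for (int i = 0; i < 8; i++)
            printf("%s frees from node %d\n",
                   (i & 1) ? "set_max_huge_pages" : "surplus return",
                   pick_node());
        return 0;
    }

Each path resumes where the other left off, which is exactly the
cross-path balance the old private "static int nid = -1" cursor in
return_unused_surplus_pages() could not guarantee relative to
set_max_huge_pages().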