diff options
author | Hillf Danton <dhillf@gmail.com> | 2012-03-21 19:34:00 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 20:54:57 -0400 |
commit | 28073b02bfaaed1e3278acfb8e6e7c9f76d9f2b6 (patch) | |
tree | 54ab2952e2bee85780438a011cf5c1f198ff3dd9 /mm/hugetlb.c | |
parent | cc715d99e529d470dde2f33a6614f255adea71f3 (diff) |
mm: hugetlb: defer freeing pages when gathering surplus pages
When gathering surplus pages, the number of needed pages is recomputed
after reacquiring hugetlb lock to catch changes in resv_huge_pages and
free_huge_pages. It is also recomputed taking the number of newly
allocated pages into account.
Thus freeing pages can be deferred until we see whether the final page
request is satisfied, even though fewer pages than needed may have been allocated.
Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 30 |
1 file changed, 17 insertions, 13 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a876871f6be5..afe3e1ff919b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -852,6 +852,7 @@ static int gather_surplus_pages(struct hstate *h, int delta) | |||
852 | struct page *page, *tmp; | 852 | struct page *page, *tmp; |
853 | int ret, i; | 853 | int ret, i; |
854 | int needed, allocated; | 854 | int needed, allocated; |
855 | bool alloc_ok = true; | ||
855 | 856 | ||
856 | needed = (h->resv_huge_pages + delta) - h->free_huge_pages; | 857 | needed = (h->resv_huge_pages + delta) - h->free_huge_pages; |
857 | if (needed <= 0) { | 858 | if (needed <= 0) { |
@@ -867,17 +868,13 @@ retry: | |||
867 | spin_unlock(&hugetlb_lock); | 868 | spin_unlock(&hugetlb_lock); |
868 | for (i = 0; i < needed; i++) { | 869 | for (i = 0; i < needed; i++) { |
869 | page = alloc_buddy_huge_page(h, NUMA_NO_NODE); | 870 | page = alloc_buddy_huge_page(h, NUMA_NO_NODE); |
870 | if (!page) | 871 | if (!page) { |
871 | /* | 872 | alloc_ok = false; |
872 | * We were not able to allocate enough pages to | 873 | break; |
873 | * satisfy the entire reservation so we free what | 874 | } |
874 | * we've allocated so far. | ||
875 | */ | ||
876 | goto free; | ||
877 | |||
878 | list_add(&page->lru, &surplus_list); | 875 | list_add(&page->lru, &surplus_list); |
879 | } | 876 | } |
880 | allocated += needed; | 877 | allocated += i; |
881 | 878 | ||
882 | /* | 879 | /* |
883 | * After retaking hugetlb_lock, we need to recalculate 'needed' | 880 | * After retaking hugetlb_lock, we need to recalculate 'needed' |
@@ -886,9 +883,16 @@ retry: | |||
886 | spin_lock(&hugetlb_lock); | 883 | spin_lock(&hugetlb_lock); |
887 | needed = (h->resv_huge_pages + delta) - | 884 | needed = (h->resv_huge_pages + delta) - |
888 | (h->free_huge_pages + allocated); | 885 | (h->free_huge_pages + allocated); |
889 | if (needed > 0) | 886 | if (needed > 0) { |
890 | goto retry; | 887 | if (alloc_ok) |
891 | 888 | goto retry; | |
889 | /* | ||
890 | * We were not able to allocate enough pages to | ||
891 | * satisfy the entire reservation so we free what | ||
892 | * we've allocated so far. | ||
893 | */ | ||
894 | goto free; | ||
895 | } | ||
892 | /* | 896 | /* |
893 | * The surplus_list now contains _at_least_ the number of extra pages | 897 | * The surplus_list now contains _at_least_ the number of extra pages |
894 | * needed to accommodate the reservation. Add the appropriate number | 898 | * needed to accommodate the reservation. Add the appropriate number |
@@ -914,10 +918,10 @@ retry: | |||
914 | VM_BUG_ON(page_count(page)); | 918 | VM_BUG_ON(page_count(page)); |
915 | enqueue_huge_page(h, page); | 919 | enqueue_huge_page(h, page); |
916 | } | 920 | } |
921 | free: | ||
917 | spin_unlock(&hugetlb_lock); | 922 | spin_unlock(&hugetlb_lock); |
918 | 923 | ||
919 | /* Free unnecessary surplus pages to the buddy allocator */ | 924 | /* Free unnecessary surplus pages to the buddy allocator */ |
920 | free: | ||
921 | if (!list_empty(&surplus_list)) { | 925 | if (!list_empty(&surplus_list)) { |
922 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { | 926 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { |
923 | list_del(&page->lru); | 927 | list_del(&page->lru); |