 mm/hugetlb.c | 63 +++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 43 insertions(+), 20 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8fb86ba452b0..82efecbab96f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -32,6 +32,7 @@ static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 int hugetlb_dynamic_pool;
+static int hugetlb_next_nid;
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -165,36 +166,56 @@ static int adjust_pool_surplus(int delta)
 	return ret;
 }
 
-static int alloc_fresh_huge_page(void)
+static struct page *alloc_fresh_huge_page_node(int nid)
 {
-	static int prev_nid;
 	struct page *page;
-	int nid;
-
-	/*
-	 * Copy static prev_nid to local nid, work on that, then copy it
-	 * back to prev_nid afterwards: otherwise there's a window in which
-	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
-	 * But we don't need to use a spin_lock here: it really doesn't
-	 * matter if occasionally a racer chooses the same nid as we do.
-	 */
-	nid = next_node(prev_nid, node_online_map);
-	if (nid == MAX_NUMNODES)
-		nid = first_node(node_online_map);
-	prev_nid = nid;
 
-	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
-				HUGETLB_PAGE_ORDER);
+	page = alloc_pages_node(nid,
+		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+		HUGETLB_PAGE_ORDER);
 	if (page) {
 		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
-		nr_huge_pages_node[page_to_nid(page)]++;
+		nr_huge_pages_node[nid]++;
 		spin_unlock(&hugetlb_lock);
 		put_page(page); /* free it into the hugepage allocator */
-		return 1;
 	}
-	return 0;
+
+	return page;
+}
+
+static int alloc_fresh_huge_page(void)
+{
+	struct page *page;
+	int start_nid;
+	int next_nid;
+	int ret = 0;
+
+	start_nid = hugetlb_next_nid;
+
+	do {
+		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
+		if (page)
+			ret = 1;
+		/*
+		 * Use a helper variable to find the next node and then
+		 * copy it back to hugetlb_next_nid afterwards:
+		 * otherwise there's a window in which a racer might
+		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+		 * But we don't need to use a spin_lock here: it really
+		 * doesn't matter if occasionally a racer chooses the
+		 * same nid as we do.  Move nid forward in the mask even
+		 * if we just successfully allocated a hugepage so that
+		 * the next caller gets hugepages on the next node.
+		 */
+		next_nid = next_node(hugetlb_next_nid, node_online_map);
+		if (next_nid == MAX_NUMNODES)
+			next_nid = first_node(node_online_map);
+		hugetlb_next_nid = next_nid;
+	} while (!page && hugetlb_next_nid != start_nid);
+
+	return ret;
 }
 
 static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
@@ -365,6 +386,8 @@ static int __init hugetlb_init(void)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		INIT_LIST_HEAD(&hugepage_freelists[i]);
 
+	hugetlb_next_nid = first_node(node_online_map);
+
 	for (i = 0; i < max_huge_pages; ++i) {
 		if (!alloc_fresh_huge_page())
 			break;
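
The behaviour added by this patch can be sketched in ordinary user-space C. The program below is illustrative only, not kernel code: names such as fake_alloc_on_node, online_mask, alloc_round_robin, MAX_NODES and the local first_node/next_node helpers are invented stand-ins for alloc_fresh_huge_page_node(), node_online_map, alloc_fresh_huge_page(), MAX_NUMNODES and the kernel's nodemask helpers. It models the helper-variable cursor advance that keeps a racing caller from ever seeing an out-of-range node id, the way a node that cannot satisfy the allocation is skipped on the next round, and the hugetlb_init()-style seed-then-allocate loop.

/*
 * Standalone sketch (not kernel code) of the round-robin allocation
 * pattern above: read the shared cursor, try the node it points at,
 * then advance the cursor through a local variable so that no reader
 * ever observes an out-of-range value.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

static unsigned int online_mask = 0x0b;	/* nodes 0, 1 and 3 are "online" */
static int next_nid;			/* shared cursor, like hugetlb_next_nid */

static int first_node(unsigned int mask)
{
	for (int nid = 0; nid < MAX_NODES; nid++)
		if (mask & (1u << nid))
			return nid;
	return MAX_NODES;
}

static int next_node(int nid, unsigned int mask)
{
	for (int n = nid + 1; n < MAX_NODES; n++)
		if (mask & (1u << n))
			return n;
	return MAX_NODES;		/* caller wraps around, as the patch does */
}

/* Pretend per-node allocator: node 1 is "memoryless" and always fails. */
static bool fake_alloc_on_node(int nid)
{
	return nid != 1;
}

static int alloc_round_robin(void)
{
	int start_nid = next_nid;
	int ret = 0;
	bool got;

	do {
		got = fake_alloc_on_node(next_nid);
		if (got)
			ret = 1;
		/* Advance via a local so the shared cursor never holds MAX_NODES. */
		int candidate = next_node(next_nid, online_mask);
		if (candidate == MAX_NODES)
			candidate = first_node(online_mask);
		next_nid = candidate;
	} while (!got && next_nid != start_nid);

	return ret;
}

int main(void)
{
	/* Mirror hugetlb_init(): seed the cursor, then allocate until failure. */
	next_nid = first_node(online_mask);
	for (int i = 0; i < 6; i++) {
		if (!alloc_round_robin()) {
			printf("allocation %d failed, stopping\n", i);
			break;
		}
		printf("allocation %d ok, cursor now at node %d\n", i, next_nid);
	}
	return 0;
}

The design choice mirrors the in-tree comment: the cursor is advanced without a lock because two callers occasionally picking the same node is harmless, whereas publishing an out-of-range node id to the allocator would not be, and the cursor moves forward even after a successful allocation so that successive callers spread pages across nodes.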