author     Andi Kleen <ak@suse.de>                          2008-07-24 00:27:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-07-24 13:47:17 -0400
commit     5ced66c901f1cf0b684feb15c2cd8b126e263d07 (patch)
tree       d5faa514fa226a2fcba97aef1673c404369c1ad2 /mm/hugetlb.c
parent     a3437870160cf2caaac6bdd76c7377a5a4145a8c (diff)
hugetlb: abstract numa round robin selection
Need this as a separate function for a future patch.
No behaviour change.
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
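
The round-robin walk that the new hstate_next_node() helper abstracts is easy to see outside the kernel. Below is a minimal user-space sketch of the same selection logic; node_online_map is modeled as a plain bitmask, and model_first_node()/model_next_node() are hypothetical stand-ins for the kernel's first_node()/next_node(), not real API.

/*
 * Minimal user-space model of the round-robin nid selection.
 * All "model_" names are hypothetical stand-ins, not kernel API.
 */
#include <stdio.h>

#define MAX_NUMNODES 8

static unsigned long node_mask = 0x0bUL;        /* nodes 0, 1 and 3 "online" */

static int model_first_node(unsigned long mask)
{
        for (int nid = 0; nid < MAX_NUMNODES; nid++)
                if (mask & (1UL << nid))
                        return nid;
        return MAX_NUMNODES;
}

static int model_next_node(int nid, unsigned long mask)
{
        for (int n = nid + 1; n < MAX_NUMNODES; n++)
                if (mask & (1UL << n))
                        return n;
        return MAX_NUMNODES;            /* ran off the end: caller must wrap */
}

int main(void)
{
        int nid = model_first_node(node_mask);

        /* Two laps over the mask to show the wrap at MAX_NUMNODES. */
        for (int i = 0; i < 6; i++) {
                printf("allocate on node %d\n", nid);
                nid = model_next_node(nid, node_mask);
                if (nid == MAX_NUMNODES)
                        nid = model_first_node(node_mask);
        }
        return 0;
}

Built with any C99 compiler, this prints nodes 0, 1, 3, 0, 1, 3: each successive caller lands on the next online node, which is exactly the behaviour the patch moves into one place.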
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb49ce5d0067..5e620e25cf08 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -565,6 +565,27 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 	return page;
 }
 
+/*
+ * Use a helper variable to find the next node and then
+ * copy it back to hugetlb_next_nid afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do.  Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+static int hstate_next_node(struct hstate *h)
+{
+	int next_nid;
+	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+	if (next_nid == MAX_NUMNODES)
+		next_nid = first_node(node_online_map);
+	h->hugetlb_next_nid = next_nid;
+	return next_nid;
+}
+
 static int alloc_fresh_huge_page(struct hstate *h)
 {
 	struct page *page;
@@ -578,21 +599,7 @@ static int alloc_fresh_huge_page(struct hstate *h)
 		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
 		if (page)
 			ret = 1;
-		/*
-		 * Use a helper variable to find the next node and then
-		 * copy it back to hugetlb_next_nid afterwards:
-		 * otherwise there's a window in which a racer might
-		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
-		 * But we don't need to use a spin_lock here: it really
-		 * doesn't matter if occasionally a racer chooses the
-		 * same nid as we do.  Move nid forward in the mask even
-		 * if we just successfully allocated a hugepage so that
-		 * the next caller gets hugepages on the next node.
-		 */
-		next_nid = next_node(h->hugetlb_next_nid, node_online_map);
-		if (next_nid == MAX_NUMNODES)
-			next_nid = first_node(node_online_map);
-		h->hugetlb_next_nid = next_nid;
+		next_nid = hstate_next_node(h);
 	} while (!page && h->hugetlb_next_nid != start_nid);
 
 	if (ret)
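
One detail worth noting about the helper it introduces: the in-code comment defends computing into a local next_nid before the single store to h->hugetlb_next_nid, so a lock-free reader can never observe the transient MAX_NUMNODES value. The sketch below contrasts the two update shapes; it is an illustration under simplified assumptions (a bare increment stands in for next_node() over a fully populated mask, and the struct is a cut-down stand-in for the kernel's), not kernel code.

/*
 * Illustration of the update ordering hstate_next_node() uses.
 * Hypothetical user-space model; a plain increment stands in for
 * next_node() over a fully populated node mask.
 */
#include <assert.h>

#define MAX_NUMNODES 4

struct hstate { int hugetlb_next_nid; };

/*
 * Shape the kernel avoids: between the first store and the wrap-around
 * fixup, a concurrent reader could observe MAX_NUMNODES and pass that
 * invalid nid along.
 */
static void advance_in_place(struct hstate *h)
{
        h->hugetlb_next_nid++;                  /* reader may see 4 here */
        if (h->hugetlb_next_nid == MAX_NUMNODES)
                h->hugetlb_next_nid = 0;
}

/* Shape hstate_next_node() uses: the shared field only ever holds a
 * valid nid, because the wrap check runs on the local copy first. */
static void advance_via_local(struct hstate *h)
{
        int next_nid = h->hugetlb_next_nid + 1;
        if (next_nid == MAX_NUMNODES)
                next_nid = 0;
        h->hugetlb_next_nid = next_nid;         /* single, always-valid store */
}

int main(void)
{
        struct hstate h = { .hugetlb_next_nid = MAX_NUMNODES - 1 };

        advance_via_local(&h);
        assert(h.hugetlb_next_nid == 0);        /* wrapped, never invalid */
        (void)advance_in_place;                 /* shown for contrast only */
        return 0;
}

As the original comment says, this ordering is deliberately cheaper than a spin_lock: two racing callers may occasionally pick the same node, which is harmless, but neither can ever hand an invalid nid to alloc_pages_node().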