 mm/hugetlb.c | 37 ++++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb49ce5d0067..5e620e25cf08 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -565,6 +565,27 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 	return page;
 }
 
+/*
+ * Use a helper variable to find the next node and then
+ * copy it back to hugetlb_next_nid afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do.  Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+static int hstate_next_node(struct hstate *h)
+{
+	int next_nid;
+	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+	if (next_nid == MAX_NUMNODES)
+		next_nid = first_node(node_online_map);
+	h->hugetlb_next_nid = next_nid;
+	return next_nid;
+}
+
 static int alloc_fresh_huge_page(struct hstate *h)
 {
 	struct page *page;
@@ -578,21 +599,7 @@ static int alloc_fresh_huge_page(struct hstate *h)
 		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
 		if (page)
 			ret = 1;
-		/*
-		 * Use a helper variable to find the next node and then
-		 * copy it back to hugetlb_next_nid afterwards:
-		 * otherwise there's a window in which a racer might
-		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
-		 * But we don't need to use a spin_lock here: it really
-		 * doesn't matter if occasionally a racer chooses the
-		 * same nid as we do.  Move nid forward in the mask even
-		 * if we just successfully allocated a hugepage so that
-		 * the next caller gets hugepages on the next node.
-		 */
-		next_nid = next_node(h->hugetlb_next_nid, node_online_map);
-		if (next_nid == MAX_NUMNODES)
-			next_nid = first_node(node_online_map);
-		h->hugetlb_next_nid = next_nid;
+		next_nid = hstate_next_node(h);
 	} while (!page && h->hugetlb_next_nid != start_nid);
 
 	if (ret)
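
For readers outside the kernel tree, here is a minimal userspace C sketch of the round-robin logic that hstate_next_node() factors out. The bitmask, first_node(), next_node(), and MAX_NUMNODES below are simplified stand-ins for the kernel's nodemask API, not the real implementations. The point the patch comment makes is preserved: the successor nid is computed in a local variable and only then published to the shared field, so a concurrent reader can never observe the out-of-range MAX_NUMNODES value.

/*
 * Userspace sketch of the hstate_next_node() round-robin logic.
 * MAX_NUMNODES, node_online_map, first_node(), and next_node()
 * are simplified stand-ins for the kernel's nodemask machinery.
 */
#include <stdio.h>

#define MAX_NUMNODES 8

/* Bitmask of online nodes; here nodes 0, 2, and 5 are online. */
static unsigned int node_online_map = (1u << 0) | (1u << 2) | (1u << 5);

/* Smallest online node id, like the kernel's first_node(). */
static int first_node(unsigned int mask)
{
	for (int nid = 0; nid < MAX_NUMNODES; nid++)
		if (mask & (1u << nid))
			return nid;
	return MAX_NUMNODES;
}

/* Next online node after nid, or MAX_NUMNODES if none, like next_node(). */
static int next_node(int nid, unsigned int mask)
{
	for (int n = nid + 1; n < MAX_NUMNODES; n++)
		if (mask & (1u << n))
			return n;
	return MAX_NUMNODES;
}

struct hstate {
	int hugetlb_next_nid;	/* shared, read by concurrent allocators */
};

/*
 * Compute the successor in a local variable first: the shared field
 * only ever holds a valid online nid, never MAX_NUMNODES, even if
 * another thread reads it mid-update.
 */
static int hstate_next_node(struct hstate *h)
{
	int next_nid = next_node(h->hugetlb_next_nid, node_online_map);

	if (next_nid == MAX_NUMNODES)
		next_nid = first_node(node_online_map);
	h->hugetlb_next_nid = next_nid;
	return next_nid;
}

int main(void)
{
	struct hstate h = { .hugetlb_next_nid = first_node(node_online_map) };

	/* Walk the online mask twice: prints 2 5 0 2 5 0. */
	for (int i = 0; i < 6; i++)
		printf("%d ", hstate_next_node(&h));
	printf("\n");
	return 0;
}

As the patch comment notes, the update itself stays lockless: the worst a racing allocator can do is pick the same nid once in a while, which merely skews the interleave slightly rather than producing an invalid node.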