author    Hugh Dickins <hugh@veritas.com>    2007-07-19 04:49:11 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-19 13:04:50 -0400
commit    7ed5cb2b73d0c4165c0504c95454fade0c0bf3d9 (patch)
tree      37b15b57986466531c5a0debb6b52660ca47e86a
parent    e3aded3cc289113c7bc729ef4cb75e56d9aa71be (diff)
Remove nid_lock from alloc_fresh_huge_page
The fix to that race in alloc_fresh_huge_page() which could give an illegal
node ID did not need nid_lock at all: the fix was to replace static int nid
by static int prev_nid and do the work on a local int nid.  nid_lock did make
sure that racers strictly roundrobin the nodes, but that's not something we
need to enforce strictly.  Kill nid_lock.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--    mm/hugetlb.c    10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 15fc7b000772..2d7611cf276a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -104,15 +104,19 @@ static int alloc_fresh_huge_page(void)
 {
 	static int prev_nid;
 	struct page *page;
-	static DEFINE_SPINLOCK(nid_lock);
 	int nid;
 
-	spin_lock(&nid_lock);
+	/*
+	 * Copy static prev_nid to local nid, work on that, then copy it
+	 * back to prev_nid afterwards: otherwise there's a window in which
+	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
+	 * But we don't need to use a spin_lock here: it really doesn't
+	 * matter if occasionally a racer chooses the same nid as we do.
+	 */
 	nid = next_node(prev_nid, node_online_map);
 	if (nid == MAX_NUMNODES)
 		nid = first_node(node_online_map);
 	prev_nid = nid;
-	spin_unlock(&nid_lock);
 
 	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
 						HUGETLB_PAGE_ORDER);
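
The comment added above describes a simple lockless pattern: read the shared
static cursor into a local variable, advance and wrap it on the local copy,
then publish it back.  The sketch below illustrates that pattern in isolation
as an ordinary userspace C program; the names pick_next_node and NODE_COUNT
and the pthread driver are made up for illustration and are not part of the
kernel change.  As in the kernel code, two racing threads may occasionally
pick the same node, which is harmless, but neither can ever return an
out-of-range value, because the wrap-around test is done on the local copy
before it is written back.

#include <stdio.h>
#include <pthread.h>

#define NODE_COUNT 4	/* hypothetical number of online nodes */

/*
 * Round-robin node selector in the style of alloc_fresh_huge_page():
 * copy the static cursor to a local, advance and wrap it locally, then
 * write it back.  No lock is taken; the worst a racer can do is choose
 * the same node, never an invalid one.
 */
static int pick_next_node(void)
{
	static int prev_nid;	/* shared, unlocked cursor */
	int nid;

	nid = prev_nid + 1;	/* work on a local copy ... */
	if (nid >= NODE_COUNT)
		nid = 0;	/* ... so the wrap is never visible half-done */
	prev_nid = nid;		/* publish afterwards */

	return nid;
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 8; i++)
		printf("allocating on node %d\n", pick_next_node());
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The design trade-off is the one the commit message spells out: the old
nid_lock bought strict round-robin fairness between racers, but fairness is
only best-effort here, so the lock is pure overhead once the invalid-nid
window is closed by working on the local copy.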