author     Yinghai Lu <yinghai@kernel.org>    2011-05-02 11:24:49 -0400
committer  Tejun Heo <tj@kernel.org>          2011-05-02 11:24:49 -0400
commit     e5a10c1bd12a5d71bbb6406c1b0dbbc9d8958397
tree       1b8ee2a5cddd890e2058f167155f396ae9d69f40 /arch/x86/mm
parent     a56bca80db8903bb557b9ac38da68dc5b98ea672
x86, NUMA: Trim numa meminfo with max_pfn in a separate loop
During testing of the 32bit NUMA unification code from tj, one system with more than 64GiB of memory failed to use NUMA. It turns out we do not trim numa meminfo correctly against max_pfn when the start address of a node is higher than 64GiB. The bug fix has made it to the tip tree.

This patch moves the checking and trimming to a separate loop, so we don't need to compare against low/high in the following merge loops. It makes the code more readable.

It also makes the node merge printouts less strange. On a 512GiB NUMA system with 32bit:

before:
> NUMA: Node 0 [0,a0000) + [100000,80000000) -> [0,80000000)
> NUMA: Node 0 [0,80000000) + [100000000,1080000000) -> [0,1000000000)

after:
> NUMA: Node 0 [0,a0000) + [100000,80000000) -> [0,80000000)
> NUMA: Node 0 [0,80000000) + [100000000,1000000000) -> [0,1000000000)

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
[Updated patch description and comment slightly.]
Signed-off-by: Tejun Heo <tj@kernel.org>
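To make the two-pass flow concrete, below is a minimal, hypothetical userspace C sketch of the structure this patch introduces. struct blk, trim_blocks(), merge_blocks() and the sample ranges in main() are simplified stand-ins, not the kernel's struct numa_memblk or numa_cleanup_meminfo(); only the control flow mirrors the patch: first clamp every block to [low, high) and drop the empty ones, then merge same-node blocks without any further clamping.

/*
 * Hypothetical userspace sketch of the trim-then-merge flow.  Names and
 * sample ranges are illustrative only; they are not the kernel code.
 */
#include <stdio.h>
#include <stdint.h>

struct blk {
	uint64_t start, end;	/* physical range [start, end) */
	int nid;		/* owning node */
};

/* Pass 1: clamp every block to [low, high) and drop blocks that become empty. */
static int trim_blocks(struct blk *b, int n, uint64_t low, uint64_t high)
{
	int out = 0;

	for (int i = 0; i < n; i++) {
		uint64_t s = b[i].start > low ? b[i].start : low;
		uint64_t e = b[i].end < high ? b[i].end : high;

		if (s >= e)		/* entirely outside the usable range */
			continue;
		b[out].start = s;
		b[out].end = e;
		b[out].nid = b[i].nid;
		out++;
	}
	return out;			/* new block count */
}

/* Pass 2: merge blocks of the same node; no clamping needed here anymore. */
static int merge_blocks(struct blk *b, int n)
{
	for (int i = 0; i < n; i++) {
		for (int j = i + 1; j < n; j++) {
			if (b[i].nid != b[j].nid)
				continue;

			uint64_t start = b[i].start < b[j].start ? b[i].start : b[j].start;
			uint64_t end = b[i].end > b[j].end ? b[i].end : b[j].end;
			int blocked = 0;

			/* refuse the merge if another node's block overlaps the span */
			for (int k = 0; k < n; k++) {
				if (b[k].nid == b[i].nid)
					continue;
				if (b[k].start < end && b[k].end > start)
					blocked = 1;
			}
			if (blocked)
				continue;

			/* fold b[j] into b[i] and drop b[j] */
			b[i].start = start;
			b[i].end = end;
			for (int k = j; k < n - 1; k++)
				b[k] = b[k + 1];
			n--;
			j--;
		}
	}
	return n;
}

int main(void)
{
	/* roughly the layout behind the printout above: 512GiB box, 32bit */
	struct blk blk[] = {
		{ 0x0ULL,         0xa0000ULL,      0 },
		{ 0x100000ULL,    0x80000000ULL,   0 },
		{ 0x100000000ULL, 0x8000000000ULL, 0 },	/* reaches past max_pfn */
	};
	uint64_t high = 0x1000000000ULL;	/* illustrative 64GiB cap for PFN_PHYS(max_pfn) */
	int n = sizeof(blk) / sizeof(blk[0]);

	n = trim_blocks(blk, n, 0, high);
	n = merge_blocks(blk, n);

	for (int i = 0; i < n; i++)
		printf("node %d: [%llx,%llx)\n", blk[i].nid,
		       (unsigned long long)blk[i].start,
		       (unsigned long long)blk[i].end);
	return 0;
}

With the 64GiB cap standing in for PFN_PHYS(max_pfn), the sketch ends up with the same merged range as the "after" printout above: node 0: [0,1000000000).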
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/numa.c | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 9a0ed312b830..f5510d889a22 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -270,6 +270,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 	const u64 high = PFN_PHYS(max_pfn);
 	int i, j, k;
 
+	/* first, trim all entries */
 	for (i = 0; i < mi->nr_blks; i++) {
 		struct numa_memblk *bi = &mi->blk[i];
 
@@ -278,10 +279,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 		bi->end = min(bi->end, high);
 
 		/* and there's no empty block */
-		if (bi->start >= bi->end) {
+		if (bi->start >= bi->end)
 			numa_remove_memblk_from(i--, mi);
-			continue;
-		}
+	}
+
+	/* merge neighboring / overlapping entries */
+	for (i = 0; i < mi->nr_blks; i++) {
+		struct numa_memblk *bi = &mi->blk[i];
 
 		for (j = i + 1; j < mi->nr_blks; j++) {
 			struct numa_memblk *bj = &mi->blk[j];
@@ -311,8 +315,8 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 			 */
 			if (bi->nid != bj->nid)
 				continue;
-			start = max(min(bi->start, bj->start), low);
-			end = min(max(bi->end, bj->end), high);
+			start = min(bi->start, bj->start);
+			end = max(bi->end, bj->end);
 			for (k = 0; k < mi->nr_blks; k++) {
 				struct numa_memblk *bk = &mi->blk[k];
 
@@ -332,6 +336,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 		}
 	}
 
+	/* clear unused ones */
 	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
 		mi->blk[i].start = mi->blk[i].end = 0;
 		mi->blk[i].nid = NUMA_NO_NODE;