author    Jan Beulich <JBeulich@novell.com>	2010-04-21 11:13:20 -0400
committer H. Peter Anvin <hpa@zytor.com>	2010-04-28 20:14:11 -0400
commit    2e61878698781d6a9a8bfbaa4ea9c5ddb5a178c3 (patch)
tree      b4f6010e356eae8020f3133a9277ca0cdc8d6b4a /arch/x86/mm
parent    1d16b0f2f3edf05f12a9e3960588e0d4854157bb (diff)
x86-64: Combine SRAT regions when possible
... i.e. when the hole between two regions isn't occupied by memory on
another node. This reduces the memory->node table size and hence the
cache footprint of lookups, which had grown significantly some time ago;
on the systems I looked at, this restores things to how they were before
that change.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
LKML-Reference: <4BCF3230020000780003B3CA@vpn.id2.novell.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
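To make the merge rule concrete, here is a minimal userspace sketch (not
the patch's code): the struct memblk type, can_merge() helper, and the
sample layout are hypothetical stand-ins for the kernel's
node_memblk_range[] and memblk_nodeid[] arrays. Two blocks on the same
node may be joined only if the hole between them overlaps no block owned
by another node.

/* Minimal userspace sketch of the merge rule; illustrative only. */
#include <stdio.h>

struct memblk {				/* hypothetical stand-in for        */
	unsigned long start, end;	/* node_memblk_range[] ([start,end)) */
	int nid;			/* ... and memblk_nodeid[]           */
};

/* May blocks a and b (same node) be combined, given all n blocks? */
static int can_merge(const struct memblk *blk, int n, int a, int b)
{
	/* Hole spans from the earlier block's end to the later one's start. */
	unsigned long hs = blk[a].end < blk[b].end ? blk[a].end : blk[b].end;
	unsigned long he = blk[a].start > blk[b].start ? blk[a].start : blk[b].start;
	int k;

	for (k = 0; k < n; ++k) {
		if (blk[k].nid == blk[a].nid)
			continue;	/* own node never blocks a merge */
		if (hs < blk[k].end && he > blk[k].start)
			return 0;	/* another node's memory sits in the hole */
	}
	return 1;
}

int main(void)
{
	/* Hypothetical SRAT layout: node 0 split in two, node 1 in between. */
	struct memblk blk[] = {
		{ 0x000000, 0x100000, 0 },
		{ 0x180000, 0x200000, 1 },	/* occupies part of node 0's hole */
		{ 0x300000, 0x400000, 0 },
		{ 0x100000, 0x180000, 0 },	/* adjacent to the first block */
	};

	printf("merge 0+2: %s\n", can_merge(blk, 4, 0, 2) ? "yes" : "no"); /* no  */
	printf("merge 0+3: %s\n", can_merge(blk, 4, 0, 3) ? "yes" : "no"); /* yes */
	return 0;
}

In the patch itself, once this check passes, block i is widened to the
union of the two ranges and block j is removed by the two memmove()
calls, shrinking num_node_memblks.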
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/srat_64.c	48
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 28c68762648f..3ebe6519bd87 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -363,6 +363,54 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 	for (i = 0; i < MAX_NUMNODES; i++)
 		cutoff_node(i, start, end);
 
+	/*
+	 * Join together blocks on the same node, holes between
+	 * which don't overlap with memory on other nodes.
+	 */
+	for (i = 0; i < num_node_memblks; ++i) {
+		int j, k;
+
+		for (j = i + 1; j < num_node_memblks; ++j) {
+			unsigned long start, end;
+
+			if (memblk_nodeid[i] != memblk_nodeid[j])
+				continue;
+			start = min(node_memblk_range[i].end,
+				    node_memblk_range[j].end);
+			end = max(node_memblk_range[i].start,
+				  node_memblk_range[j].start);
+			for (k = 0; k < num_node_memblks; ++k) {
+				if (memblk_nodeid[i] == memblk_nodeid[k])
+					continue;
+				if (start < node_memblk_range[k].end &&
+				    end > node_memblk_range[k].start)
+					break;
+			}
+			if (k < num_node_memblks)
+				continue;
+			start = min(node_memblk_range[i].start,
+				    node_memblk_range[j].start);
+			end = max(node_memblk_range[i].end,
+				  node_memblk_range[j].end);
+			printk(KERN_INFO "SRAT: Node %d "
+			       "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
+			       memblk_nodeid[i],
+			       node_memblk_range[i].start,
+			       node_memblk_range[i].end,
+			       node_memblk_range[j].start,
+			       node_memblk_range[j].end,
+			       start, end);
+			node_memblk_range[i].start = start;
+			node_memblk_range[i].end = end;
+			k = --num_node_memblks - j;
+			memmove(memblk_nodeid + j, memblk_nodeid + j+1,
+				k * sizeof(*memblk_nodeid));
+			memmove(node_memblk_range + j, node_memblk_range + j+1,
+				k * sizeof(*node_memblk_range));
+			--j;
+		}
+	}
+
 	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
 					   memblk_nodeid);
 	if (memnode_shift < 0) {