author    Siddha, Suresh B <suresh.b.siddha@intel.com>    2006-03-27 04:15:22 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>           2006-03-27 11:44:43 -0500
commit    1e9f28fa1eb9773bf65bae08288c6a0a38eef4a7 (patch)
tree      ccfa4927ebc7a8f663f9ac9e7789a713a33253ff /arch/i386/kernel/smpboot.c
parent    77e4bfbcf071f795b54862455dce8902b3fc29c2 (diff)
[PATCH] sched: new sched domain for representing multi-core
Add a new sched domain for representing multi-core with shared caches between cores. Consider a dual package system, each package containing two cores and with the last level cache shared between the cores within a package. If there are two runnable processes, with this patch those two processes will be scheduled on different packages.

On such systems, with this patch we have observed an 8% perf improvement with the specJBB (2 warehouse) benchmark and a 35% improvement with CFP2000 rate (with 2 users).

This new domain will come into play only on multi-core systems with shared caches. On other systems, this sched domain will be removed by the domain degeneration code. This new domain can also be used for implementing a power savings policy (see the OLS 2005 CMP kernel scheduler paper for more details; I will post another patch for the power savings policy soon).

Most of the arch/* file changes are for the cpu_coregroup_map() implementation.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
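As a rough illustration of what the new llc_shared_map bookkeeping in this patch computes, the standalone userspace sketch below (not part of the patch; the 4-CPU topology, the plain-int bitmask representation and the helper name set_llc_shared_map are made up for illustration) mimics the cpu_llc_id matching loop added to set_cpu_sibling_map(), assuming a dual package system with two cores per package, no HT, and one last level cache shared per package:

/*
 * Userspace sketch of the llc_shared_map construction added by this
 * patch.  The topology table is hypothetical: CPUs 0/1 sit in
 * package 0 and share an LLC, CPUs 2/3 sit in package 1 and share
 * another LLC.
 */
#include <stdio.h>

#define NR_CPUS		4
#define BAD_APICID	0xff

static int cpu_llc_id[NR_CPUS] = { 0, 0, 1, 1 };	/* LLC ID per logical CPU */

static unsigned int llc_shared_map[NR_CPUS];		/* one bit per CPU */

/*
 * Mirrors the new loop in set_cpu_sibling_map(): every pair of CPUs
 * with the same (valid) last level cache ID ends up in each other's
 * llc_shared_map.
 */
static void set_llc_shared_map(int cpu)
{
	int i;

	llc_shared_map[cpu] |= 1u << cpu;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			llc_shared_map[cpu] |= 1u << i;
			llc_shared_map[i] |= 1u << cpu;
		}
	}
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		set_llc_shared_map(cpu);

	/* prints 0x3 for cpu0/cpu1 and 0xc for cpu2/cpu3 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: llc_shared_map = 0x%x\n", cpu, llc_shared_map[cpu]);

	return 0;
}

In the real code, cpu_coregroup_map() simply returns this per-package mask (c->llc_shared_map), so on such a system the new multi-core sched domain groups cpu0 with cpu1 and cpu2 with cpu3, which is what lets two runnable processes land on different packages.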
Diffstat (limited to 'arch/i386/kernel/smpboot.c')
-rw-r--r--   arch/i386/kernel/smpboot.c   24
1 file changed, 24 insertions, 0 deletions
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 82371d83bfa9..a6969903f2d6 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -72,6 +72,9 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
 /* Core ID of each logical CPU */
 int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
 
+/* Last level cache ID of each logical CPU */
+int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+
 /* representing HT siblings of each logical CPU */
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
@@ -440,6 +443,18 @@ static void __devinit smp_callin(void)
 
 static int cpucount;
 
+/* maps the cpu to the sched domain representing multi-core */
+cpumask_t cpu_coregroup_map(int cpu)
+{
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+	/*
+	 * For perf, we return last level cache shared map.
+	 * TBD: when power saving sched policy is added, we will return
+	 *      cpu_core_map when power saving policy is enabled
+	 */
+	return c->llc_shared_map;
+}
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -459,12 +474,16 @@ set_cpu_sibling_map(int cpu)
 				cpu_set(cpu, cpu_sibling_map[i]);
 				cpu_set(i, cpu_core_map[cpu]);
 				cpu_set(cpu, cpu_core_map[i]);
+				cpu_set(i, c[cpu].llc_shared_map);
+				cpu_set(cpu, c[i].llc_shared_map);
 			}
 		}
 	} else {
 		cpu_set(cpu, cpu_sibling_map[cpu]);
 	}
 
+	cpu_set(cpu, c[cpu].llc_shared_map);
+
 	if (current_cpu_data.x86_max_cores == 1) {
 		cpu_core_map[cpu] = cpu_sibling_map[cpu];
 		c[cpu].booted_cores = 1;
@@ -472,6 +491,11 @@ set_cpu_sibling_map(int cpu)
 	}
 
 	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		if (cpu_llc_id[cpu] != BAD_APICID &&
+		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
+			cpu_set(i, c[cpu].llc_shared_map);
+			cpu_set(cpu, c[i].llc_shared_map);
+		}
 		if (phys_proc_id[cpu] == phys_proc_id[i]) {
 			cpu_set(i, cpu_core_map[cpu]);
 			cpu_set(cpu, cpu_core_map[i]);