about summary refs log tree commit diff stats
path: root/arch/sparc64
diff options
context:
space:
mode:
authorDavid S. Miller <davem@hutch.davemloft.net>2007-06-04 20:01:39 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-06-05 00:50:00 -0400
commitf78eae2e6f5d1eb05f76a45486286445b916bd92 (patch)
tree0fa81e104ad9891afcaf18cdcb413c4a0f2ee8da /arch/sparc64
parentd887ab3a9b1899f88b8cfba531e726b5fb2ebd14 (diff)
[SPARC64]: Proper multi-core scheduling support.
The scheduling domain hierarchy is:

    all cpus -->
        cpus that share an instruction cache -->
            cpus that share an integer execution unit

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64')
-rw-r--r--arch/sparc64/Kconfig9
-rw-r--r--arch/sparc64/kernel/mdesc.c49
-rw-r--r--arch/sparc64/kernel/prom.c1
-rw-r--r--arch/sparc64/kernel/smp.c19
4 files changed, 77 insertions, 1 deletions
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index bd00f89eed1e..89a1b469b93d 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -396,6 +396,15 @@ config SCHED_SMT
396 when dealing with UltraSPARC cpus at a cost of slightly increased 396 when dealing with UltraSPARC cpus at a cost of slightly increased
397 overhead in some places. If unsure say N here. 397 overhead in some places. If unsure say N here.
398 398
399config SCHED_MC
400 bool "Multi-core scheduler support"
401 depends on SMP
402 default y
403 help
404 Multi-core scheduler support improves the CPU scheduler's decision
405 making when dealing with multi-core CPU chips at a cost of slightly
406 increased overhead in some places. If unsure say N here.
407
399source "kernel/Kconfig.preempt" 408source "kernel/Kconfig.preempt"
400 409
401config CMDLINE_BOOL 410config CMDLINE_BOOL
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 9246c2cf9574..1b5db4bc6b34 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
473 } 473 }
474} 474}
475 475
476static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
477{
478 int i;
479
480 for (i = 0; i < mp->num_arcs; i++) {
481 struct mdesc_node *t = mp->arcs[i].arc;
482 const u64 *id;
483
484 if (strcmp(mp->arcs[i].name, "back"))
485 continue;
486
487 if (strcmp(t->name, "cpu"))
488 continue;
489
490 id = md_get_property(t, "id", NULL);
491 if (*id < NR_CPUS)
492 cpu_data(*id).proc_id = proc_id;
493 }
494}
495
496static void __init __set_proc_ids(const char *exec_unit_name)
497{
498 struct mdesc_node *mp;
499 int idx;
500
501 idx = 0;
502 md_for_each_node_by_name(mp, exec_unit_name) {
503 const char *type;
504 int len;
505
506 type = md_get_property(mp, "type", &len);
507 if (!find_in_proplist(type, "int", len) &&
508 !find_in_proplist(type, "integer", len))
509 continue;
510
511 mark_proc_ids(mp, idx);
512
513 idx++;
514 }
515}
516
517static void __init set_proc_ids(void)
518{
519 __set_proc_ids("exec_unit");
520 __set_proc_ids("exec-unit");
521}
522
476static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) 523static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
477{ 524{
478 u64 val; 525 u64 val;
@@ -574,9 +621,11 @@ static void __init mdesc_fill_in_cpu_data(void)
574#endif 621#endif
575 622
576 c->core_id = 0; 623 c->core_id = 0;
624 c->proc_id = -1;
577 } 625 }
578 626
579 set_core_ids(); 627 set_core_ids();
628 set_proc_ids();
580 629
581 smp_fill_in_sib_core_maps(); 630 smp_fill_in_sib_core_maps();
582} 631}
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index dad4b3ba705f..928aba3d0db3 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1800,6 +1800,7 @@ static void __init of_fill_in_cpu_data(void)
1800 1800
1801 cpu_data(cpuid).core_id = 0; 1801 cpu_data(cpuid).core_id = 0;
1802 } 1802 }
1803 cpu_data(cpuid).proc_id = -1;
1803 1804
1804#ifdef CONFIG_SMP 1805#ifdef CONFIG_SMP
1805 cpu_set(cpuid, cpu_present_map); 1806 cpu_set(cpuid, cpu_present_map);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c550bba3490a..68a45ac93375 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -51,6 +51,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
51cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; 51cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
52cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = 52cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
53 { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; 53 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
54cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
55 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
54static cpumask_t smp_commenced_mask; 56static cpumask_t smp_commenced_mask;
55static cpumask_t cpu_callout_map; 57static cpumask_t cpu_callout_map;
56 58
@@ -1217,13 +1219,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
1217 unsigned int j; 1219 unsigned int j;
1218 1220
1219 if (cpu_data(i).core_id == 0) { 1221 if (cpu_data(i).core_id == 0) {
1220 cpu_set(i, cpu_sibling_map[i]); 1222 cpu_set(i, cpu_core_map[i]);
1221 continue; 1223 continue;
1222 } 1224 }
1223 1225
1224 for_each_possible_cpu(j) { 1226 for_each_possible_cpu(j) {
1225 if (cpu_data(i).core_id == 1227 if (cpu_data(i).core_id ==
1226 cpu_data(j).core_id) 1228 cpu_data(j).core_id)
1229 cpu_set(j, cpu_core_map[i]);
1230 }
1231 }
1232
1233 for_each_possible_cpu(i) {
1234 unsigned int j;
1235
1236 if (cpu_data(i).proc_id == -1) {
1237 cpu_set(i, cpu_sibling_map[i]);
1238 continue;
1239 }
1240
1241 for_each_possible_cpu(j) {
1242 if (cpu_data(i).proc_id ==
1243 cpu_data(j).proc_id)
1227 cpu_set(j, cpu_sibling_map[i]); 1244 cpu_set(j, cpu_sibling_map[i]);
1228 } 1245 }
1229 } 1246 }