author     Nathan Lynch <ntl@pobox.com>                        2008-07-27 01:24:53 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-07-28 02:30:51 -0400
commit     440a0857e32a05979fb01fc59ea454a723e80e4b (patch)
tree       9776655cd83b18cdd9cc8f08b836a6811f77ced0 /arch/powerpc/kernel/smp.c
parent     0764bf63da5466474eebf7d21994cf6b106265a3 (diff)
powerpc: Make core sibling information available to userspace

Implement the notion of "core siblings" for powerpc. This makes
/sys/devices/system/cpu/cpu*/topology/core_siblings present sensible
values, indicating the set of online CPUs that share an L2 cache.

BenH: Made cpu_to_l2cache() use of_find_node_by_phandle() instead of an
IBM-specific open-coded search.

Signed-off-by: Nathan Lynch <ntl@pobox.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
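For reference, the attribute this patch populates can be read straight from
userspace; the mask comes back in the standard sysfs hex-bitmap format. A
minimal sketch (cpu0 is an arbitrary example, any online CPU works):

/* Minimal sketch: dump the core_siblings mask this patch makes
 * meaningful for powerpc. cpu0 is an arbitrary example.
 */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/core_siblings", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 core_siblings: %s", buf);
	fclose(f);
	return 0;
}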
Diffstat (limited to 'arch/powerpc/kernel/smp.c')

-rw-r--r--  arch/powerpc/kernel/smp.c | 64 ++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 3c4d07e5e06a..f7a2f81b5b7d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -63,10 +63,12 @@ struct thread_info *secondary_ti;
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
@@ -230,6 +232,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 	cpu_set(boot_cpuid, cpu_online_map);
 	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
+	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
@@ -377,11 +380,36 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	return 0;
 }
 
+/* Must be called when no change can occur to cpu_present_map,
+ * i.e. during cpu online or offline.
+ */
+static struct device_node *cpu_to_l2cache(int cpu)
+{
+	struct device_node *np;
+	const phandle *php;
+	phandle ph;
+
+	if (!cpu_present(cpu))
+		return NULL;
+
+	np = of_get_cpu_node(cpu, NULL);
+	if (np == NULL)
+		return NULL;
+
+	php = of_get_property(np, "l2-cache", NULL);
+	if (php == NULL)
+		return NULL;
+	ph = *php;
+	of_node_put(np);
+
+	return of_find_node_by_phandle(ph);
+}
 
 /* Activate a secondary processor. */
 int __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
+	struct device_node *l2_cache;
 	int i, base;
 
 	atomic_inc(&init_mm.mm_count);
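The new helper keys entirely off the device tree's "l2-cache" phandle: two
CPUs are core siblings exactly when their cpu nodes reference the same L2
node. A standalone toy model of that grouping, with invented phandle values
standing in for firmware-provided data (not kernel code):

/* Toy model: CPUs whose nodes carry the same "l2-cache" phandle end up
 * in each other's core mask. The phandle values below are made up:
 * cpus 0,1 share one L2, cpus 2,3 another.
 */
#include <stdio.h>

#define NR_CPUS 4

static const unsigned int l2_phandle[NR_CPUS] = { 0x10, 0x10, 0x20, 0x20 };

int main(void)
{
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		printf("cpu%d core siblings:", cpu);
		for (i = 0; i < NR_CPUS; i++)
			if (l2_phandle[i] == l2_phandle[cpu])
				printf(" %d", i);
		printf("\n");
	}
	return 0;
}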
@@ -410,7 +438,26 @@ int __devinit start_secondary(void *unused)
 			continue;
 		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
 		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+
+		/* cpu_core_map should be a superset of
+		 * cpu_sibling_map even if we don't have cache
+		 * information, so update the former here, too.
+		 */
+		cpu_set(cpu, per_cpu(cpu_core_map, base + i));
+		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
 	}
+	l2_cache = cpu_to_l2cache(cpu);
+	for_each_online_cpu(i) {
+		struct device_node *np = cpu_to_l2cache(i);
+		if (!np)
+			continue;
+		if (np == l2_cache) {
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+		}
+		of_node_put(np);
+	}
+	of_node_put(l2_cache);
 	ipi_call_unlock();
 
 	local_irq_enable();
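Both loops in this hunk set bits in pairs so the relation stays symmetric:
if cpu A appears in B's core map, B appears in A's, and the __cpu_disable()
hunk below undoes it in mirror image. A self-contained sketch of that
invariant using plain bitmasks rather than the kernel's cpumask_t:

/* Toy model of the symmetric updates: link_cores() mirrors the paired
 * cpu_set() calls in the online path, unlink_cores() the paired
 * cpu_clear() calls in the offline path.
 */
#include <assert.h>

#define NR_CPUS 4

static unsigned int core_map[NR_CPUS];	/* bit j set => cpu j is a sibling */

static void link_cores(int a, int b)
{
	core_map[a] |= 1u << b;
	core_map[b] |= 1u << a;	/* both directions, keeping the relation symmetric */
}

static void unlink_cores(int a, int b)
{
	core_map[a] &= ~(1u << b);
	core_map[b] &= ~(1u << a);
}

int main(void)
{
	link_cores(0, 1);
	assert((core_map[0] & (1u << 1)) && (core_map[1] & (1u << 0)));
	unlink_cores(0, 1);
	assert(core_map[0] == 0 && core_map[1] == 0);
	return 0;
}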
@@ -448,6 +495,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 #ifdef CONFIG_HOTPLUG_CPU
 int __cpu_disable(void)
 {
+	struct device_node *l2_cache;
 	int cpu = smp_processor_id();
 	int base, i;
 	int err;
@@ -464,7 +512,23 @@ int __cpu_disable(void)
 	for (i = 0; i < threads_per_core; i++) {
 		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
 		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
+		cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
+		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+	}
+
+	l2_cache = cpu_to_l2cache(cpu);
+	for_each_present_cpu(i) {
+		struct device_node *np = cpu_to_l2cache(i);
+		if (!np)
+			continue;
+		if (np == l2_cache) {
+			cpu_clear(cpu, per_cpu(cpu_core_map, i));
+			cpu_clear(i, per_cpu(cpu_core_map, cpu));
+		}
+		of_node_put(np);
 	}
+	of_node_put(l2_cache);
+
 
 	return 0;
 }
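One detail worth noting in both the online and offline loops: every
cpu_to_l2cache() result is dropped with of_node_put() inside the loop, and
the final of_node_put(l2_cache) relies on of_node_put() accepting NULL for
CPUs with no "l2-cache" property. A toy refcount model of that get/put
pairing (invented struct, not the kernel's struct device_node):

/* Toy model of the lookup/put discipline: each lookup bumps a
 * refcount, each put drops it, and put() tolerates NULL just like
 * of_node_put().
 */
#include <assert.h>
#include <stddef.h>

struct node { int refcount; };

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	if (n)			/* like of_node_put(), NULL is a no-op */
		n->refcount--;
}

int main(void)
{
	struct node l2 = { .refcount = 1 };
	struct node *np = node_get(&l2);	/* like cpu_to_l2cache() */

	node_put(np);				/* matched put per iteration */
	node_put(NULL);				/* final put may see NULL */
	assert(l2.refcount == 1);		/* balance restored */
	return 0;
}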