Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--	arch/powerpc/kernel/smp.c	119
1 file changed, 116 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f5ae9fa222ea..5337ca7bb649 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -41,6 +41,7 @@
 #include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
+#include <asm/cputhreads.h>
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/mpic.h>
@@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
@@ -228,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
 	BUG_ON(smp_processor_id() != boot_cpuid);
 
 	cpu_set(boot_cpuid, cpu_online_map);
+	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
+	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
@@ -375,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	return 0;
 }
 
+/* Return the value of the reg property corresponding to the given
+ * logical cpu.
+ */
+int cpu_to_core_id(int cpu)
+{
+	struct device_node *np;
+	const int *reg;
+	int id = -1;
+
+	np = of_get_cpu_node(cpu, NULL);
+	if (!np)
+		goto out;
+
+	reg = of_get_property(np, "reg", NULL);
+	if (!reg)
+		goto out;
+
+	id = *reg;
+out:
+	of_node_put(np);
+	return id;
+}
+
+/* Must be called when no change can occur to cpu_present_map,
+ * i.e. during cpu online or offline.
+ */
+static struct device_node *cpu_to_l2cache(int cpu)
+{
+	struct device_node *np;
+	const phandle *php;
+	phandle ph;
+
+	if (!cpu_present(cpu))
+		return NULL;
+
+	np = of_get_cpu_node(cpu, NULL);
+	if (np == NULL)
+		return NULL;
+
+	php = of_get_property(np, "l2-cache", NULL);
+	if (php == NULL)
+		return NULL;
+	ph = *php;
+	of_node_put(np);
+
+	return of_find_node_by_phandle(ph);
+}
 
 /* Activate a secondary processor. */
 int __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
+	struct device_node *l2_cache;
+	int i, base;
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -400,6 +454,33 @@ int __devinit start_secondary(void *unused)
 
 	ipi_call_lock();
 	cpu_set(cpu, cpu_online_map);
+	/* Update sibling maps */
+	base = cpu_first_thread_in_core(cpu);
+	for (i = 0; i < threads_per_core; i++) {
+		if (cpu_is_offline(base + i))
+			continue;
+		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
+		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+
+		/* cpu_core_map should be a superset of
+		 * cpu_sibling_map even if we don't have cache
+		 * information, so update the former here, too.
+		 */
+		cpu_set(cpu, per_cpu(cpu_core_map, base +i));
+		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+	}
+	l2_cache = cpu_to_l2cache(cpu);
+	for_each_online_cpu(i) {
+		struct device_node *np = cpu_to_l2cache(i);
+		if (!np)
+			continue;
+		if (np == l2_cache) {
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+		}
+		of_node_put(np);
+	}
+	of_node_put(l2_cache);
 	ipi_call_unlock();
 
 	local_irq_enable();
@@ -437,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
 #ifdef CONFIG_HOTPLUG_CPU
 int __cpu_disable(void)
 {
-	if (smp_ops->cpu_disable)
-		return smp_ops->cpu_disable();
+	struct device_node *l2_cache;
+	int cpu = smp_processor_id();
+	int base, i;
+	int err;
 
-	return -ENOSYS;
+	if (!smp_ops->cpu_disable)
+		return -ENOSYS;
+
+	err = smp_ops->cpu_disable();
+	if (err)
+		return err;
+
+	/* Update sibling maps */
+	base = cpu_first_thread_in_core(cpu);
+	for (i = 0; i < threads_per_core; i++) {
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
+		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
+		cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
+		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+	}
+
+	l2_cache = cpu_to_l2cache(cpu);
+	for_each_present_cpu(i) {
+		struct device_node *np = cpu_to_l2cache(i);
+		if (!np)
+			continue;
+		if (np == l2_cache) {
+			cpu_clear(cpu, per_cpu(cpu_core_map, i));
+			cpu_clear(i, per_cpu(cpu_core_map, cpu));
+		}
+		of_node_put(np);
+	}
+	of_node_put(l2_cache);
+
+
+	return 0;
 }
 
 void __cpu_die(unsigned int cpu)
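For context, a minimal sketch (not part of the commit above) of how other kernel code of this era could walk the new per-cpu cpu_core_map, using the same cpumask helpers the patch itself relies on (per_cpu(), for_each_cpu_mask()); the helper name print_core_siblings() is hypothetical and only for illustration.

/* Illustrative only: list the CPUs recorded as core/L2 siblings of "cpu"
 * in cpu_core_map.  Relies on headers smp.c already includes
 * (linux/cpumask.h, linux/percpu.h, linux/kernel.h); the helper name is
 * made up for this sketch.
 */
static void print_core_siblings(int cpu)
{
	int i;

	for_each_cpu_mask(i, per_cpu(cpu_core_map, cpu))
		printk(KERN_DEBUG "cpu%d: core sibling cpu%d\n", cpu, i);
}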

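Assuming the rest of the series hooks cpu_core_map up to topology_core_siblings() so the generic topology sysfs code picks it up (this diff covers only smp.c), the result becomes visible from user space through the standard topology files; a small, hedged check:

/* Reads the core_siblings mask of CPU 0 from sysfs (path per
 * Documentation/cputopology.txt).  Build with any C compiler and run on
 * a kernel that exposes CPU topology; prints a hex CPU mask.
 */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/core_siblings", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 core_siblings: %s", buf);
	fclose(f);
	return 0;
}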