 arch/powerpc/kernel/setup-common.c |   24 ------------------------
 arch/powerpc/kernel/setup_64.c     |    3 ---
 arch/powerpc/kernel/smp.c          |   32 +++++++++++++++++++++++++++++---
 3 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 61a3f4132087..9cc5a52711e5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
  * setup_cpu_maps - initialize the following cpu maps:
  *	cpu_possible_map
  *	cpu_present_map
- *	cpu_sibling_map
  *
  * Having the possible map set up early allows us to restrict allocations
  * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
@@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
  */
 	cpu_init_thread_core_maps(nthreads);
 }
-
-/*
- * Being that cpu_sibling_map is now a per_cpu array, then it cannot
- * be initialized until the per_cpu areas have been created. This
- * function is now called from setup_per_cpu_areas().
- */
-void __init smp_setup_cpu_sibling_map(void)
-{
-#ifdef CONFIG_PPC64
-	int i, cpu, base;
-
-	for_each_possible_cpu(cpu) {
-		DBG("Sibling map for CPU %d:", cpu);
-		base = cpu_first_thread_in_core(cpu);
-		for (i = 0; i < threads_per_core; i++) {
-			cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
-			DBG(" %d", base + i);
-		}
-		DBG("\n");
-	}
-
-#endif /* CONFIG_PPC64 */
-}
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PCSPKR_PLATFORM
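The removed smp_setup_cpu_sibling_map() already treated cpu_sibling_map as a per-CPU cpumask. The per-CPU definition itself is not part of this diff; a minimal sketch of what the remaining hunks appear to rely on (only the names per_cpu() is applied to are taken from the patch, everything else is an assumption) could look like:

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Sketch only: the real definition lives elsewhere in smp.c and is
 * not shown in these hunks. */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* Hypothetical accessor: any CPU's mask is reached through per_cpu(). */
static inline cpumask_t sibling_mask_of(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}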
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04d8de9f0fc6..8b25f51f03bf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
 		paca[i].data_offset = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 	}
-
-	/* Now that per_cpu is setup, initialize cpu_sibling_map */
-	smp_setup_cpu_sibling_map();
 }
 #endif
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f5ae9fa222ea..3c4d07e5e06a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -41,6 +41,7 @@
 #include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
+#include <asm/cputhreads.h>
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/mpic.h>
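The new <asm/cputhreads.h> include supplies threads_per_core and cpu_first_thread_in_core(), which the hunks below use to walk the hardware threads of a core. The header itself is not shown in this diff; a rough sketch of the semantics being assumed (thread IDs of one core contiguous and aligned to threads_per_core):

/* Assumed semantics, not the header's actual text. */
extern int threads_per_core;

static inline int cpu_first_thread_in_core(int cpu)
{
	/* Mask off the low bits to land on the core's first thread. */
	return cpu & ~(threads_per_core - 1);
}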
@@ -228,6 +229,7 @@ void __devinit smp_prepare_boot_cpu(void)
 	BUG_ON(smp_processor_id() != boot_cpuid);
 
 	cpu_set(boot_cpuid, cpu_online_map);
+	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
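The boot CPU never runs start_secondary(), so its self-sibling bit is seeded here instead. One invariant this preserves is that every online CPU appears in its own sibling mask; an illustrative, hypothetical check (not part of the patch):

/* Illustrative only. */
static void check_sibling_self_bit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		WARN_ON(!cpu_isset(cpu, per_cpu(cpu_sibling_map, cpu)));
}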
@@ -380,6 +382,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 int __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
+	int i, base;
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -400,6 +403,14 @@ int __devinit start_secondary(void *unused)
 
 	ipi_call_lock();
 	cpu_set(cpu, cpu_online_map);
+	/* Update sibling maps */
+	base = cpu_first_thread_in_core(cpu);
+	for (i = 0; i < threads_per_core; i++) {
+		if (cpu_is_offline(base + i))
+			continue;
+		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
+		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+	}
 	ipi_call_unlock();
 
 	local_irq_enable();
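A CPU coming online adds itself to each already-online sibling's mask and each online sibling to its own mask, keeping the relation symmetric while skipping threads that are still offline. Consumers then read the map back with the ordinary cpumask iterators; a hypothetical example of the access pattern (not from the patch):

/* Hypothetical consumer, shown only to illustrate reading the map. */
static int count_online_siblings(int cpu)
{
	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
	int sibling, n = 0;

	for_each_cpu_mask(sibling, mask)
		if (cpu_online(sibling))
			n++;
	return n;
}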
@@ -437,10 +448,25 @@ void __init smp_cpus_done(unsigned int max_cpus)
 #ifdef CONFIG_HOTPLUG_CPU
 int __cpu_disable(void)
 {
-	if (smp_ops->cpu_disable)
-		return smp_ops->cpu_disable();
+	int cpu = smp_processor_id();
+	int base, i;
+	int err;
 
-	return -ENOSYS;
+	if (!smp_ops->cpu_disable)
+		return -ENOSYS;
+
+	err = smp_ops->cpu_disable();
+	if (err)
+		return err;
+
+	/* Update sibling maps */
+	base = cpu_first_thread_in_core(cpu);
+	for (i = 0; i < threads_per_core; i++) {
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
+		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
+	}
+
+	return 0;
 }
 
 void __cpu_die(unsigned int cpu)
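__cpu_disable() now undoes the same pairwise update when a thread goes offline, clearing the departing CPU out of each sibling's mask and emptying its own. The DBG() dump that disappeared with smp_setup_cpu_sibling_map() could be recreated on demand; a hypothetical sketch mirroring the removed loop:

/* Hypothetical debug helper, modelled on the removed DBG() loop. */
static void dump_sibling_maps(void)
{
	int cpu, sibling;

	for_each_possible_cpu(cpu) {
		printk(KERN_DEBUG "Sibling map for CPU %d:", cpu);
		for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
			printk(" %d", sibling);
		printk("\n");
	}
}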
