 Documentation/cpu-hotplug.txt |   2
 arch/s390/kernel/early.c      |   1
 arch/s390/kernel/setup.c      |   1
 arch/s390/kernel/smp.c        | 180
 drivers/s390/char/sclp_cmd.c  |  46
 include/asm-s390/sclp.h       |   1
 include/asm-s390/smp.h        |   3
 7 files changed, 79 insertions(+), 155 deletions(-)
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index fb94f5a71b68..ba0aacde94fb 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -50,7 +50,7 @@ additional_cpus=n (*) Use this to limit hotpluggable cpus. This option sets
 			cpu_possible_map = cpu_present_map + additional_cpus
 
 (*) Option valid only for following architectures
-- x86_64, ia64, s390
+- x86_64, ia64
 
 ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
 to determine the number of potentially hot-pluggable cpus. The implementation
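[Illustrative note, not part of the patch] With s390 dropped from the list
above, the architecture no longer derives cpu_possible_map from
additional_cpus. As the smp.c hunks below show, the map now defaults to
CPU_MASK_ALL and can instead be bounded explicitly with the possible_cpus=
early parameter handled by setup_possible_cpus(); the value here is only an
example:

        possible_cpus=4

With that boot option CPUs 0-3 are marked possible; which of them become
present is decided later by smp_detect_cpus() at smp_prepare_cpus() time.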
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c7cbb0114147..9f7b73b180f0 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -278,7 +278,6 @@ void __init startup_init(void)
 	setup_lowcore_early();
 	sclp_read_info_early();
 	sclp_facilities_detect();
-	sclp_read_cpu_info_early();
 	memsize = sclp_memory_detect();
 #ifndef CONFIG_64BIT
 	/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3a61bfc2c4fb..cbdf3fb05e81 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -922,7 +922,6 @@ setup_arch(char **cmdline_p)
 
 	cpu_init();
 	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
-	smp_setup_cpu_possible_map();
 
 	/*
 	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 66fe28930d82..320e4e97bf52 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(lowcore_ptr);
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_online_map);
 
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
 EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
@@ -399,7 +399,7 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
 		return;
 	}
-	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
+	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
 	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
 	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
 	       sigp_busy)
@@ -435,67 +435,6 @@ static int cpu_stopped(int cpu)
 	return 0;
 }
 
-/*
- * Lets check how many CPUs we have.
- */
-static void __init smp_count_cpus(unsigned int *configured_cpus,
-				  unsigned int *standby_cpus)
-{
-	unsigned int cpu;
-	struct sclp_cpu_info *info;
-	u16 boot_cpu_addr, cpu_addr;
-
-	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
-	current_thread_info()->cpu = 0;
-	*configured_cpus = 1;
-	*standby_cpus = 0;
-
-	info = alloc_bootmem_pages(sizeof(*info));
-	if (!info)
-		disabled_wait((unsigned long) __builtin_return_address(0));
-
-	/* Use sigp detection algorithm if sclp doesn't work. */
-	if (sclp_get_cpu_info(info)) {
-		smp_use_sigp_detection = 1;
-		for (cpu = 0; cpu <= 65535; cpu++) {
-			if (cpu == boot_cpu_addr)
-				continue;
-			__cpu_logical_map[CPU_INIT_NO] = cpu;
-			if (cpu_stopped(CPU_INIT_NO))
-				(*configured_cpus)++;
-		}
-		goto out;
-	}
-
-	if (info->has_cpu_type) {
-		for (cpu = 0; cpu < info->combined; cpu++) {
-			if (info->cpu[cpu].address == boot_cpu_addr) {
-				smp_cpu_type = info->cpu[cpu].type;
-				break;
-			}
-		}
-	}
-	/* Count cpus. */
-	for (cpu = 0; cpu < info->combined; cpu++) {
-		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
-			continue;
-		cpu_addr = info->cpu[cpu].address;
-		if (cpu_addr == boot_cpu_addr)
-			continue;
-		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
-		if (!cpu_stopped(CPU_INIT_NO)) {
-			(*standby_cpus)++;
-			continue;
-		}
-		smp_get_save_area(*configured_cpus, cpu_addr);
-		(*configured_cpus)++;
-	}
-out:
-	printk(KERN_INFO "CPUs: %d configured, %d standby\n",
-	       *configured_cpus, *standby_cpus);
-	free_bootmem((unsigned long) info, sizeof(*info));
-}
-
 static int cpu_known(int cpu_id)
 {
 	int cpu;
@@ -529,7 +468,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
 	return 0;
 }
 
-static int __init_refok smp_rescan_cpus_sclp(cpumask_t avail)
+static int smp_rescan_cpus_sclp(cpumask_t avail)
 {
 	struct sclp_cpu_info *info;
 	int cpu_id, logical_cpu, cpu;
@@ -538,10 +477,7 @@ static int __init_refok smp_rescan_cpus_sclp(cpumask_t avail)
 	logical_cpu = first_cpu(avail);
 	if (logical_cpu == NR_CPUS)
 		return 0;
-	if (slab_is_available())
-		info = kmalloc(sizeof(*info), GFP_KERNEL);
-	else
-		info = alloc_bootmem(sizeof(*info));
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 	rc = sclp_get_cpu_info(info);
@@ -564,10 +500,7 @@ static int __init_refok smp_rescan_cpus_sclp(cpumask_t avail)
 			break;
 	}
 out:
-	if (slab_is_available())
-		kfree(info);
-	else
-		free_bootmem((unsigned long) info, sizeof(*info));
+	kfree(info);
 	return rc;
 }
 
@@ -575,15 +508,71 @@ static int smp_rescan_cpus(void)
 {
 	cpumask_t avail;
 
-	cpus_setall(avail);
-	cpus_and(avail, avail, cpu_possible_map);
-	cpus_andnot(avail, avail, cpu_present_map);
+	cpus_xor(avail, cpu_possible_map, cpu_present_map);
 	if (smp_use_sigp_detection)
 		return smp_rescan_cpus_sigp(avail);
 	else
 		return smp_rescan_cpus_sclp(avail);
 }
 
+static void __init smp_detect_cpus(void)
+{
+	unsigned int cpu, c_cpus, s_cpus;
+	struct sclp_cpu_info *info;
+	u16 boot_cpu_addr, cpu_addr;
+
+	c_cpus = 1;
+	s_cpus = 0;
+	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		panic("smp_detect_cpus failed to allocate memory\n");
+	/* Use sigp detection algorithm if sclp doesn't work. */
+	if (sclp_get_cpu_info(info)) {
+		smp_use_sigp_detection = 1;
+		for (cpu = 0; cpu <= 65535; cpu++) {
+			if (cpu == boot_cpu_addr)
+				continue;
+			__cpu_logical_map[CPU_INIT_NO] = cpu;
+			if (!cpu_stopped(CPU_INIT_NO))
+				continue;
+			smp_get_save_area(c_cpus, cpu);
+			c_cpus++;
+		}
+		goto out;
+	}
+
+	if (info->has_cpu_type) {
+		for (cpu = 0; cpu < info->combined; cpu++) {
+			if (info->cpu[cpu].address == boot_cpu_addr) {
+				smp_cpu_type = info->cpu[cpu].type;
+				break;
+			}
+		}
+	}
+
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_addr = info->cpu[cpu].address;
+		if (cpu_addr == boot_cpu_addr)
+			continue;
+		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
+		if (!cpu_stopped(CPU_INIT_NO)) {
+			s_cpus++;
+			continue;
+		}
+		smp_get_save_area(c_cpus, cpu_addr);
+		c_cpus++;
+	}
+out:
+	kfree(info);
+	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+	lock_cpu_hotplug();
+	smp_rescan_cpus();
+	unlock_cpu_hotplug();
+}
+
 /*
  * Activate a secondary processor.
  */
@@ -674,41 +663,20 @@ int __cpu_up(unsigned int cpu)
 	return 0;
 }
 
-static unsigned int __initdata additional_cpus;
-static unsigned int __initdata possible_cpus;
-
-void __init smp_setup_cpu_possible_map(void)
-{
-	unsigned int pos_cpus, cpu;
-	unsigned int configured_cpus, standby_cpus;
-
-	smp_count_cpus(&configured_cpus, &standby_cpus);
-	pos_cpus = min(configured_cpus + standby_cpus + additional_cpus,
-		       (unsigned int) NR_CPUS);
-	if (possible_cpus)
-		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
-	for (cpu = 0; cpu < pos_cpus; cpu++)
-		cpu_set(cpu, cpu_possible_map);
-	cpu_present_map = cpumask_of_cpu(0);
-	smp_rescan_cpus();
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static int __init setup_additional_cpus(char *s)
-{
-	additional_cpus = simple_strtoul(s, NULL, 0);
-	return 0;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
 static int __init setup_possible_cpus(char *s)
 {
-	possible_cpus = simple_strtoul(s, NULL, 0);
+	int pcpus, cpu;
+
+	pcpus = simple_strtoul(s, NULL, 0);
+	cpu_possible_map = cpumask_of_cpu(0);
+	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
+		cpu_set(cpu, cpu_possible_map);
 	return 0;
 }
 early_param("possible_cpus", setup_possible_cpus);
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 int __cpu_disable(void)
 {
 	struct ec_creg_mask_parms cr_parms;
@@ -768,6 +736,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	unsigned int cpu;
 	int i;
 
+	smp_detect_cpus();
+
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
@@ -816,6 +786,8 @@ void __init smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != 0);
 
+	current_thread_info()->cpu = 0;
+	cpu_set(0, cpu_present_map);
 	cpu_set(0, cpu_online_map);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
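[Illustrative note, not part of the patch] Taken together, the smp.c hunks
move CPU detection from early boot (the bootmem-backed smp_count_cpus()
driven from setup_arch()) to smp_prepare_cpus() time, where kmalloc() is
available. A simplified sketch of the resulting boot flow, reconstructed
from the hunks above (the bodies of the rescan helpers are not part of this
patch and are summarized from their use here):

        /*
         * Boot flow after this patch (simplified reconstruction):
         *
         * setup_arch()
         *      no longer touches the cpu maps; cpu_possible_map starts as
         *      CPU_MASK_ALL unless bounded by the possible_cpus= parameter.
         * smp_prepare_boot_cpu()
         *      current_thread_info()->cpu = 0;
         *      cpu_set(0, cpu_present_map);
         *      cpu_set(0, cpu_online_map);
         * smp_prepare_cpus()
         *      smp_detect_cpus(): query SCLP via a kmalloc'd sclp_cpu_info,
         *      or fall back to a sigp address scan, then call
         *      smp_rescan_cpus() under lock_cpu_hotplug() to turn the
         *      detected CPUs (cpu_possible_map XOR cpu_present_map) into
         *      present CPUs.
         */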
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index d7e6f4d65b78..b5c23396f8fe 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -191,9 +191,6 @@ struct read_cpu_info_sccb {
 	u8 reserved[4096 - 16];
 } __attribute__((packed, aligned(PAGE_SIZE)));
 
-static struct read_cpu_info_sccb __initdata early_read_cpu_info_sccb;
-static struct sclp_cpu_info __initdata sclp_cpu_info;
-
 static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
 			       struct read_cpu_info_sccb *sccb)
 {
@@ -208,48 +205,16 @@ static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
 	       info->combined * sizeof(struct sclp_cpu_entry));
 }
 
-void __init sclp_read_cpu_info_early(void)
-{
-	int rc;
-	struct read_cpu_info_sccb *sccb;
-
-	if (!SCLP_HAS_CPU_INFO)
-		return;
-
-	sccb = &early_read_cpu_info_sccb;
-	do {
-		memset(sccb, 0, sizeof(*sccb));
-		sccb->header.length = sizeof(*sccb);
-		rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
-	} while (rc == -EBUSY);
-
-	if (rc)
-		return;
-	if (sccb->header.response_code != 0x10)
-		return;
-	sclp_fill_cpu_info(&sclp_cpu_info, sccb);
-}
-
-static int __init sclp_get_cpu_info_early(struct sclp_cpu_info *info)
-{
-	if (!SCLP_HAS_CPU_INFO)
-		return -EOPNOTSUPP;
-	*info = sclp_cpu_info;
-	return 0;
-}
-
-static int sclp_get_cpu_info_late(struct sclp_cpu_info *info)
+int sclp_get_cpu_info(struct sclp_cpu_info *info)
 {
 	int rc;
 	struct read_cpu_info_sccb *sccb;
 
 	if (!SCLP_HAS_CPU_INFO)
 		return -EOPNOTSUPP;
-	sccb = (struct read_cpu_info_sccb *) __get_free_page(GFP_KERNEL
-							     | GFP_DMA);
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!sccb)
 		return -ENOMEM;
-	memset(sccb, 0, sizeof(*sccb));
 	sccb->header.length = sizeof(*sccb);
 	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
 	if (rc)
@@ -266,13 +231,6 @@ out:
 	return rc;
 }
 
-int __init_refok sclp_get_cpu_info(struct sclp_cpu_info *info)
-{
-	if (slab_is_available())
-		return sclp_get_cpu_info_late(info);
-	return sclp_get_cpu_info_early(info);
-}
-
 struct cpu_configure_sccb {
 	struct sccb_header header;
 } __attribute__((packed, aligned(8)));
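[Illustrative note, not part of the patch] With the early/late split removed,
sclp_get_cpu_info() is the single entry point and always allocates its SCCB
with GFP_KERNEL | GFP_DMA, so it may only be called once the slab allocator
is up (smp_detect_cpus() runs late enough for that). A minimal caller sketch
mirroring the consumer pattern of smp_detect_cpus() above; the function name
is made up for illustration:

        #include <linux/kernel.h>
        #include <linux/slab.h>
        #include <asm/sclp.h>

        /* Count the CPU entries SCLP reports, excluding the boot CPU. */
        static int example_count_sclp_cpus(u16 boot_cpu_addr)
        {
                struct sclp_cpu_info *info;
                int cpu, count = 0;

                info = kmalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
                if (sclp_get_cpu_info(info)) {
                        /* SCLP unusable; callers fall back to sigp detection. */
                        kfree(info);
                        return -EOPNOTSUPP;
                }
                for (cpu = 0; cpu < info->combined; cpu++)
                        if (info->cpu[cpu].address != boot_cpu_addr)
                                count++;
                kfree(info);
                return count;
        }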
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index b8c7695cd4c8..b5f2843013a3 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -46,7 +46,6 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info);
 int sclp_cpu_configure(u8 cpu);
 int sclp_cpu_deconfigure(u8 cpu);
 void sclp_read_info_early(void);
-void sclp_read_cpu_info_early(void);
 void sclp_facilities_detect(void);
 unsigned long long sclp_memory_detect(void);
 int sclp_sdias_blk_count(void);
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 07708c07701e..218454b91862 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -35,8 +35,6 @@ extern void machine_restart_smp(char *);
 extern void machine_halt_smp(void);
 extern void machine_power_off_smp(void);
 
-extern void smp_setup_cpu_possible_map(void);
-
 #define NO_PROC_ID 0xFF /* No processor magic marker */
 
 /*
@@ -103,7 +101,6 @@ static inline void smp_send_stop(void)
 
 #define hard_smp_processor_id() 0
 #define smp_cpu_not_running(cpu) 1
-#define smp_setup_cpu_possible_map() do { } while (0)
 #endif
 
 extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];