Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--  arch/s390/kernel/smp.c  575
1 files changed, 422 insertions, 153 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 264ea906db4c..aa37fa154512 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/lowcore.h>
+#include <asm/sclp.h>
 #include <asm/cpu.h>
 
 /*
@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_online_map);
 
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
 EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
 
+static u8 smp_cpu_type;
+static int smp_use_sigp_detection;
+
+enum s390_cpu_state {
+	CPU_STATE_STANDBY,
+	CPU_STATE_CONFIGURED,
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_MUTEX(smp_cpu_state_mutex);
+#endif
+static int smp_cpu_state[NR_CPUS];
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
 static void smp_ext_bitcall(int, ec_bit_sig);
 
 /*
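
Note: the state array added above is a simple two-state model per logical cpu. A hypothetical helper (not part of this patch) that spells out the invariant the rest of the diff relies on — standby cpus must be configured through SCLP before they can be brought online:

	/* Illustrative only: STANDBY <-> CONFIGURED transitions happen via
	 * sclp_cpu_configure()/sclp_cpu_deconfigure(); __cpu_up() later in
	 * this patch refuses any cpu that is not CONFIGURED. */
	static inline int smp_cpu_may_online(int cpu)
	{
		return smp_cpu_state[cpu] == CPU_STATE_CONFIGURED;
	}
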
@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int
+smp_call_function_mask(cpumask_t mask,
+		       void (*func)(void *), void *info,
+		       int wait)
+{
+	preempt_disable();
+	__smp_call_function_map(func, info, 0, wait, mask);
+	preempt_enable();
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
 void smp_send_stop(void)
 {
 	int cpu, rc;
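
A minimal, hypothetical caller of the new interface (illustrative only, not part of the patch): it runs a fast, non-blocking function on every other online cpu and waits for completion, matching the calling convention documented in the kerneldoc above.

	static void count_on_cpu(void *info)
	{
		/* Runs on each cpu in the mask; must not sleep. */
		atomic_inc((atomic_t *) info);
	}

	static int count_other_cpus(atomic_t *counter)
	{
		cpumask_t mask = cpu_online_map;

		/* The mask must not include the calling cpu. */
		cpu_clear(smp_processor_id(), mask);
		return smp_call_function_mask(mask, count_on_cpu, counter, 1);
	}
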
@@ -217,33 +261,6 @@ void smp_send_stop(void)
 }
 
 /*
- * Reboot, halt and power_off routines for SMP.
- */
-void machine_restart_smp(char *__unused)
-{
-	smp_send_stop();
-	do_reipl();
-}
-
-void machine_halt_smp(void)
-{
-	smp_send_stop();
-	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
-		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-	for (;;);
-}
-
-void machine_power_off_smp(void)
-{
-	smp_send_stop();
-	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
-		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-	for (;;);
-}
-
-/*
  * This is the main routine where commands issued by other
  * cpus are handled.
  */
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
+/*
+ * In early ipl state a temp. logically cpu number is needed, so the sigp
+ * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
+ * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
+ */
+#define CPU_INIT_NO	1
+
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
 
 /*
@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
375 "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); 399 "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
376 return; 400 return;
377 } 401 }
378 zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area)); 402 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
379 __cpu_logical_map[1] = (__u16) phy_cpu; 403 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
380 while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy) 404 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
405 sigp_busy)
381 cpu_relax(); 406 cpu_relax();
382 memcpy(zfcpdump_save_areas[cpu], 407 memcpy(zfcpdump_save_areas[cpu],
383 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 408 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
 
 #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
 
-/*
- * Lets check how many CPUs we have.
- */
-static unsigned int __init smp_count_cpus(void)
+static int cpu_stopped(int cpu)
 {
-	unsigned int cpu, num_cpus;
-	__u16 boot_cpu_addr;
+	__u32 status;
 
-	/*
-	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
-	 */
+	/* Check for stopped state */
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+	    sigp_status_stored) {
+		if (status & 0x40)
+			return 1;
+	}
+	return 0;
+}
+
+static int cpu_known(int cpu_id)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu) {
+		if (__cpu_logical_map[cpu] == cpu_id)
+			return 1;
+	}
+	return 0;
+}
+
+static int smp_rescan_cpus_sigp(cpumask_t avail)
+{
+	int cpu_id, logical_cpu;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		if (!cpu_stopped(logical_cpu))
+			continue;
+		cpu_set(logical_cpu, cpu_present_map);
+		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+	return 0;
+}
+
+static int smp_rescan_cpus_sclp(cpumask_t avail)
+{
+	struct sclp_cpu_info *info;
+	int cpu_id, logical_cpu, cpu;
+	int rc;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+	rc = sclp_get_cpu_info(info);
+	if (rc)
+		goto out;
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_id = info->cpu[cpu].address;
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		cpu_set(logical_cpu, cpu_present_map);
+		if (cpu >= info->configured)
+			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
+		else
+			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+out:
+	kfree(info);
+	return rc;
+}
+
+static int smp_rescan_cpus(void)
+{
+	cpumask_t avail;
+
+	cpus_xor(avail, cpu_possible_map, cpu_present_map);
+	if (smp_use_sigp_detection)
+		return smp_rescan_cpus_sigp(avail);
+	else
+		return smp_rescan_cpus_sclp(avail);
+}
+
+static void __init smp_detect_cpus(void)
+{
+	unsigned int cpu, c_cpus, s_cpus;
+	struct sclp_cpu_info *info;
+	u16 boot_cpu_addr, cpu_addr;
+
+	c_cpus = 1;
+	s_cpus = 0;
 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
-	current_thread_info()->cpu = 0;
-	num_cpus = 1;
-	for (cpu = 0; cpu <= 65535; cpu++) {
-		if ((__u16) cpu == boot_cpu_addr)
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		panic("smp_detect_cpus failed to allocate memory\n");
+	/* Use sigp detection algorithm if sclp doesn't work. */
+	if (sclp_get_cpu_info(info)) {
+		smp_use_sigp_detection = 1;
+		for (cpu = 0; cpu <= 65535; cpu++) {
+			if (cpu == boot_cpu_addr)
+				continue;
+			__cpu_logical_map[CPU_INIT_NO] = cpu;
+			if (!cpu_stopped(CPU_INIT_NO))
+				continue;
+			smp_get_save_area(c_cpus, cpu);
+			c_cpus++;
+		}
+		goto out;
+	}
+
+	if (info->has_cpu_type) {
+		for (cpu = 0; cpu < info->combined; cpu++) {
+			if (info->cpu[cpu].address == boot_cpu_addr) {
+				smp_cpu_type = info->cpu[cpu].type;
+				break;
+			}
+		}
+	}
+
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_addr = info->cpu[cpu].address;
+		if (cpu_addr == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[1] = (__u16) cpu;
-		if (signal_processor(1, sigp_sense) == sigp_not_operational)
+		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
+		if (!cpu_stopped(CPU_INIT_NO)) {
+			s_cpus++;
 			continue;
-		smp_get_save_area(num_cpus, cpu);
-		num_cpus++;
+		}
+		smp_get_save_area(c_cpus, cpu_addr);
+		c_cpus++;
 	}
-	printk("Detected %d CPU's\n", (int) num_cpus);
-	printk("Boot cpu address %2X\n", boot_cpu_addr);
-	return num_cpus;
+out:
+	kfree(info);
+	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+	get_online_cpus();
+	smp_rescan_cpus();
+	put_online_cpus();
 }
 
 /*
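
Note: only the SCLP path above can report standby cpus (via info->configured); the sigp sense loop can only find stopped, already-configured cpus, which is why s_cpus is incremented solely on the sclp path. Because cpu_possible_map is now CPU_MASK_ALL, the logical cpu numbers still free for newly detected cpus are simply possible-but-not-present. An illustrative restatement of that computation (not part of the patch):

	/* Illustrative only: same avail mask that smp_rescan_cpus() hands
	 * to the sigp or sclp scan. */
	static cpumask_t smp_unused_logical_cpus(void)
	{
		cpumask_t avail;

		cpus_xor(avail, cpu_possible_map, cpu_present_map);
		return avail;
	}
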
@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
 	return 0;
 }
 
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-
 static void __init smp_create_idle(unsigned int cpu)
 {
 	struct task_struct *p;
@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
 	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
 }
 
-static int cpu_stopped(int cpu)
+static int __cpuinit smp_alloc_lowcore(int cpu)
 {
-	__u32 status;
+	unsigned long async_stack, panic_stack;
+	struct _lowcore *lowcore;
+	int lc_order;
+
+	lc_order = sizeof(long) == 8 ? 1 : 0;
+	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+	if (!lowcore)
+		return -ENOMEM;
+	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+	if (!async_stack)
+		goto out_async_stack;
+	panic_stack = __get_free_page(GFP_KERNEL);
+	if (!panic_stack)
+		goto out_panic_stack;
+
+	*lowcore = S390_lowcore;
+	lowcore->async_stack = async_stack + ASYNC_SIZE;
+	lowcore->panic_stack = panic_stack + PAGE_SIZE;
 
-	/* Check for stopped state */
-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
-	    sigp_status_stored) {
-		if (status & 0x40)
-			return 1;
+#ifndef CONFIG_64BIT
+	if (MACHINE_HAS_IEEE) {
+		unsigned long save_area;
+
+		save_area = get_zeroed_page(GFP_KERNEL);
+		if (!save_area)
+			goto out_save_area;
+		lowcore->extended_save_area_addr = (u32) save_area;
 	}
+#endif
+	lowcore_ptr[cpu] = lowcore;
 	return 0;
+
+#ifndef CONFIG_64BIT
+out_save_area:
+	free_page(panic_stack);
+#endif
+out_panic_stack:
+	free_pages(async_stack, ASYNC_ORDER);
+out_async_stack:
+	free_pages((unsigned long) lowcore, lc_order);
+	return -ENOMEM;
 }
 
-/* Upping and downing of CPUs */
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_free_lowcore(int cpu)
+{
+	struct _lowcore *lowcore;
+	int lc_order;
+
+	lc_order = sizeof(long) == 8 ? 1 : 0;
+	lowcore = lowcore_ptr[cpu];
+#ifndef CONFIG_64BIT
+	if (MACHINE_HAS_IEEE)
+		free_page((unsigned long) lowcore->extended_save_area_addr);
+#endif
+	free_page(lowcore->panic_stack - PAGE_SIZE);
+	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
+	free_pages((unsigned long) lowcore, lc_order);
+	lowcore_ptr[cpu] = NULL;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
 
-int __cpu_up(unsigned int cpu)
+/* Upping and downing of CPUs */
+int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
 	struct stack_frame *sf;
 	sigp_ccode ccode;
-	int curr_cpu;
 
-	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
-		__cpu_logical_map[cpu] = (__u16) curr_cpu;
-		if (cpu_stopped(cpu))
-			break;
-	}
-
-	if (!cpu_stopped(cpu))
-		return -ENODEV;
+	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
+		return -EIO;
+	if (smp_alloc_lowcore(cpu))
+		return -ENOMEM;
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
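
The lc_order expression above picks the allocation order for the per-cpu lowcore: two pages on 64-bit, one page on 31-bit. A hypothetical long-hand equivalent (illustrative only, not part of the patch):

	/* Illustrative only: same value as the inline
	 * "sizeof(long) == 8 ? 1 : 0" in smp_alloc_lowcore() and
	 * smp_free_lowcore(). */
	static inline int lowcore_alloc_order(void)
	{
	#ifdef CONFIG_64BIT
		return 1;	/* 64-bit lowcore spans two pages */
	#else
		return 0;	/* 31-bit lowcore fits in one page */
	#endif
	}
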
@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
 		task_stack_page(idle) + THREAD_SIZE;
+	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
 				     - sizeof(struct pt_regs)
 				     - sizeof(struct stack_frame));
@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
+	cpu_lowcore->softirq_pending = 0;
+	cpu_lowcore->ext_call_fast = 0;
 	eieio();
 
 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
 	return 0;
 }
 
-static unsigned int __initdata additional_cpus;
-static unsigned int __initdata possible_cpus;
-
-void __init smp_setup_cpu_possible_map(void)
+static int __init setup_possible_cpus(char *s)
 {
-	unsigned int phy_cpus, pos_cpus, cpu;
-
-	phy_cpus = smp_count_cpus();
-	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
-
-	if (possible_cpus)
-		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
+	int pcpus, cpu;
 
-	for (cpu = 0; cpu < pos_cpus; cpu++)
+	pcpus = simple_strtoul(s, NULL, 0);
+	cpu_possible_map = cpumask_of_cpu(0);
+	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
 		cpu_set(cpu, cpu_possible_map);
-
-	phy_cpus = min(phy_cpus, pos_cpus);
-
-	for (cpu = 0; cpu < phy_cpus; cpu++)
-		cpu_set(cpu, cpu_present_map);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static int __init setup_additional_cpus(char *s)
-{
-	additional_cpus = simple_strtoul(s, NULL, 0);
-	return 0;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
-static int __init setup_possible_cpus(char *s)
-{
-	possible_cpus = simple_strtoul(s, NULL, 0);
 	return 0;
 }
 early_param("possible_cpus", setup_possible_cpus);
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 int __cpu_disable(void)
 {
 	struct ec_creg_mask_parms cr_parms;
@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!smp_cpu_not_running(cpu))
 		cpu_relax();
-	printk("Processor %d spun down\n", cpu);
+	smp_free_lowcore(cpu);
+	printk(KERN_INFO "Processor %d spun down\n", cpu);
 }
 
 void cpu_die(void)
@@ -625,49 +796,19 @@ void cpu_die(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-/*
- * Cycle through the processors and setup structures.
- */
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned long stack;
 	unsigned int cpu;
-	int i;
+
+	smp_detect_cpus();
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
 	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
-	/*
-	 * Initialize prefix pages and stacks for all possible cpus
-	 */
 	print_cpu_info(&S390_lowcore.cpu_data);
+	smp_alloc_lowcore(smp_processor_id());
 
-	for_each_possible_cpu(i) {
-		lowcore_ptr[i] = (struct _lowcore *)
-			__get_free_pages(GFP_KERNEL | GFP_DMA,
-					 sizeof(void*) == 8 ? 1 : 0);
-		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-		if (!lowcore_ptr[i] || !stack)
-			panic("smp_boot_cpus failed to allocate memory\n");
-
-		*(lowcore_ptr[i]) = S390_lowcore;
-		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
-		stack = __get_free_pages(GFP_KERNEL, 0);
-		if (!stack)
-			panic("smp_boot_cpus failed to allocate memory\n");
-		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
-#ifndef CONFIG_64BIT
-		if (MACHINE_HAS_IEEE) {
-			lowcore_ptr[i]->extended_save_area_addr =
-				(__u32) __get_free_pages(GFP_KERNEL, 0);
-			if (!lowcore_ptr[i]->extended_save_area_addr)
-				panic("smp_boot_cpus failed to "
-				      "allocate memory\n");
-		}
-#endif
-	}
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
 		ctl_set_bit(14, 29);	/* enable extended save area */
@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != 0);
 
+	current_thread_info()->cpu = 0;
+	cpu_set(0, cpu_present_map);
 	cpu_set(0, cpu_online_map);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
+	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	cpu_present_map = cpu_possible_map;
 }
 
 /*
@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
+				   size_t count)
+{
+	int cpu = dev->id;
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	get_online_cpus();
+	rc = -EBUSY;
+	if (cpu_online(cpu))
+		goto out;
+	rc = 0;
+	switch (val) {
+	case 0:
+		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
+			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+		}
+		break;
+	case 1:
+		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
+			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+		}
+		break;
+	default:
+		break;
+	}
+out:
+	put_online_cpus();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
+{
+	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
+}
+static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
+
+
+static struct attribute *cpu_common_attrs[] = {
+#ifdef CONFIG_HOTPLUG_CPU
+	&attr_configure.attr,
+#endif
+	&attr_address.attr,
+	NULL,
+};
+
+static struct attribute_group cpu_common_attr_group = {
+	.attrs = cpu_common_attrs,
+};
 
 static ssize_t show_capability(struct sys_device *dev, char *buf)
 {
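
With these attributes in place, each present cpu gets a read-only "address" file and, on hotplug-enabled kernels, a writable "configure" file in its sysfs directory (normally /sys/devices/system/cpu/cpuN/). Writing 0 or 1 deconfigures or configures the cpu through sclp_cpu_deconfigure()/sclp_cpu_configure(); the store handler rejects writes with trailing garbage via the "%d %c" sscanf check and returns -EBUSY while the cpu is online.
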
@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
 }
 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
-static struct attribute *cpu_attrs[] = {
+static struct attribute *cpu_online_attrs[] = {
 	&attr_capability.attr,
 	&attr_idle_count.attr,
 	&attr_idle_time_us.attr,
 	NULL,
 };
 
-static struct attribute_group cpu_attr_group = {
-	.attrs = cpu_attrs,
+static struct attribute_group cpu_online_attr_group = {
+	.attrs = cpu_online_attrs,
 };
 
 static int __cpuinit smp_cpu_notify(struct notifier_block *self,
@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 		idle->idle_time = 0;
 		idle->idle_count = 0;
 		spin_unlock_irq(&idle->lock);
-		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
+		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
 			return NOTIFY_BAD;
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		sysfs_remove_group(&s->kobj, &cpu_attr_group);
+		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	}
 	return NOTIFY_OK;
@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
 	.notifier_call = smp_cpu_notify,
 };
 
+static int smp_add_present_cpu(int cpu)
+{
+	struct cpu *c = &per_cpu(cpu_devices, cpu);
+	struct sys_device *s = &c->sysdev;
+	int rc;
+
+	c->hotpluggable = 1;
+	rc = register_cpu(c, cpu);
+	if (rc)
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
+	if (rc)
+		goto out_cpu;
+	if (!cpu_online(cpu))
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+	if (!rc)
+		return 0;
+	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
+out_cpu:
+#ifdef CONFIG_HOTPLUG_CPU
+	unregister_cpu(c);
+#endif
+out:
+	return rc;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t rescan_store(struct sys_device *dev, const char *buf,
+			    size_t count)
+{
+	cpumask_t newcpus;
+	int cpu;
+	int rc;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	get_online_cpus();
+	newcpus = cpu_present_map;
+	rc = smp_rescan_cpus();
+	if (rc)
+		goto out;
+	cpus_andnot(newcpus, cpu_present_map, newcpus);
+	for_each_cpu_mask(cpu, newcpus) {
+		rc = smp_add_present_cpu(cpu);
+		if (rc)
+			cpu_clear(cpu, cpu_present_map);
+	}
+	rc = 0;
+out:
+	put_online_cpus();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static int __init topology_init(void)
 {
 	int cpu;
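
The new rescan attribute ends up as a write-only file directly in the cpu sysdev class directory (normally /sys/devices/system/cpu/rescan); any write triggers smp_rescan_cpus() and registers newly discovered cpus through smp_add_present_cpu().
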
@@ -800,16 +1071,14 @@ static int __init topology_init(void)
 
 	register_cpu_notifier(&smp_cpu_nb);
 
-	for_each_possible_cpu(cpu) {
-		struct cpu *c = &per_cpu(cpu_devices, cpu);
-		struct sys_device *s = &c->sysdev;
-
-		c->hotpluggable = 1;
-		register_cpu(c, cpu);
-		if (!cpu_online(cpu))
-			continue;
-		s = &c->sysdev;
-		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
+#ifdef CONFIG_HOTPLUG_CPU
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_rescan.attr);
+	if (rc)
+		return rc;
+#endif
+	for_each_present_cpu(cpu) {
+		rc = smp_add_present_cpu(cpu);
 		if (rc)
 			return rc;
 	}
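
Taken together, the expected flow for activating a standby cpu after this change is roughly: trigger a rescan if the cpu is not yet present, write 1 to its configure attribute so sclp_cpu_configure() moves it to CPU_STATE_CONFIGURED, and then bring it up through the usual per-cpu online attribute; __cpu_up() now returns -EIO for cpus that are still in standby.
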