Diffstat (limited to 'arch/s390/kernel/smp.c')
 arch/s390/kernel/smp.c | 401 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 330 insertions(+), 71 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 264ea906db4c..66fe28930d82 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/lowcore.h>
+#include <asm/sclp.h>
 #include <asm/cpu.h>
 
 /*
@@ -58,6 +59,22 @@ EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
 
+static u8 smp_cpu_type;
+static int smp_use_sigp_detection;
+
+enum s390_cpu_state {
+	CPU_STATE_STANDBY,
+	CPU_STATE_CONFIGURED,
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_MUTEX(smp_cpu_state_mutex);
+#endif
+static int smp_cpu_state[NR_CPUS];
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
 static void smp_ext_bitcall(int, ec_bit_sig);
 
 /*
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
+/*
+ * In early ipl state a temporary logical cpu number is needed, so the
+ * sigp functions can be used to sense other cpus. Since NR_CPUS is >= 2
+ * with CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
+ */
+#define CPU_INIT_NO	1
+
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
 
 /*
@@ -376,8 +400,9 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 		return;
 	}
 	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
-	__cpu_logical_map[1] = (__u16) phy_cpu;
-	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
+	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
+	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
+	       sigp_busy)
 		cpu_relax();
 	memcpy(zfcpdump_save_areas[cpu],
 	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
@@ -397,32 +422,166 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
 
 #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
 
+static int cpu_stopped(int cpu)
+{
+	__u32 status;
+
+	/* Check for stopped state */
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+	    sigp_status_stored) {
+		if (status & 0x40)
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * Let's check how many CPUs we have.
  */
-static unsigned int __init smp_count_cpus(void)
+static void __init smp_count_cpus(unsigned int *configured_cpus,
+				  unsigned int *standby_cpus)
 {
-	unsigned int cpu, num_cpus;
-	__u16 boot_cpu_addr;
+	unsigned int cpu;
+	struct sclp_cpu_info *info;
+	u16 boot_cpu_addr, cpu_addr;
 
-	/*
-	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
-	 */
 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
 	current_thread_info()->cpu = 0;
-	num_cpus = 1;
-	for (cpu = 0; cpu <= 65535; cpu++) {
-		if ((__u16) cpu == boot_cpu_addr)
+	*configured_cpus = 1;
+	*standby_cpus = 0;
+
+	info = alloc_bootmem_pages(sizeof(*info));
+	if (!info)
+		disabled_wait((unsigned long) __builtin_return_address(0));
+
+	/* Use sigp detection algorithm if sclp doesn't work. */
+	if (sclp_get_cpu_info(info)) {
+		smp_use_sigp_detection = 1;
+		for (cpu = 0; cpu <= 65535; cpu++) {
+			if (cpu == boot_cpu_addr)
+				continue;
+			__cpu_logical_map[CPU_INIT_NO] = cpu;
+			if (cpu_stopped(CPU_INIT_NO))
+				(*configured_cpus)++;
+		}
+		goto out;
+	}
+
+	if (info->has_cpu_type) {
+		for (cpu = 0; cpu < info->combined; cpu++) {
+			if (info->cpu[cpu].address == boot_cpu_addr) {
+				smp_cpu_type = info->cpu[cpu].type;
+				break;
+			}
+		}
+	}
+	/* Count cpus. */
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_addr = info->cpu[cpu].address;
+		if (cpu_addr == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[1] = (__u16) cpu;
-		if (signal_processor(1, sigp_sense) == sigp_not_operational)
+		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
+		if (!cpu_stopped(CPU_INIT_NO)) {
+			(*standby_cpus)++;
 			continue;
-		smp_get_save_area(num_cpus, cpu);
-		num_cpus++;
+		}
+		smp_get_save_area(*configured_cpus, cpu_addr);
+		(*configured_cpus)++;
 	}
-	printk("Detected %d CPU's\n", (int) num_cpus);
-	printk("Boot cpu address %2X\n", boot_cpu_addr);
-	return num_cpus;
+out:
+	printk(KERN_INFO "CPUs: %d configured, %d standby\n",
+	       *configured_cpus, *standby_cpus);
+	free_bootmem((unsigned long) info, sizeof(*info));
+}
+
+static int cpu_known(int cpu_id)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu) {
+		if (__cpu_logical_map[cpu] == cpu_id)
+			return 1;
+	}
+	return 0;
+}
+
+static int smp_rescan_cpus_sigp(cpumask_t avail)
+{
+	int cpu_id, logical_cpu;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		if (!cpu_stopped(logical_cpu))
+			continue;
+		cpu_set(logical_cpu, cpu_present_map);
+		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+	return 0;
+}
+
+static int __init_refok smp_rescan_cpus_sclp(cpumask_t avail)
+{
+	struct sclp_cpu_info *info;
+	int cpu_id, logical_cpu, cpu;
+	int rc;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	if (slab_is_available())
+		info = kmalloc(sizeof(*info), GFP_KERNEL);
+	else
+		info = alloc_bootmem(sizeof(*info));
+	if (!info)
+		return -ENOMEM;
+	rc = sclp_get_cpu_info(info);
+	if (rc)
+		goto out;
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_id = info->cpu[cpu].address;
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		cpu_set(logical_cpu, cpu_present_map);
+		if (cpu >= info->configured)
+			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
+		else
+			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+out:
+	if (slab_is_available())
+		kfree(info);
+	else
+		free_bootmem((unsigned long) info, sizeof(*info));
+	return rc;
+}
+
+static int smp_rescan_cpus(void)
+{
+	cpumask_t avail;
+
+	cpus_setall(avail);
+	cpus_and(avail, avail, cpu_possible_map);
+	cpus_andnot(avail, avail, cpu_present_map);
+	if (smp_use_sigp_detection)
+		return smp_rescan_cpus_sigp(avail);
+	else
+		return smp_rescan_cpus_sclp(avail);
 }
 
 /*
@@ -453,8 +612,6 @@ int __cpuinit start_secondary(void *cpuvoid)
 	return 0;
 }
 
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-
 static void __init smp_create_idle(unsigned int cpu)
 {
 	struct task_struct *p;
@@ -470,37 +627,16 @@ static void __init smp_create_idle(unsigned int cpu)
 	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
 }
 
-static int cpu_stopped(int cpu)
-{
-	__u32 status;
-
-	/* Check for stopped state */
-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
-	    sigp_status_stored) {
-		if (status & 0x40)
-			return 1;
-	}
-	return 0;
-}
-
 /* Upping and downing of CPUs */
-
 int __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
 	struct stack_frame *sf;
 	sigp_ccode ccode;
-	int curr_cpu;
-
-	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
-		__cpu_logical_map[cpu] = (__u16) curr_cpu;
-		if (cpu_stopped(cpu))
-			break;
-	}
 
-	if (!cpu_stopped(cpu))
-		return -ENODEV;
+	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
+		return -EIO;
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
@@ -543,21 +679,18 @@ static unsigned int __initdata possible_cpus;
 
 void __init smp_setup_cpu_possible_map(void)
 {
-	unsigned int phy_cpus, pos_cpus, cpu;
-
-	phy_cpus = smp_count_cpus();
-	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
+	unsigned int pos_cpus, cpu;
+	unsigned int configured_cpus, standby_cpus;
 
+	smp_count_cpus(&configured_cpus, &standby_cpus);
+	pos_cpus = min(configured_cpus + standby_cpus + additional_cpus,
+		       (unsigned int) NR_CPUS);
 	if (possible_cpus)
 		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
-
 	for (cpu = 0; cpu < pos_cpus; cpu++)
 		cpu_set(cpu, cpu_possible_map);
-
-	phy_cpus = min(phy_cpus, pos_cpus);
-
-	for (cpu = 0; cpu < phy_cpus; cpu++)
-		cpu_set(cpu, cpu_present_map);
+	cpu_present_map = cpumask_of_cpu(0);
+	smp_rescan_cpus();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
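To make the new sizing arithmetic concrete, here is a stand-alone sketch of the pos_cpus computation with made-up numbers (the counts and the NR_CPUS value are illustrative only). Note that the hunk above now seeds cpu_present_map with cpu 0 alone and lets smp_rescan_cpus() fill in the rest:

#include <stdio.h>

#define NR_CPUS 32U	/* illustrative build-time constant */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int configured = 2, standby = 2, additional = 2;
	unsigned int possible = 0;	/* as if "possible_cpus=" was not given */
	unsigned int pos;

	pos = min_u(configured + standby + additional, NR_CPUS);
	if (possible)
		pos = min_u(possible, NR_CPUS);
	printf("cpu_possible_map covers cpus 0..%u\n", pos - 1);
	return 0;
}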
@@ -612,7 +745,7 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!smp_cpu_not_running(cpu))
 		cpu_relax();
-	printk("Processor %d spun down\n", cpu);
+	printk(KERN_INFO "Processor %d spun down\n", cpu);
 }
 
 void cpu_die(void)
@@ -686,12 +819,12 @@ void __init smp_prepare_boot_cpu(void)
 	cpu_set(0, cpu_online_map);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
+	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	cpu_present_map = cpu_possible_map;
 }
 
 /*
@@ -705,7 +838,79 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
+				   size_t count)
+{
+	int cpu = dev->id;
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	lock_cpu_hotplug();
+	rc = -EBUSY;
+	if (cpu_online(cpu))
+		goto out;
+	rc = 0;
+	switch (val) {
+	case 0:
+		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
+			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+		}
+		break;
+	case 1:
+		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
+			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+		}
+		break;
+	default:
+		break;
+	}
+out:
+	unlock_cpu_hotplug();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
+{
+	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
+}
+static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
+
+
+static struct attribute *cpu_common_attrs[] = {
+#ifdef CONFIG_HOTPLUG_CPU
+	&attr_configure.attr,
+#endif
+	&attr_address.attr,
+	NULL,
+};
+
+static struct attribute_group cpu_common_attr_group = {
+	.attrs = cpu_common_attrs,
+};
 
 static ssize_t show_capability(struct sys_device *dev, char *buf)
 {
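The configure attribute registered above is per cpu; with the usual sysdev layout it should surface as /sys/devices/system/cpu/cpuN/configure (path and cpu number below are assumptions, and the kernel must be built with CONFIG_HOTPLUG_CPU). A minimal user-space sketch of flipping a standby cpu to configured; note the write fails with EBUSY while the cpu is online:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu1/configure";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* "1" ends up in sclp_cpu_configure(), "0" in sclp_cpu_deconfigure(). */
	if (fputs("1\n", f) == EOF)
		perror("write");
	return fclose(f) ? 1 : 0;
}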
@@ -750,15 +955,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
 }
 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
-static struct attribute *cpu_attrs[] = {
+static struct attribute *cpu_online_attrs[] = {
 	&attr_capability.attr,
 	&attr_idle_count.attr,
 	&attr_idle_time_us.attr,
 	NULL,
 };
 
-static struct attribute_group cpu_attr_group = {
-	.attrs = cpu_attrs,
+static struct attribute_group cpu_online_attr_group = {
+	.attrs = cpu_online_attrs,
 };
 
 static int __cpuinit smp_cpu_notify(struct notifier_block *self,
@@ -778,12 +983,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 		idle->idle_time = 0;
 		idle->idle_count = 0;
 		spin_unlock_irq(&idle->lock);
-		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
+		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
 			return NOTIFY_BAD;
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		sysfs_remove_group(&s->kobj, &cpu_attr_group);
+		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	}
 	return NOTIFY_OK;
@@ -793,6 +998,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
 	.notifier_call = smp_cpu_notify,
 };
 
+static int smp_add_present_cpu(int cpu)
+{
+	struct cpu *c = &per_cpu(cpu_devices, cpu);
+	struct sys_device *s = &c->sysdev;
+	int rc;
+
+	c->hotpluggable = 1;
+	rc = register_cpu(c, cpu);
+	if (rc)
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
+	if (rc)
+		goto out_cpu;
+	if (!cpu_online(cpu))
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+	if (!rc)
+		return 0;
+	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
+out_cpu:
+#ifdef CONFIG_HOTPLUG_CPU
+	unregister_cpu(c);
+#endif
+out:
+	return rc;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t rescan_store(struct sys_device *dev, const char *buf,
+			    size_t count)
+{
+	cpumask_t newcpus;
+	int cpu;
+	int rc;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	lock_cpu_hotplug();
+	newcpus = cpu_present_map;
+	rc = smp_rescan_cpus();
+	if (rc)
+		goto out;
+	cpus_andnot(newcpus, cpu_present_map, newcpus);
+	for_each_cpu_mask(cpu, newcpus) {
+		rc = smp_add_present_cpu(cpu);
+		if (rc)
+			cpu_clear(cpu, cpu_present_map);
+	}
+	rc = 0;
+out:
+	unlock_cpu_hotplug();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static int __init topology_init(void)
 {
 	int cpu;
@@ -800,16 +1061,14 @@ static int __init topology_init(void)
 
 	register_cpu_notifier(&smp_cpu_nb);
 
-	for_each_possible_cpu(cpu) {
-		struct cpu *c = &per_cpu(cpu_devices, cpu);
-		struct sys_device *s = &c->sysdev;
-
-		c->hotpluggable = 1;
-		register_cpu(c, cpu);
-		if (!cpu_online(cpu))
-			continue;
-		s = &c->sysdev;
-		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
+#ifdef CONFIG_HOTPLUG_CPU
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_rescan.attr);
+	if (rc)
+		return rc;
+#endif
+	for_each_present_cpu(cpu) {
+		rc = smp_add_present_cpu(cpu);
 		if (rc)
 			return rc;
 	}
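Finally, the write-only rescan attribute hangs off the cpu sysdev class, which should appear as /sys/devices/system/cpu/rescan (path assumed from cpu_sysdev_class above). Any value triggers a rescan, since rescan_store() never looks at the buffer; a minimal sketch:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/rescan";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* contents are ignored by rescan_store() */
	return fclose(f) ? 1 : 0;
}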