author    Heiko Carstens <heiko.carstens@de.ibm.com>    2008-01-26 08:10:56 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-01-26 08:11:09 -0500
commit    08d07968277cd898c88bf12b7720d89c02c4f139
tree      1c91768976c389883842eb7650141e93b7dbe334
parent    c05ffc4f2b208da8ba7d3a9b5ab886c76f8939b5
[S390] Standby cpu activation/deactivation.

Add a new interface so that cpus can be put into standby state and
configured state. Only offline cpus can be put into standby state or
configured state. For that the new per-cpu sysfs attribute "configure"
must be used. To put a cpu into standby state, a "0" must be written to
the attribute; to switch it into configured state, a "1" must be
written. Only cpus in configured state can be brought online.

In addition this patch introduces a static mapping of physical to
logical cpus. As a result only the sysfs directories of present cpus
will be created. To scan for new cpus the new sysfs attribute "rescan"
must be used. Writing to /sys/devices/system/cpu/rescan triggers a
rescan of cpus and creates directories for any new ones.

On IPL only configured cpus will be used. On reboot/shutdown all cpus
remain in their current state (configured or standby).

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
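
A sketch of the intended usage from user space (a hypothetical session;
it assumes cpu 2 exists, the kernel was built with CONFIG_HOTPLUG_CPU,
and, per the patch, the cpu is offline before "configure" is written --
writes to "configure" fail with -EBUSY while the cpu is online):

	# put offline cpu 2 into standby state
	echo 0 > /sys/devices/system/cpu/cpu2/configure

	# switch it back to configured state and bring it online
	echo 1 > /sys/devices/system/cpu/cpu2/configure
	echo 1 > /sys/devices/system/cpu/cpu2/online

	# trigger a rescan; sysfs directories appear for newly found cpus
	echo 1 > /sys/devices/system/cpu/rescan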

 arch/s390/kernel/early.c      |   3
 arch/s390/kernel/setup.c      |   2
 arch/s390/kernel/smp.c        | 401
 drivers/s390/char/Makefile    |   2
 drivers/s390/char/sclp.h      |   4
 drivers/s390/char/sclp_cmd.c  | 319
 drivers/s390/char/sclp_info.c | 116
 include/asm-s390/sclp.h       |  21
 8 files changed, 675 insertions(+), 193 deletions(-)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1b3af7dab816..c7cbb0114147 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -276,8 +276,9 @@ void __init startup_init(void)
 	create_kernel_nss();
 	sort_main_extable();
 	setup_lowcore_early();
-	sclp_readinfo_early();
+	sclp_read_info_early();
 	sclp_facilities_detect();
+	sclp_read_cpu_info_early();
 	memsize = sclp_memory_detect();
 #ifndef CONFIG_64BIT
 	/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 577aa7dd660e..d68a4025486e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -920,7 +920,7 @@ setup_arch(char **cmdline_p)
 
 void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
 {
-	printk("cpu %d "
+	printk(KERN_INFO "cpu %d "
 #ifdef CONFIG_SMP
 	       "phys_idx=%d "
 #endif
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 264ea906db4c..66fe28930d82 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/lowcore.h>
+#include <asm/sclp.h>
 #include <asm/cpu.h>
 
 /*
@@ -58,6 +59,22 @@ EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
 
+static u8 smp_cpu_type;
+static int smp_use_sigp_detection;
+
+enum s390_cpu_state {
+	CPU_STATE_STANDBY,
+	CPU_STATE_CONFIGURED,
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_MUTEX(smp_cpu_state_mutex);
+#endif
+static int smp_cpu_state[NR_CPUS];
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
 static void smp_ext_bitcall(int, ec_bit_sig);
 
 /*
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
+/*
+ * In early ipl state a temporary logical cpu number is needed, so the
+ * sigp functions can be used to sense other cpus. Since NR_CPUS is >= 2
+ * on CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
+ */
+#define CPU_INIT_NO	1
+
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
 
 /*
@@ -376,8 +400,9 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 		return;
 	}
 	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
-	__cpu_logical_map[1] = (__u16) phy_cpu;
-	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
+	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
+	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
+	       sigp_busy)
 		cpu_relax();
 	memcpy(zfcpdump_save_areas[cpu],
 	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
@@ -397,32 +422,166 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
 
 #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
 
+static int cpu_stopped(int cpu)
+{
+	__u32 status;
+
+	/* Check for stopped state */
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+	    sigp_status_stored) {
+		if (status & 0x40)
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * Let's check how many CPUs we have.
  */
-static unsigned int __init smp_count_cpus(void)
+static void __init smp_count_cpus(unsigned int *configured_cpus,
+				  unsigned int *standby_cpus)
 {
-	unsigned int cpu, num_cpus;
-	__u16 boot_cpu_addr;
+	unsigned int cpu;
+	struct sclp_cpu_info *info;
+	u16 boot_cpu_addr, cpu_addr;
 
-	/*
-	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
-	 */
 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
 	current_thread_info()->cpu = 0;
-	num_cpus = 1;
-	for (cpu = 0; cpu <= 65535; cpu++) {
-		if ((__u16) cpu == boot_cpu_addr)
+	*configured_cpus = 1;
+	*standby_cpus = 0;
+
+	info = alloc_bootmem_pages(sizeof(*info));
+	if (!info)
+		disabled_wait((unsigned long) __builtin_return_address(0));
+
+	/* Use sigp detection algorithm if sclp doesn't work. */
+	if (sclp_get_cpu_info(info)) {
+		smp_use_sigp_detection = 1;
+		for (cpu = 0; cpu <= 65535; cpu++) {
+			if (cpu == boot_cpu_addr)
+				continue;
+			__cpu_logical_map[CPU_INIT_NO] = cpu;
+			if (cpu_stopped(CPU_INIT_NO))
+				(*configured_cpus)++;
+		}
+		goto out;
+	}
+
+	if (info->has_cpu_type) {
+		for (cpu = 0; cpu < info->combined; cpu++) {
+			if (info->cpu[cpu].address == boot_cpu_addr) {
+				smp_cpu_type = info->cpu[cpu].type;
+				break;
+			}
+		}
+	}
+	/* Count cpus. */
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_addr = info->cpu[cpu].address;
+		if (cpu_addr == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[1] = (__u16) cpu;
-		if (signal_processor(1, sigp_sense) == sigp_not_operational)
+		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
+		if (!cpu_stopped(CPU_INIT_NO)) {
+			(*standby_cpus)++;
 			continue;
-		smp_get_save_area(num_cpus, cpu);
-		num_cpus++;
+		}
+		smp_get_save_area(*configured_cpus, cpu_addr);
+		(*configured_cpus)++;
 	}
-	printk("Detected %d CPU's\n", (int) num_cpus);
-	printk("Boot cpu address %2X\n", boot_cpu_addr);
-	return num_cpus;
+out:
+	printk(KERN_INFO "CPUs: %d configured, %d standby\n",
+	       *configured_cpus, *standby_cpus);
+	free_bootmem((unsigned long) info, sizeof(*info));
+}
+
+static int cpu_known(int cpu_id)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu) {
+		if (__cpu_logical_map[cpu] == cpu_id)
+			return 1;
+	}
+	return 0;
+}
+
+static int smp_rescan_cpus_sigp(cpumask_t avail)
+{
+	int cpu_id, logical_cpu;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		if (!cpu_stopped(logical_cpu))
+			continue;
+		cpu_set(logical_cpu, cpu_present_map);
+		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+	return 0;
+}
+
+static int __init_refok smp_rescan_cpus_sclp(cpumask_t avail)
+{
+	struct sclp_cpu_info *info;
+	int cpu_id, logical_cpu, cpu;
+	int rc;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	if (slab_is_available())
+		info = kmalloc(sizeof(*info), GFP_KERNEL);
+	else
+		info = alloc_bootmem(sizeof(*info));
+	if (!info)
+		return -ENOMEM;
+	rc = sclp_get_cpu_info(info);
+	if (rc)
+		goto out;
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_id = info->cpu[cpu].address;
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		cpu_set(logical_cpu, cpu_present_map);
+		if (cpu >= info->configured)
+			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
+		else
+			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+out:
+	if (slab_is_available())
+		kfree(info);
+	else
+		free_bootmem((unsigned long) info, sizeof(*info));
+	return rc;
+}
+
+static int smp_rescan_cpus(void)
+{
+	cpumask_t avail;
+
+	cpus_setall(avail);
+	cpus_and(avail, avail, cpu_possible_map);
+	cpus_andnot(avail, avail, cpu_present_map);
+	if (smp_use_sigp_detection)
+		return smp_rescan_cpus_sigp(avail);
+	else
+		return smp_rescan_cpus_sclp(avail);
 }
 
 /*
@@ -453,8 +612,6 @@ int __cpuinit start_secondary(void *cpuvoid)
 	return 0;
 }
 
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-
 static void __init smp_create_idle(unsigned int cpu)
 {
 	struct task_struct *p;
@@ -470,37 +627,16 @@ static void __init smp_create_idle(unsigned int cpu)
 	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
 }
 
-static int cpu_stopped(int cpu)
-{
-	__u32 status;
-
-	/* Check for stopped state */
-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
-	    sigp_status_stored) {
-		if (status & 0x40)
-			return 1;
-	}
-	return 0;
-}
-
 /* Upping and downing of CPUs */
-
 int __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
 	struct stack_frame *sf;
 	sigp_ccode ccode;
-	int curr_cpu;
-
-	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
-		__cpu_logical_map[cpu] = (__u16) curr_cpu;
-		if (cpu_stopped(cpu))
-			break;
-	}
 
-	if (!cpu_stopped(cpu))
-		return -ENODEV;
+	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
+		return -EIO;
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
@@ -543,21 +679,18 @@ static unsigned int __initdata possible_cpus;
 
 void __init smp_setup_cpu_possible_map(void)
 {
-	unsigned int phy_cpus, pos_cpus, cpu;
-
-	phy_cpus = smp_count_cpus();
-	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
+	unsigned int pos_cpus, cpu;
+	unsigned int configured_cpus, standby_cpus;
 
+	smp_count_cpus(&configured_cpus, &standby_cpus);
+	pos_cpus = min(configured_cpus + standby_cpus + additional_cpus,
+		       (unsigned int) NR_CPUS);
 	if (possible_cpus)
 		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
-
 	for (cpu = 0; cpu < pos_cpus; cpu++)
 		cpu_set(cpu, cpu_possible_map);
-
-	phy_cpus = min(phy_cpus, pos_cpus);
-
-	for (cpu = 0; cpu < phy_cpus; cpu++)
-		cpu_set(cpu, cpu_present_map);
+	cpu_present_map = cpumask_of_cpu(0);
+	smp_rescan_cpus();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -612,7 +745,7 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!smp_cpu_not_running(cpu))
 		cpu_relax();
-	printk("Processor %d spun down\n", cpu);
+	printk(KERN_INFO "Processor %d spun down\n", cpu);
 }
 
 void cpu_die(void)
@@ -686,12 +819,12 @@ void __init smp_prepare_boot_cpu(void)
 	cpu_set(0, cpu_online_map);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
+	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	cpu_present_map = cpu_possible_map;
 }
 
 /*
@@ -705,7 +838,79 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
+				   size_t count)
+{
+	int cpu = dev->id;
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	lock_cpu_hotplug();
+	rc = -EBUSY;
+	if (cpu_online(cpu))
+		goto out;
+	rc = 0;
+	switch (val) {
+	case 0:
+		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
+			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+		}
+		break;
+	case 1:
+		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
+			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+		}
+		break;
+	default:
+		break;
+	}
+out:
+	unlock_cpu_hotplug();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
+{
+	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
+}
+static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
+
+
+static struct attribute *cpu_common_attrs[] = {
+#ifdef CONFIG_HOTPLUG_CPU
+	&attr_configure.attr,
+#endif
+	&attr_address.attr,
+	NULL,
+};
+
+static struct attribute_group cpu_common_attr_group = {
+	.attrs = cpu_common_attrs,
+};
 
 static ssize_t show_capability(struct sys_device *dev, char *buf)
 {
@@ -750,15 +955,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
 }
 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
-static struct attribute *cpu_attrs[] = {
+static struct attribute *cpu_online_attrs[] = {
 	&attr_capability.attr,
 	&attr_idle_count.attr,
 	&attr_idle_time_us.attr,
 	NULL,
 };
 
-static struct attribute_group cpu_attr_group = {
-	.attrs = cpu_attrs,
+static struct attribute_group cpu_online_attr_group = {
+	.attrs = cpu_online_attrs,
 };
 
 static int __cpuinit smp_cpu_notify(struct notifier_block *self,
@@ -778,12 +983,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 		idle->idle_time = 0;
 		idle->idle_count = 0;
 		spin_unlock_irq(&idle->lock);
-		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
+		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
 			return NOTIFY_BAD;
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		sysfs_remove_group(&s->kobj, &cpu_attr_group);
+		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	}
 	return NOTIFY_OK;
@@ -793,6 +998,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
 	.notifier_call = smp_cpu_notify,
 };
 
+static int smp_add_present_cpu(int cpu)
+{
+	struct cpu *c = &per_cpu(cpu_devices, cpu);
+	struct sys_device *s = &c->sysdev;
+	int rc;
+
+	c->hotpluggable = 1;
+	rc = register_cpu(c, cpu);
+	if (rc)
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
+	if (rc)
+		goto out_cpu;
+	if (!cpu_online(cpu))
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+	if (!rc)
+		return 0;
+	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
+out_cpu:
+#ifdef CONFIG_HOTPLUG_CPU
+	unregister_cpu(c);
+#endif
+out:
+	return rc;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t rescan_store(struct sys_device *dev, const char *buf,
+			    size_t count)
+{
+	cpumask_t newcpus;
+	int cpu;
+	int rc;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	lock_cpu_hotplug();
+	newcpus = cpu_present_map;
+	rc = smp_rescan_cpus();
+	if (rc)
+		goto out;
+	cpus_andnot(newcpus, cpu_present_map, newcpus);
+	for_each_cpu_mask(cpu, newcpus) {
+		rc = smp_add_present_cpu(cpu);
+		if (rc)
+			cpu_clear(cpu, cpu_present_map);
+	}
+	rc = 0;
+out:
+	unlock_cpu_hotplug();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static int __init topology_init(void)
 {
 	int cpu;
@@ -800,16 +1061,14 @@ static int __init topology_init(void)
 
 	register_cpu_notifier(&smp_cpu_nb);
 
-	for_each_possible_cpu(cpu) {
-		struct cpu *c = &per_cpu(cpu_devices, cpu);
-		struct sys_device *s = &c->sysdev;
-
-		c->hotpluggable = 1;
-		register_cpu(c, cpu);
-		if (!cpu_online(cpu))
-			continue;
-		s = &c->sysdev;
-		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
+#ifdef CONFIG_HOTPLUG_CPU
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_rescan.attr);
+	if (rc)
+		return rc;
+#endif
+	for_each_present_cpu(cpu) {
+		rc = smp_add_present_cpu(cpu);
 		if (rc)
 			return rc;
 	}
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index bee3a3af691d..9317333ec149 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
-	 sclp_info.o sclp_config.o sclp_chp.o sclp_cpi_sys.o
+	 sclp_cmd.o sclp_config.o sclp_chp.o sclp_cpi_sys.o
 
 obj-$(CONFIG_TN3270) += raw3270.o
 obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index c7318a125852..aa8186d18aee 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -56,8 +56,6 @@ typedef unsigned int sclp_cmdw_t;
 #define SCLP_CMDW_READ_EVENT_DATA	0x00770005
 #define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
 #define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005
-#define SCLP_CMDW_READ_SCP_INFO		0x00020001
-#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
 
 #define GDS_ID_MDSMU		0x1310
 #define GDS_ID_MDSROUTEINFO	0x1311
@@ -83,6 +81,8 @@ extern u64 sclp_facilities;
 
 #define SCLP_HAS_CHP_INFO	(sclp_facilities & 0x8000000000000000ULL)
 #define SCLP_HAS_CHP_RECONFIG	(sclp_facilities & 0x2000000000000000ULL)
+#define SCLP_HAS_CPU_INFO	(sclp_facilities & 0x0800000000000000ULL)
+#define SCLP_HAS_CPU_RECONFIG	(sclp_facilities & 0x0400000000000000ULL)
 
 struct gds_subvector {
 	u8 length;
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
new file mode 100644
index 000000000000..ba004fd43c05
--- /dev/null
+++ b/drivers/s390/char/sclp_cmd.c
@@ -0,0 +1,319 @@
+/*
+ * drivers/s390/char/sclp_cmd.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/sclp.h>
+#include "sclp.h"
+
+#define TAG	"sclp_cmd: "
+
+#define SCLP_CMDW_READ_SCP_INFO		0x00020001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
+
+struct read_info_sccb {
+	struct	sccb_header header;	/* 0-7 */
+	u16	rnmax;			/* 8-9 */
+	u8	rnsize;			/* 10 */
+	u8	_reserved0[24 - 11];	/* 11-23 */
+	u8	loadparm[8];		/* 24-31 */
+	u8	_reserved1[48 - 32];	/* 32-47 */
+	u64	facilities;		/* 48-55 */
+	u8	_reserved2[84 - 56];	/* 56-83 */
+	u8	fac84;			/* 84 */
+	u8	_reserved3[91 - 85];	/* 85-90 */
+	u8	flags;			/* 91 */
+	u8	_reserved4[100 - 92];	/* 92-99 */
+	u32	rnsize2;		/* 100-103 */
+	u64	rnmax2;			/* 104-111 */
+	u8	_reserved5[4096 - 112];	/* 112-4095 */
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static struct read_info_sccb __initdata early_read_info_sccb;
+static int __initdata early_read_info_sccb_valid;
+
+u64 sclp_facilities;
+static u8 sclp_fac84;
+
+static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
+{
+	int rc;
+
+	__ctl_set_bit(0, 9);
+	rc = sclp_service_call(cmd, sccb);
+	if (rc)
+		goto out;
+	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
+			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
+	local_irq_disable();
+out:
+	/* Contents of the sccb might have changed. */
+	barrier();
+	__ctl_clear_bit(0, 9);
+	return rc;
+}
+
+void __init sclp_read_info_early(void)
+{
+	int rc;
+	int i;
+	struct read_info_sccb *sccb;
+	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+				  SCLP_CMDW_READ_SCP_INFO};
+
+	sccb = &early_read_info_sccb;
+	for (i = 0; i < ARRAY_SIZE(commands); i++) {
+		do {
+			memset(sccb, 0, sizeof(*sccb));
+			sccb->header.length = sizeof(*sccb);
+			sccb->header.control_mask[2] = 0x80;
+			rc = sclp_cmd_sync_early(commands[i], sccb);
+		} while (rc == -EBUSY);
+
+		if (rc)
+			break;
+		if (sccb->header.response_code == 0x10) {
+			early_read_info_sccb_valid = 1;
+			break;
+		}
+		if (sccb->header.response_code != 0x1f0)
+			break;
+	}
+}
+
+void __init sclp_facilities_detect(void)
+{
+	if (!early_read_info_sccb_valid)
+		return;
+	sclp_facilities = early_read_info_sccb.facilities;
+	sclp_fac84 = early_read_info_sccb.fac84;
+}
+
+unsigned long long __init sclp_memory_detect(void)
+{
+	unsigned long long memsize;
+	struct read_info_sccb *sccb;
+
+	if (!early_read_info_sccb_valid)
+		return 0;
+	sccb = &early_read_info_sccb;
+	if (sccb->rnsize)
+		memsize = sccb->rnsize << 20;
+	else
+		memsize = sccb->rnsize2 << 20;
+	if (sccb->rnmax)
+		memsize *= sccb->rnmax;
+	else
+		memsize *= sccb->rnmax2;
+	return memsize;
+}
+
+/*
+ * This function will be called after sclp_memory_detect(), which gets called
+ * early from early.c code. Therefore the sccb should have valid contents.
+ */
+void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
+{
+	struct read_info_sccb *sccb;
+
+	if (!early_read_info_sccb_valid)
+		return;
+	sccb = &early_read_info_sccb;
+	info->is_valid = 1;
+	if (sccb->flags & 0x2)
+		info->has_dump = 1;
+	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
+}
+
+static void sclp_sync_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
+{
+	struct completion completion;
+	struct sclp_req *request;
+	int rc;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+	request->command = cmd;
+	request->sccb = sccb;
+	request->status = SCLP_REQ_FILLED;
+	request->callback = sclp_sync_callback;
+	request->callback_data = &completion;
+	init_completion(&completion);
+
+	/* Perform sclp request. */
+	rc = sclp_add_request(request);
+	if (rc)
+		goto out;
+	wait_for_completion(&completion);
+
+	/* Check response. */
+	if (request->status != SCLP_REQ_DONE) {
+		printk(KERN_WARNING TAG "sync request failed "
+		       "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status);
+		rc = -EIO;
+	}
+out:
+	kfree(request);
+	return rc;
+}
+
+/*
+ * CPU configuration related functions.
+ */
+
+#define SCLP_CMDW_READ_CPU_INFO		0x00010001
+#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
+
+struct read_cpu_info_sccb {
+	struct	sccb_header header;
+	u16	nr_configured;
+	u16	offset_configured;
+	u16	nr_standby;
+	u16	offset_standby;
+	u8	reserved[4096 - 16];
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static struct read_cpu_info_sccb __initdata early_read_cpu_info_sccb;
+static struct sclp_cpu_info __initdata sclp_cpu_info;
+
+static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
+			       struct read_cpu_info_sccb *sccb)
+{
+	char *page = (char *) sccb;
+
+	memset(info, 0, sizeof(*info));
+	info->configured = sccb->nr_configured;
+	info->standby = sccb->nr_standby;
+	info->combined = sccb->nr_configured + sccb->nr_standby;
+	info->has_cpu_type = sclp_fac84 & 0x1;
+	memcpy(&info->cpu, page + sccb->offset_configured,
+	       info->combined * sizeof(struct sclp_cpu_entry));
+}
+
+void __init sclp_read_cpu_info_early(void)
+{
+	int rc;
+	struct read_cpu_info_sccb *sccb;
+
+	if (!SCLP_HAS_CPU_INFO)
+		return;
+
+	sccb = &early_read_cpu_info_sccb;
+	do {
+		memset(sccb, 0, sizeof(*sccb));
+		sccb->header.length = sizeof(*sccb);
+		rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
+	} while (rc == -EBUSY);
+
+	if (rc)
+		return;
+	if (sccb->header.response_code != 0x10)
+		return;
+	sclp_fill_cpu_info(&sclp_cpu_info, sccb);
+}
+
+static int __init sclp_get_cpu_info_early(struct sclp_cpu_info *info)
+{
+	if (!SCLP_HAS_CPU_INFO)
+		return -EOPNOTSUPP;
+	*info = sclp_cpu_info;
+	return 0;
+}
+
+static int sclp_get_cpu_info_late(struct sclp_cpu_info *info)
+{
+	int rc;
+	struct read_cpu_info_sccb *sccb;
+
+	if (!SCLP_HAS_CPU_INFO)
+		return -EOPNOTSUPP;
+	sccb = (struct read_cpu_info_sccb *) __get_free_page(GFP_KERNEL
+							     | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->header.length = sizeof(*sccb);
+	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
+	if (rc)
+		goto out;
+	if (sccb->header.response_code != 0x0010) {
+		printk(KERN_WARNING TAG "readcpuinfo failed "
+		       "(response=0x%04x)\n", sccb->header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	sclp_fill_cpu_info(info, sccb);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+int __init_refok sclp_get_cpu_info(struct sclp_cpu_info *info)
+{
+	if (slab_is_available())
+		return sclp_get_cpu_info_late(info);
+	return sclp_get_cpu_info_early(info);
+}
+
+struct cpu_configure_sccb {
+	struct sccb_header header;
+} __attribute__((packed, aligned(8)));
+
+static int do_cpu_configure(sclp_cmdw_t cmd)
+{
+	struct cpu_configure_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CPU_RECONFIG)
+		return -EOPNOTSUPP;
+	/*
+	 * This is not going to cross a page boundary since we force
+	 * kmalloc to have a minimum alignment of 8 bytes on s390.
+	 */
+	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = do_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, "
+		       "response=0x%04x)\n", cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	kfree(sccb);
+	return rc;
+}
+
+int sclp_cpu_configure(u8 cpu)
+{
+	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
+}
+
+int sclp_cpu_deconfigure(u8 cpu)
+{
+	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
+}
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
deleted file mode 100644
index a1136e052750..000000000000
--- a/drivers/s390/char/sclp_info.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * drivers/s390/char/sclp_info.c
- *
- * Copyright IBM Corp. 2007
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <asm/sclp.h>
-#include "sclp.h"
-
-struct sclp_readinfo_sccb {
-	struct	sccb_header header;	/* 0-7 */
-	u16	rnmax;			/* 8-9 */
-	u8	rnsize;			/* 10 */
-	u8	_reserved0[24 - 11];	/* 11-23 */
-	u8	loadparm[8];		/* 24-31 */
-	u8	_reserved1[48 - 32];	/* 32-47 */
-	u64	facilities;		/* 48-55 */
-	u8	_reserved2[91 - 56];	/* 56-90 */
-	u8	flags;			/* 91 */
-	u8	_reserved3[100 - 92];	/* 92-99 */
-	u32	rnsize2;		/* 100-103 */
-	u64	rnmax2;			/* 104-111 */
-	u8	_reserved4[4096 - 112];	/* 112-4095 */
-} __attribute__((packed, aligned(4096)));
-
-static struct sclp_readinfo_sccb __initdata early_readinfo_sccb;
-static int __initdata early_readinfo_sccb_valid;
-
-u64 sclp_facilities;
-
-void __init sclp_readinfo_early(void)
-{
-	int ret;
-	int i;
-	struct sclp_readinfo_sccb *sccb;
-	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
-				  SCLP_CMDW_READ_SCP_INFO};
-
-	/* Enable service signal subclass mask. */
-	__ctl_set_bit(0, 9);
-	sccb = &early_readinfo_sccb;
-	for (i = 0; i < ARRAY_SIZE(commands); i++) {
-		do {
-			memset(sccb, 0, sizeof(*sccb));
-			sccb->header.length = sizeof(*sccb);
-			sccb->header.control_mask[2] = 0x80;
-			ret = sclp_service_call(commands[i], sccb);
-		} while (ret == -EBUSY);
-
-		if (ret)
-			break;
-		__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
-				PSW_MASK_WAIT | PSW_DEFAULT_KEY);
-		local_irq_disable();
-		/*
-		 * Contents of the sccb might have changed
-		 * therefore a barrier is needed.
-		 */
-		barrier();
-		if (sccb->header.response_code == 0x10) {
-			early_readinfo_sccb_valid = 1;
-			break;
-		}
-		if (sccb->header.response_code != 0x1f0)
-			break;
-	}
-	/* Disable service signal subclass mask again. */
-	__ctl_clear_bit(0, 9);
-}
-
-void __init sclp_facilities_detect(void)
-{
-	if (!early_readinfo_sccb_valid)
-		return;
-	sclp_facilities = early_readinfo_sccb.facilities;
-}
-
-unsigned long long __init sclp_memory_detect(void)
-{
-	unsigned long long memsize;
-	struct sclp_readinfo_sccb *sccb;
-
-	if (!early_readinfo_sccb_valid)
-		return 0;
-	sccb = &early_readinfo_sccb;
-	if (sccb->rnsize)
-		memsize = sccb->rnsize << 20;
-	else
-		memsize = sccb->rnsize2 << 20;
-	if (sccb->rnmax)
-		memsize *= sccb->rnmax;
-	else
-		memsize *= sccb->rnmax2;
-	return memsize;
-}
-
-/*
- * This function will be called after sclp_memory_detect(), which gets called
- * early from early.c code. Therefore the sccb should have valid contents.
- */
-void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
-{
-	struct sclp_readinfo_sccb *sccb;
-
-	if (!early_readinfo_sccb_valid)
-		return;
-	sccb = &early_readinfo_sccb;
-	info->is_valid = 1;
-	if (sccb->flags & 0x2)
-		info->has_dump = 1;
-	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
-}
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index cb9faf1ea5cf..b8c7695cd4c8 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -27,7 +27,26 @@ struct sclp_ipl_info {
 	char loadparm[LOADPARM_LEN];
 };
 
-void sclp_readinfo_early(void);
+struct sclp_cpu_entry {
+	u8 address;
+	u8 reserved0[13];
+	u8 type;
+	u8 reserved1;
+} __attribute__((packed));
+
+struct sclp_cpu_info {
+	unsigned int configured;
+	unsigned int standby;
+	unsigned int combined;
+	int has_cpu_type;
+	struct sclp_cpu_entry cpu[255];
+};
+
+int sclp_get_cpu_info(struct sclp_cpu_info *info);
+int sclp_cpu_configure(u8 cpu);
+int sclp_cpu_deconfigure(u8 cpu);
+void sclp_read_info_early(void);
+void sclp_read_cpu_info_early(void);
 void sclp_facilities_detect(void);
 unsigned long long sclp_memory_detect(void);
 int sclp_sdias_blk_count(void);