Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--	arch/s390/kernel/smp.c	131
1 files changed, 77 insertions, 54 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 76a6fdd46c45..541053ed234e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -36,6 +36,8 @@
 #include <linux/cpu.h>
 #include <linux/timex.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <asm/asm-offsets.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
 #include <asm/sigp.h>
@@ -53,7 +55,7 @@
 #include "entry.h"
 
 /* logical cpu to cpu address */
-int __cpu_logical_map[NR_CPUS];
+unsigned short __cpu_logical_map[NR_CPUS];
 
 static struct task_struct *current_set[NR_CPUS];
 
@@ -72,13 +74,13 @@ static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
-static void smp_ext_bitcall(int, ec_bit_sig);
+static void smp_ext_bitcall(int, int);
 
-static int cpu_stopped(int cpu)
+static int raw_cpu_stopped(int cpu)
 {
-	__u32 status;
+	u32 status;
 
-	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
+	switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
 	case sigp_status_stored:
 		/* Check for stopped and check stop state */
 		if (status & 0x50)
@@ -90,6 +92,44 @@ static int cpu_stopped(int cpu)
 		return 0;
 }
 
+static inline int cpu_stopped(int cpu)
+{
+	return raw_cpu_stopped(cpu_logical_map(cpu));
+}
+
+void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
+{
+	struct _lowcore *lc, *current_lc;
+	struct stack_frame *sf;
+	struct pt_regs *regs;
+	unsigned long sp;
+
+	if (smp_processor_id() == 0)
+		func(data);
+	__load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
+	/* Disable lowcore protection */
+	__ctl_clear_bit(0, 28);
+	current_lc = lowcore_ptr[smp_processor_id()];
+	lc = lowcore_ptr[0];
+	if (!lc)
+		lc = current_lc;
+	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
+	if (!cpu_online(0))
+		smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
+	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
+		cpu_relax();
+	sp = lc->panic_stack;
+	sp -= sizeof(struct pt_regs);
+	regs = (struct pt_regs *) sp;
+	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
+	regs->psw = lc->psw_save_area;
+	sp -= STACK_FRAME_OVERHEAD;
+	sf = (struct stack_frame *) sp;
+	sf->back_chain = regs->gprs[15];
+	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
+}
+
 void smp_send_stop(void)
 {
 	int cpu, rc;
@@ -103,7 +143,7 @@ void smp_send_stop(void)
 		if (cpu == smp_processor_id())
 			continue;
 		do {
-			rc = signal_processor(cpu, sigp_stop);
+			rc = sigp(cpu, sigp_stop);
 		} while (rc == sigp_busy);
 
 		while (!cpu_stopped(cpu))
@@ -139,13 +179,13 @@ static void do_ext_call_interrupt(__u16 code)
  * Send an external call sigp to another cpu and return without waiting
  * for its completion.
  */
-static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
+static void smp_ext_bitcall(int cpu, int sig)
 {
 	/*
 	 * Set signaling bit in lowcore of target cpu and kick it
 	 */
 	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
+	while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
 		udelay(10);
 }
 
@@ -239,24 +279,8 @@ void smp_ctl_clear_bit(int cr, int bit)
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
-/*
- * In early ipl state a temp. logically cpu number is needed, so the sigp
- * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
- * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
- */
-#define CPU_INIT_NO	1
-
 #ifdef CONFIG_ZFCPDUMP
 
-/*
- * zfcpdump_prefix_array holds prefix registers for the following scenario:
- * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
- * save its prefix registers, since they get lost, when switching from 31 bit
- * to 64 bit.
- */
-unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
-	__attribute__((__section__(".data")));
-
 static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 {
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
@@ -266,21 +290,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
266 "the dump\n", cpu, NR_CPUS - 1); 290 "the dump\n", cpu, NR_CPUS - 1);
267 return; 291 return;
268 } 292 }
269 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); 293 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
270 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; 294 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
271 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
272 sigp_busy)
273 cpu_relax(); 295 cpu_relax();
274 memcpy(zfcpdump_save_areas[cpu], 296 memcpy_real(zfcpdump_save_areas[cpu],
275 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 297 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
276 SAVE_AREA_SIZE); 298 sizeof(struct save_area));
277#ifdef CONFIG_64BIT
278 /* copy original prefix register */
279 zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
280#endif
281} 299}
282 300
283union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 301struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
284EXPORT_SYMBOL_GPL(zfcpdump_save_areas); 302EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
285 303
286#else 304#else
@@ -389,8 +407,7 @@ static void __init smp_detect_cpus(void)
 	for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
 		if (cpu == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[CPU_INIT_NO] = cpu;
-		if (!cpu_stopped(CPU_INIT_NO))
+		if (!raw_cpu_stopped(cpu))
 			continue;
 		smp_get_save_area(c_cpus, cpu);
 		c_cpus++;
@@ -413,8 +430,7 @@ static void __init smp_detect_cpus(void)
 		cpu_addr = info->cpu[cpu].address;
 		if (cpu_addr == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
-		if (!cpu_stopped(CPU_INIT_NO)) {
+		if (!raw_cpu_stopped(cpu_addr)) {
 			s_cpus++;
 			continue;
 		}
@@ -533,18 +549,18 @@ static void smp_free_lowcore(int cpu)
 /* Upping and downing of CPUs */
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
+	struct task_struct *idle;
 	struct stack_frame *sf;
-	sigp_ccode ccode;
 	u32 lowcore;
+	int ccode;
 
 	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 		return -EIO;
 	if (smp_alloc_lowcore(cpu))
 		return -ENOMEM;
 	do {
-		ccode = signal_processor(cpu, sigp_initial_cpu_reset);
+		ccode = sigp(cpu, sigp_initial_cpu_reset);
 		if (ccode == sigp_busy)
 			udelay(10);
 		if (ccode == sigp_not_operational)
@@ -552,7 +568,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	} while (ccode == sigp_busy);
 
 	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
-	while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
+	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 
 	idle = current_set[cpu];
@@ -578,7 +594,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
 	eieio();
 
-	while (signal_processor(cpu, sigp_restart) == sigp_busy)
+	while (sigp(cpu, sigp_restart) == sigp_busy)
 		udelay(10);
 
 	while (!cpu_online(cpu))
@@ -640,7 +656,7 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!cpu_stopped(cpu))
 		cpu_relax();
-	while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy)
+	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 	smp_free_lowcore(cpu);
 	pr_info("Processor %d stopped\n", cpu);
@@ -649,7 +665,7 @@ void __cpu_die(unsigned int cpu)
 void cpu_die(void)
 {
 	idle_task_exit();
-	while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
 		cpu_relax();
 	for (;;);
 }
@@ -765,7 +781,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
 	rc = -EBUSY;
-	if (cpu_online(cpu))
+	/* disallow configuration changes of online cpus and cpu 0 */
+	if (cpu_online(cpu) || cpu == 0)
 		goto out;
 	rc = 0;
 	switch (val) {
@@ -927,21 +944,21 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
 	struct sys_device *s = &c->sysdev;
 	struct s390_idle_data *idle;
+	int err = 0;
 
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		idle = &per_cpu(s390_idle, cpu);
 		memset(idle, 0, sizeof(struct s390_idle_data));
-		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
-			return NOTIFY_BAD;
+		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	}
-	return NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __cpuinitdata smp_cpu_nb = {
@@ -1004,7 +1021,9 @@ out:
 	return rc;
 }
 
-static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
+static ssize_t __ref rescan_store(struct sysdev_class *class,
+				  struct sysdev_class_attribute *attr,
+				  const char *buf,
 				  size_t count)
 {
 	int rc;
@@ -1015,7 +1034,9 @@ static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
 static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
+static ssize_t dispatching_show(struct sysdev_class *class,
+				struct sysdev_class_attribute *attr,
+				char *buf)
 {
 	ssize_t count;
 
@@ -1025,7 +1046,9 @@ static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
 	return count;
 }
 
-static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
+static ssize_t dispatching_store(struct sysdev_class *dev,
+				 struct sysdev_class_attribute *attr,
+				 const char *buf,
 				 size_t count)
 {
 	int val, rc;