Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--	arch/s390/kernel/smp.c	108
1 file changed, 62 insertions(+), 46 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 76a6fdd46c45..8b10127c00ad 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -36,6 +36,7 @@
 #include <linux/cpu.h>
 #include <linux/timex.h>
 #include <linux/bootmem.h>
+#include <asm/asm-offsets.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
 #include <asm/sigp.h>
@@ -53,7 +54,7 @@
 #include "entry.h"
 
 /* logical cpu to cpu address */
-int __cpu_logical_map[NR_CPUS];
+unsigned short __cpu_logical_map[NR_CPUS];
 
 static struct task_struct *current_set[NR_CPUS];
 
@@ -72,13 +73,13 @@ static int cpu_management;
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
-static void smp_ext_bitcall(int, ec_bit_sig);
+static void smp_ext_bitcall(int, int);
 
-static int cpu_stopped(int cpu)
+static int raw_cpu_stopped(int cpu)
 {
-	__u32 status;
+	u32 status;
 
-	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
+	switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
 	case sigp_status_stored:
 		/* Check for stopped and check stop state */
 		if (status & 0x50)
@@ -90,6 +91,44 @@ static int cpu_stopped(int cpu)
 	return 0;
 }
 
+static inline int cpu_stopped(int cpu)
+{
+	return raw_cpu_stopped(cpu_logical_map(cpu));
+}
+
+void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
+{
+	struct _lowcore *lc, *current_lc;
+	struct stack_frame *sf;
+	struct pt_regs *regs;
+	unsigned long sp;
+
+	if (smp_processor_id() == 0)
+		func(data);
+	__load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
+	/* Disable lowcore protection */
+	__ctl_clear_bit(0, 28);
+	current_lc = lowcore_ptr[smp_processor_id()];
+	lc = lowcore_ptr[0];
+	if (!lc)
+		lc = current_lc;
+	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
+	if (!cpu_online(0))
+		smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
+	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
+		cpu_relax();
+	sp = lc->panic_stack;
+	sp -= sizeof(struct pt_regs);
+	regs = (struct pt_regs *) sp;
+	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
+	regs->psw = lc->psw_save_area;
+	sp -= STACK_FRAME_OVERHEAD;
+	sf = (struct stack_frame *) sp;
+	sf->back_chain = regs->gprs[15];
+	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
+}
+
 void smp_send_stop(void)
 {
 	int cpu, rc;
@@ -103,7 +142,7 @@ void smp_send_stop(void)
 		if (cpu == smp_processor_id())
 			continue;
 		do {
-			rc = signal_processor(cpu, sigp_stop);
+			rc = sigp(cpu, sigp_stop);
 		} while (rc == sigp_busy);
 
 		while (!cpu_stopped(cpu))
@@ -139,13 +178,13 @@ static void do_ext_call_interrupt(__u16 code)
  * Send an external call sigp to another cpu and return without waiting
  * for its completion.
  */
-static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
+static void smp_ext_bitcall(int cpu, int sig)
 {
 	/*
 	 * Set signaling bit in lowcore of target cpu and kick it
 	 */
 	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
+	while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
 		udelay(10);
 }
 
@@ -239,24 +278,8 @@ void smp_ctl_clear_bit(int cr, int bit)
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
-/*
- * In early ipl state a temp. logically cpu number is needed, so the sigp
- * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
- * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
- */
-#define CPU_INIT_NO 1
-
 #ifdef CONFIG_ZFCPDUMP
 
-/*
- * zfcpdump_prefix_array holds prefix registers for the following scenario:
- * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
- * save its prefix registers, since they get lost, when switching from 31 bit
- * to 64 bit.
- */
-unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
-	__attribute__((__section__(".data")));
-
 static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 {
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
@@ -266,21 +289,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 			   "the dump\n", cpu, NR_CPUS - 1);
 		return;
 	}
-	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
-	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
-	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
-	       sigp_busy)
+	zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
+	while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
 		cpu_relax();
 	memcpy(zfcpdump_save_areas[cpu],
 	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
-	       SAVE_AREA_SIZE);
-#ifdef CONFIG_64BIT
-	/* copy original prefix register */
-	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
-#endif
+	       sizeof(struct save_area));
 }
 
-union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
 
 #else
@@ -389,8 +406,7 @@ static void __init smp_detect_cpus(void)
 	for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
 		if (cpu == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[CPU_INIT_NO] = cpu;
-		if (!cpu_stopped(CPU_INIT_NO))
+		if (!raw_cpu_stopped(cpu))
 			continue;
 		smp_get_save_area(c_cpus, cpu);
 		c_cpus++;
@@ -413,8 +429,7 @@ static void __init smp_detect_cpus(void)
 		cpu_addr = info->cpu[cpu].address;
 		if (cpu_addr == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
-		if (!cpu_stopped(CPU_INIT_NO)) {
+		if (!raw_cpu_stopped(cpu_addr)) {
 			s_cpus++;
 			continue;
 		}
@@ -533,18 +548,18 @@ static void smp_free_lowcore(int cpu)
 /* Upping and downing of CPUs */
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
+	struct task_struct *idle;
 	struct stack_frame *sf;
-	sigp_ccode ccode;
 	u32 lowcore;
+	int ccode;
 
 	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 		return -EIO;
 	if (smp_alloc_lowcore(cpu))
 		return -ENOMEM;
 	do {
-		ccode = signal_processor(cpu, sigp_initial_cpu_reset);
+		ccode = sigp(cpu, sigp_initial_cpu_reset);
 		if (ccode == sigp_busy)
 			udelay(10);
 		if (ccode == sigp_not_operational)
@@ -552,7 +567,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	} while (ccode == sigp_busy);
 
 	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
-	while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
+	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 
 	idle = current_set[cpu];
@@ -578,7 +593,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
 	eieio();
 
-	while (signal_processor(cpu, sigp_restart) == sigp_busy)
+	while (sigp(cpu, sigp_restart) == sigp_busy)
 		udelay(10);
 
 	while (!cpu_online(cpu))
@@ -640,7 +655,7 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!cpu_stopped(cpu))
 		cpu_relax();
-	while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy)
+	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 	smp_free_lowcore(cpu);
 	pr_info("Processor %d stopped\n", cpu);
@@ -649,7 +664,7 @@ void __cpu_die(unsigned int cpu)
 void cpu_die(void)
 {
 	idle_task_exit();
-	while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy)
+	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
 		cpu_relax();
 	for (;;);
 }
@@ -765,7 +780,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
 	rc = -EBUSY;
-	if (cpu_online(cpu))
+	/* disallow configuration changes of online cpus and cpu 0 */
+	if (cpu_online(cpu) || cpu == 0)
 		goto out;
 	rc = 0;
 	switch (val) {
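
Usage note (not part of the commit): the patch adds smp_switch_to_ipl_cpu(), which runs a function on logical cpu 0 (the IPL cpu); per the code above, it calls the function directly when the caller already is cpu 0, otherwise it stops cpu 0, rebuilds a register frame on cpu 0's panic stack and restarts cpu 0 executing the function. The sketch below is a hypothetical caller meant only to illustrate the calling convention; do_reipl_work() and trigger_reipl() are invented names, and the code is assumed to live in arch/s390 kernel code that already includes the usual headers (linux/kernel.h, linux/smp.h) and sees the smp_switch_to_ipl_cpu() declaration.

/*
 * Hypothetical illustration only -- not taken from this patch.
 * Shows how a shutdown/re-IPL style path might hand its final work
 * off to the IPL cpu via the new helper.
 */
static void do_reipl_work(void *data)
{
	/* Runs on logical cpu 0 once the switch has happened. */
	pr_info("re-IPL work running on cpu %d\n", smp_processor_id());
}

static void trigger_reipl(void)
{
	/*
	 * smp_switch_to_ipl_cpu() either calls do_reipl_work() directly
	 * (if we already are cpu 0) or stops cpu 0 and restarts it with
	 * do_reipl_work() as the entry point.
	 */
	smp_switch_to_ipl_cpu(do_reipl_work, NULL);
}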