Diffstat (limited to 'arch/s390'):

 arch/s390/kernel/ipl.c           |  24
 arch/s390/kernel/machine_kexec.c |  65
 arch/s390/kernel/smp.c           | 104
 3 files changed, 49 insertions(+), 144 deletions(-)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 101b003cfabf..a36bea1188d9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -576,23 +576,6 @@ static struct subsys_attribute dump_type_attr =
 
 static decl_subsys(dump, NULL, NULL);
 
-#ifdef CONFIG_SMP
-static void dump_smp_stop_all(void)
-{
-	int cpu;
-	preempt_disable();
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		while (signal_processor(cpu, sigp_stop) == sigp_busy)
-			udelay(10);
-	}
-	preempt_enable();
-}
-#else
-#define dump_smp_stop_all() do { } while (0)
-#endif
-
 /*
  * Shutdown actions section
  */
@@ -724,13 +707,13 @@ static void do_dump(void)
 
 	switch (dump_method) {
 	case IPL_METHOD_CCW_CIO:
-		dump_smp_stop_all();
+		smp_send_stop();
 		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
 		devid.ssid = 0;
 		reipl_ccw_dev(&devid);
 		break;
 	case IPL_METHOD_CCW_VM:
-		dump_smp_stop_all();
+		smp_send_stop();
 		sprintf(buf, "STORE STATUS");
 		__cpcmd(buf, NULL, 0, NULL);
 		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
@@ -1059,9 +1042,6 @@ void s390_reset_system(void)
 {
 	struct _lowcore *lc;
 
-	/* Disable all interrupts/machine checks */
-	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
-
 	/* Stack for interrupt/machine check handler */
 	lc = (struct _lowcore *)(unsigned long) store_prefix();
 	lc->panic_stack = S390_lowcore.panic_stack;
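
The helper deleted above open-coded a stop loop that retried the SIGP order while the target CPU reported busy; do_dump() now relies on the common smp_send_stop() instead, whose do_send_stop() path performs the same busy retry (that function is not visible in this diff). Below is a minimal userspace model of the retry-on-busy pattern, offered only as an illustration: signal_processor(), busy_count and the sigp_* values are simplified stand-ins, not the kernel's implementation.

/*
 * Userspace model of the retry-on-busy stop loop that
 * dump_smp_stop_all() used to open-code.  All names here are
 * illustrative stubs, not kernel API.
 */
#include <stdio.h>

enum sigp_cc { sigp_order_code_accepted = 0, sigp_busy = 2 };
enum sigp_order { sigp_stop = 0x05 };

static int busy_count = 3;	/* pretend the target is busy three times */

static enum sigp_cc signal_processor(int cpu, enum sigp_order order)
{
	(void) cpu;
	(void) order;
	return busy_count-- > 0 ? sigp_busy : sigp_order_code_accepted;
}

int main(void)
{
	int cpu = 1;

	/* retry the stop order until the target CPU accepts it */
	while (signal_processor(cpu, sigp_stop) == sigp_busy)
		;	/* udelay(10) in the kernel; a no-op here */
	printf("cpu %d accepted sigp_stop\n", cpu);
	return 0;
}
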
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 202bf1fdfe39..def5caf8f72f 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,15 +1,10 @@
 /*
  * arch/s390/kernel/machine_kexec.c
  *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005,2006
  *
- * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
- *
- */
-
-/*
- * s390_machine_kexec.c - handle the transition of Linux booting another kernel
- * on the S390 architecture.
+ * Author(s): Rolf Adelsberger,
+ *	      Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
 #include <linux/device.h>
@@ -24,81 +19,53 @@
 #include <asm/smp.h>
 #include <asm/reset.h>
 
-static void kexec_halt_all_cpus(void *);
-
-typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long);
+typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
 
 extern const unsigned char relocate_kernel[];
 extern const unsigned long long relocate_kernel_len;
 
-int
-machine_kexec_prepare(struct kimage *image)
+int machine_kexec_prepare(struct kimage *image)
 {
-	unsigned long reboot_code_buffer;
+	void *reboot_code_buffer;
 
 	/* We don't support anything but the default image type for now. */
 	if (image->type != KEXEC_TYPE_DEFAULT)
 		return -EINVAL;
 
 	/* Get the destination where the assembler code should be copied to.*/
-	reboot_code_buffer = page_to_pfn(image->control_code_page)<<PAGE_SHIFT;
+	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
 
 	/* Then copy it */
-	memcpy((void *) reboot_code_buffer, relocate_kernel,
-	       relocate_kernel_len);
+	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
 	return 0;
 }
 
-void
-machine_kexec_cleanup(struct kimage *image)
+void machine_kexec_cleanup(struct kimage *image)
 {
 }
 
-void
-machine_shutdown(void)
+void machine_shutdown(void)
 {
 	printk(KERN_INFO "kexec: machine_shutdown called\n");
 }
 
-NORET_TYPE void
-machine_kexec(struct kimage *image)
-{
-	on_each_cpu(kexec_halt_all_cpus, image, 0, 0);
-	for (;;);
-}
-
 extern void pfault_fini(void);
 
-static void
-kexec_halt_all_cpus(void *kernel_image)
+void machine_kexec(struct kimage *image)
 {
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-	int cpu;
-	struct kimage *image;
 	relocate_kernel_t data_mover;
 
+	preempt_disable();
 #ifdef CONFIG_PFAULT
 	if (MACHINE_IS_VM)
 		pfault_fini();
 #endif
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
-		signal_processor(smp_processor_id(), sigp_stop);
-
-	/* Wait for all other cpus to enter stopped state */
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		while (!smp_cpu_not_running(cpu))
-			cpu_relax();
-	}
-
+	smp_send_stop();
 	s390_reset_system();
 
-	image = (struct kimage *) kernel_image;
-	data_mover = (relocate_kernel_t)
-		(page_to_pfn(image->control_code_page) << PAGE_SHIFT);
+	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
 
 	/* Call the moving routine */
-	(*data_mover) (&image->head, image->start);
+	(*data_mover)(&image->head, image->start);
+	for (;;);
 }
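
Besides folding kexec_halt_all_cpus() into machine_kexec() around smp_send_stop(), two cleanups ride along here: the function definitions move to single-line style, and the open-coded page_to_pfn() << PAGE_SHIFT becomes page_to_phys(). A small userspace model of why those two spellings name the same physical address follows; struct page, PAGE_SHIFT and the two helpers are simplified stand-ins for the kernel's, used only to demonstrate the identity.

/*
 * page_to_phys(page) on s390 is page_to_pfn(page) << PAGE_SHIFT;
 * a self-contained model showing the equivalence.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4K pages, as on s390 */

struct page { unsigned long pfn; };

static unsigned long page_to_pfn(const struct page *page)
{
	return page->pfn;
}

static unsigned long page_to_phys(const struct page *page)
{
	return page_to_pfn(page) << PAGE_SHIFT;
}

int main(void)
{
	struct page control_code_page = { .pfn = 0x1234 };
	unsigned long old_way, new_way;

	old_way = page_to_pfn(&control_code_page) << PAGE_SHIFT;
	new_way = page_to_phys(&control_code_page);
	assert(old_way == new_way);
	printf("control code buffer at phys 0x%lx\n", new_way);
	return 0;
}
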
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 62822245f9be..b549a43ed08f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -230,18 +230,37 @@ static inline void do_store_status(void)
 	}
 }
 
+static inline void do_wait_for_stop(void)
+{
+	int cpu;
+
+	/* Wait for all other cpus to enter stopped state */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		while(!smp_cpu_not_running(cpu))
+			cpu_relax();
+	}
+}
+
 /*
  * this function sends a 'stop' sigp to all other CPUs in the system.
  * it goes straight through.
  */
 void smp_send_stop(void)
 {
+	/* Disable all interrupts/machine checks */
+	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+
 	/* write magic number to zero page (absolute 0) */
 	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
 
 	/* stop other processors. */
 	do_send_stop();
 
+	/* wait until other processors are stopped */
+	do_wait_for_stop();
+
 	/* store status of other processors. */
 	do_store_status();
 }
@@ -250,88 +269,28 @@ void smp_send_stop(void)
  * Reboot, halt and power_off routines for SMP.
  */
 
-static void do_machine_restart(void * __unused)
-{
-	int cpu;
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
-		signal_processor(smp_processor_id(), sigp_stop);
-
-	/* Wait for all other cpus to enter stopped state */
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		while(!smp_cpu_not_running(cpu))
-			cpu_relax();
-	}
-
-	/* Store status of other cpus. */
-	do_store_status();
-
-	/*
-	 * Finally call reipl. Because we waited for all other
-	 * cpus to enter this function we know that they do
-	 * not hold any s390irq-locks (the cpus have been
-	 * interrupted by an external interrupt and s390irq
-	 * locks are always held disabled).
-	 */
-	do_reipl();
-}
-
 void machine_restart_smp(char * __unused)
 {
-	on_each_cpu(do_machine_restart, NULL, 0, 0);
-}
-
-static void do_wait_for_stop(void)
-{
-	unsigned long cr[16];
-
-	__ctl_store(cr, 0, 15);
-	cr[0] &= ~0xffff;
-	cr[6] = 0;
-	__ctl_load(cr, 0, 15);
-	for (;;)
-		enabled_wait();
-}
-
-static void do_machine_halt(void * __unused)
-{
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
-		smp_send_stop();
-		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
-			cpcmd(vmhalt_cmd, NULL, 0, NULL);
-		signal_processor(smp_processor_id(),
-				 sigp_stop_and_store_status);
-	}
-	do_wait_for_stop();
+	smp_send_stop();
+	do_reipl();
 }
 
 void machine_halt_smp(void)
 {
-	on_each_cpu(do_machine_halt, NULL, 0, 0);
-}
-
-static void do_machine_power_off(void * __unused)
-{
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
-		smp_send_stop();
-		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
-			cpcmd(vmpoff_cmd, NULL, 0, NULL);
-		signal_processor(smp_processor_id(),
-				 sigp_stop_and_store_status);
-	}
-	do_wait_for_stop();
+	smp_send_stop();
+	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
+	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+	for (;;);
 }
 
 void machine_power_off_smp(void)
 {
-	on_each_cpu(do_machine_power_off, NULL, 0, 0);
+	smp_send_stop();
+	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
+	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+	for (;;);
 }
 
 /*
@@ -860,4 +819,3 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_get_cpu);
 EXPORT_SYMBOL(smp_put_cpu);
-
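
The deleted do_machine_restart(), do_machine_halt() and do_machine_power_off() helpers each elected a single winner with atomic_cmpxchg(&cpuid, -1, smp_processor_id()): only the first CPU to swap -1 for its own id ran the shutdown sequence, and every other CPU stopped itself. After this patch the restart/halt/power_off bodies run on the calling CPU with the extended smp_send_stop() quiescing the rest first, so the per-function election goes away. A userspace sketch of that removed election pattern follows, with threads standing in for CPUs; the names and the printf output are illustrative only, not kernel API. Build with -pthread.

/*
 * "First CPU wins" election via compare-and-swap, modeled with
 * pthreads: one thread swaps -1 for its id and proceeds, the rest
 * lose the race (and in the kernel would sigp_stop themselves).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpuid = -1;

static void *shutdown_path(void *arg)
{
	int me = (int) (long) arg;
	int expected = -1;

	if (atomic_compare_exchange_strong(&cpuid, &expected, me))
		printf("cpu %d wins, runs the shutdown sequence\n", me);
	else
		printf("cpu %d loses, would sigp_stop itself\n", me);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];
	long cpu;

	for (cpu = 0; cpu < 4; cpu++)
		pthread_create(&threads[cpu], NULL, shutdown_path,
			       (void *) cpu);
	for (cpu = 0; cpu < 4; cpu++)
		pthread_join(threads[cpu], NULL);
	return 0;
}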