diff options
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/Makefile | 4 | ||||
-rw-r--r-- | arch/s390/kernel/early.c | 2 | ||||
-rw-r--r-- | arch/s390/kernel/head64.S | 2 | ||||
-rw-r--r-- | arch/s390/kernel/ipl.c | 948 | ||||
-rw-r--r-- | arch/s390/kernel/process.c | 18 | ||||
-rw-r--r-- | arch/s390/kernel/ptrace.c | 15 | ||||
-rw-r--r-- | arch/s390/kernel/setup.c | 139 | ||||
-rw-r--r-- | arch/s390/kernel/signal.c | 20 | ||||
-rw-r--r-- | arch/s390/kernel/smp.c | 575 | ||||
-rw-r--r-- | arch/s390/kernel/traps.c | 20 | ||||
-rw-r--r-- | arch/s390/kernel/vmlinux.lds.S | 12 |
11 files changed, 1120 insertions, 635 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 56cb71007cd9..b3b650a93c7c 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -31,7 +31,3 @@ S390_KEXEC_OBJS := machine_kexec.o crash.o | |||
31 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) | 31 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) |
32 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) | 32 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) |
33 | 33 | ||
34 | # | ||
35 | # This is just to get the dependencies... | ||
36 | # | ||
37 | binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c | ||
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 1b3af7dab816..9f7b73b180f0 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -276,7 +276,7 @@ void __init startup_init(void) | |||
276 | create_kernel_nss(); | 276 | create_kernel_nss(); |
277 | sort_main_extable(); | 277 | sort_main_extable(); |
278 | setup_lowcore_early(); | 278 | setup_lowcore_early(); |
279 | sclp_readinfo_early(); | 279 | sclp_read_info_early(); |
280 | sclp_facilities_detect(); | 280 | sclp_facilities_detect(); |
281 | memsize = sclp_memory_detect(); | 281 | memsize = sclp_memory_detect(); |
282 | #ifndef CONFIG_64BIT | 282 | #ifndef CONFIG_64BIT |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index a87b1976d409..79dccd206a6e 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -157,7 +157,7 @@ startup_continue: | |||
157 | .long 0xb2b10000 # store facility list | 157 | .long 0xb2b10000 # store facility list |
158 | tm 0xc8,0x08 # check bit for clearing-by-ASCE | 158 | tm 0xc8,0x08 # check bit for clearing-by-ASCE |
159 | bno 0f-.LPG1(%r13) | 159 | bno 0f-.LPG1(%r13) |
160 | lhi %r1,2094 | 160 | lhi %r1,2048 |
161 | lhi %r2,0 | 161 | lhi %r2,0 |
162 | .long 0xb98e2001 | 162 | .long 0xb98e2001 |
163 | oi 7(%r12),0x80 # set IDTE flag | 163 | oi 7(%r12),0x80 # set IDTE flag |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index b97694fa62ec..db28cca81fef 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/s390/kernel/ipl.c | 2 | * arch/s390/kernel/ipl.c |
3 | * ipl/reipl/dump support for Linux on s390. | 3 | * ipl/reipl/dump support for Linux on s390. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2005,2006 | 5 | * Copyright IBM Corp. 2005,2007 |
6 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> | 6 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> |
7 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
8 | * Volker Sameske <sameske@de.ibm.com> | 8 | * Volker Sameske <sameske@de.ibm.com> |
@@ -31,6 +31,43 @@ | |||
31 | #define IPL_FCP_DUMP_STR "fcp_dump" | 31 | #define IPL_FCP_DUMP_STR "fcp_dump" |
32 | #define IPL_NSS_STR "nss" | 32 | #define IPL_NSS_STR "nss" |
33 | 33 | ||
34 | #define DUMP_CCW_STR "ccw" | ||
35 | #define DUMP_FCP_STR "fcp" | ||
36 | #define DUMP_NONE_STR "none" | ||
37 | |||
38 | /* | ||
39 | * Four shutdown trigger types are supported: | ||
40 | * - panic | ||
41 | * - halt | ||
42 | * - power off | ||
43 | * - reipl | ||
44 | */ | ||
45 | #define ON_PANIC_STR "on_panic" | ||
46 | #define ON_HALT_STR "on_halt" | ||
47 | #define ON_POFF_STR "on_poff" | ||
48 | #define ON_REIPL_STR "on_reboot" | ||
49 | |||
50 | struct shutdown_action; | ||
51 | struct shutdown_trigger { | ||
52 | char *name; | ||
53 | struct shutdown_action *action; | ||
54 | }; | ||
55 | |||
56 | /* | ||
57 | * Five shutdown action types are supported: | ||
58 | */ | ||
59 | #define SHUTDOWN_ACTION_IPL_STR "ipl" | ||
60 | #define SHUTDOWN_ACTION_REIPL_STR "reipl" | ||
61 | #define SHUTDOWN_ACTION_DUMP_STR "dump" | ||
62 | #define SHUTDOWN_ACTION_VMCMD_STR "vmcmd" | ||
63 | #define SHUTDOWN_ACTION_STOP_STR "stop" | ||
64 | |||
65 | struct shutdown_action { | ||
66 | char *name; | ||
67 | void (*fn) (struct shutdown_trigger *trigger); | ||
68 | int (*init) (void); | ||
69 | }; | ||
70 | |||
34 | static char *ipl_type_str(enum ipl_type type) | 71 | static char *ipl_type_str(enum ipl_type type) |
35 | { | 72 | { |
36 | switch (type) { | 73 | switch (type) { |
@@ -54,10 +91,6 @@ enum dump_type { | |||
54 | DUMP_TYPE_FCP = 4, | 91 | DUMP_TYPE_FCP = 4, |
55 | }; | 92 | }; |
56 | 93 | ||
57 | #define DUMP_NONE_STR "none" | ||
58 | #define DUMP_CCW_STR "ccw" | ||
59 | #define DUMP_FCP_STR "fcp" | ||
60 | |||
61 | static char *dump_type_str(enum dump_type type) | 94 | static char *dump_type_str(enum dump_type type) |
62 | { | 95 | { |
63 | switch (type) { | 96 | switch (type) { |
@@ -99,30 +132,6 @@ enum dump_method { | |||
99 | DUMP_METHOD_FCP_DIAG, | 132 | DUMP_METHOD_FCP_DIAG, |
100 | }; | 133 | }; |
101 | 134 | ||
102 | enum shutdown_action { | ||
103 | SHUTDOWN_REIPL, | ||
104 | SHUTDOWN_DUMP, | ||
105 | SHUTDOWN_STOP, | ||
106 | }; | ||
107 | |||
108 | #define SHUTDOWN_REIPL_STR "reipl" | ||
109 | #define SHUTDOWN_DUMP_STR "dump" | ||
110 | #define SHUTDOWN_STOP_STR "stop" | ||
111 | |||
112 | static char *shutdown_action_str(enum shutdown_action action) | ||
113 | { | ||
114 | switch (action) { | ||
115 | case SHUTDOWN_REIPL: | ||
116 | return SHUTDOWN_REIPL_STR; | ||
117 | case SHUTDOWN_DUMP: | ||
118 | return SHUTDOWN_DUMP_STR; | ||
119 | case SHUTDOWN_STOP: | ||
120 | return SHUTDOWN_STOP_STR; | ||
121 | default: | ||
122 | return NULL; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | static int diag308_set_works = 0; | 135 | static int diag308_set_works = 0; |
127 | 136 | ||
128 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; | 137 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; |
@@ -140,8 +149,6 @@ static enum dump_method dump_method = DUMP_METHOD_NONE; | |||
140 | static struct ipl_parameter_block *dump_block_fcp; | 149 | static struct ipl_parameter_block *dump_block_fcp; |
141 | static struct ipl_parameter_block *dump_block_ccw; | 150 | static struct ipl_parameter_block *dump_block_ccw; |
142 | 151 | ||
143 | static enum shutdown_action on_panic_action = SHUTDOWN_STOP; | ||
144 | |||
145 | static struct sclp_ipl_info sclp_ipl_info; | 152 | static struct sclp_ipl_info sclp_ipl_info; |
146 | 153 | ||
147 | int diag308(unsigned long subcode, void *addr) | 154 | int diag308(unsigned long subcode, void *addr) |
@@ -205,8 +212,8 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | |||
205 | struct kobj_attribute *attr, \ | 212 | struct kobj_attribute *attr, \ |
206 | const char *buf, size_t len) \ | 213 | const char *buf, size_t len) \ |
207 | { \ | 214 | { \ |
208 | if (sscanf(buf, _fmt_in, _value) != 1) \ | 215 | strncpy(_value, buf, sizeof(_value) - 1); \ |
209 | return -EINVAL; \ | 216 | strstrip(_value); \ |
210 | return len; \ | 217 | return len; \ |
211 | } \ | 218 | } \ |
212 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 219 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
@@ -245,33 +252,6 @@ static __init enum ipl_type get_ipl_type(void) | |||
245 | return IPL_TYPE_FCP; | 252 | return IPL_TYPE_FCP; |
246 | } | 253 | } |
247 | 254 | ||
248 | void __init setup_ipl_info(void) | ||
249 | { | ||
250 | ipl_info.type = get_ipl_type(); | ||
251 | switch (ipl_info.type) { | ||
252 | case IPL_TYPE_CCW: | ||
253 | ipl_info.data.ccw.dev_id.devno = ipl_devno; | ||
254 | ipl_info.data.ccw.dev_id.ssid = 0; | ||
255 | break; | ||
256 | case IPL_TYPE_FCP: | ||
257 | case IPL_TYPE_FCP_DUMP: | ||
258 | ipl_info.data.fcp.dev_id.devno = | ||
259 | IPL_PARMBLOCK_START->ipl_info.fcp.devno; | ||
260 | ipl_info.data.fcp.dev_id.ssid = 0; | ||
261 | ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; | ||
262 | ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; | ||
263 | break; | ||
264 | case IPL_TYPE_NSS: | ||
265 | strncpy(ipl_info.data.nss.name, kernel_nss_name, | ||
266 | sizeof(ipl_info.data.nss.name)); | ||
267 | break; | ||
268 | case IPL_TYPE_UNKNOWN: | ||
269 | default: | ||
270 | /* We have no info to copy */ | ||
271 | break; | ||
272 | } | ||
273 | } | ||
274 | |||
275 | struct ipl_info ipl_info; | 255 | struct ipl_info ipl_info; |
276 | EXPORT_SYMBOL_GPL(ipl_info); | 256 | EXPORT_SYMBOL_GPL(ipl_info); |
277 | 257 | ||
@@ -428,8 +408,74 @@ static struct attribute_group ipl_unknown_attr_group = { | |||
428 | 408 | ||
429 | static struct kset *ipl_kset; | 409 | static struct kset *ipl_kset; |
430 | 410 | ||
411 | static int __init ipl_register_fcp_files(void) | ||
412 | { | ||
413 | int rc; | ||
414 | |||
415 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); | ||
416 | if (rc) | ||
417 | goto out; | ||
418 | rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); | ||
419 | if (rc) | ||
420 | goto out_ipl_parm; | ||
421 | rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr); | ||
422 | if (!rc) | ||
423 | goto out; | ||
424 | |||
425 | sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); | ||
426 | |||
427 | out_ipl_parm: | ||
428 | sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group); | ||
429 | out: | ||
430 | return rc; | ||
431 | } | ||
432 | |||
433 | static void ipl_run(struct shutdown_trigger *trigger) | ||
434 | { | ||
435 | diag308(DIAG308_IPL, NULL); | ||
436 | if (MACHINE_IS_VM) | ||
437 | __cpcmd("IPL", NULL, 0, NULL); | ||
438 | else if (ipl_info.type == IPL_TYPE_CCW) | ||
439 | reipl_ccw_dev(&ipl_info.data.ccw.dev_id); | ||
440 | } | ||
441 | |||
442 | static int ipl_init(void) | ||
443 | { | ||
444 | int rc; | ||
445 | |||
446 | ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj); | ||
447 | if (!ipl_kset) { | ||
448 | rc = -ENOMEM; | ||
449 | goto out; | ||
450 | } | ||
451 | switch (ipl_info.type) { | ||
452 | case IPL_TYPE_CCW: | ||
453 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group); | ||
454 | break; | ||
455 | case IPL_TYPE_FCP: | ||
456 | case IPL_TYPE_FCP_DUMP: | ||
457 | rc = ipl_register_fcp_files(); | ||
458 | break; | ||
459 | case IPL_TYPE_NSS: | ||
460 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group); | ||
461 | break; | ||
462 | default: | ||
463 | rc = sysfs_create_group(&ipl_kset->kobj, | ||
464 | &ipl_unknown_attr_group); | ||
465 | break; | ||
466 | } | ||
467 | out: | ||
468 | if (rc) | ||
469 | panic("ipl_init failed: rc = %i\n", rc); | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run, | ||
475 | ipl_init}; | ||
476 | |||
431 | /* | 477 | /* |
432 | * reipl section | 478 | * reipl shutdown action: Reboot Linux on shutdown. |
433 | */ | 479 | */ |
434 | 480 | ||
435 | /* FCP reipl device attributes */ | 481 | /* FCP reipl device attributes */ |
@@ -549,7 +595,9 @@ static int reipl_set_type(enum ipl_type type) | |||
549 | 595 | ||
550 | switch(type) { | 596 | switch(type) { |
551 | case IPL_TYPE_CCW: | 597 | case IPL_TYPE_CCW: |
552 | if (MACHINE_IS_VM) | 598 | if (diag308_set_works) |
599 | reipl_method = REIPL_METHOD_CCW_DIAG; | ||
600 | else if (MACHINE_IS_VM) | ||
553 | reipl_method = REIPL_METHOD_CCW_VM; | 601 | reipl_method = REIPL_METHOD_CCW_VM; |
554 | else | 602 | else |
555 | reipl_method = REIPL_METHOD_CCW_CIO; | 603 | reipl_method = REIPL_METHOD_CCW_CIO; |
@@ -600,143 +648,11 @@ static ssize_t reipl_type_store(struct kobject *kobj, | |||
600 | } | 648 | } |
601 | 649 | ||
602 | static struct kobj_attribute reipl_type_attr = | 650 | static struct kobj_attribute reipl_type_attr = |
603 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); | 651 | __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); |
604 | 652 | ||
605 | static struct kset *reipl_kset; | 653 | static struct kset *reipl_kset; |
606 | 654 | ||
607 | /* | 655 | void reipl_run(struct shutdown_trigger *trigger) |
608 | * dump section | ||
609 | */ | ||
610 | |||
611 | /* FCP dump device attributes */ | ||
612 | |||
613 | DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n", | ||
614 | dump_block_fcp->ipl_info.fcp.wwpn); | ||
615 | DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n", | ||
616 | dump_block_fcp->ipl_info.fcp.lun); | ||
617 | DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", | ||
618 | dump_block_fcp->ipl_info.fcp.bootprog); | ||
619 | DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n", | ||
620 | dump_block_fcp->ipl_info.fcp.br_lba); | ||
621 | DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
622 | dump_block_fcp->ipl_info.fcp.devno); | ||
623 | |||
624 | static struct attribute *dump_fcp_attrs[] = { | ||
625 | &sys_dump_fcp_device_attr.attr, | ||
626 | &sys_dump_fcp_wwpn_attr.attr, | ||
627 | &sys_dump_fcp_lun_attr.attr, | ||
628 | &sys_dump_fcp_bootprog_attr.attr, | ||
629 | &sys_dump_fcp_br_lba_attr.attr, | ||
630 | NULL, | ||
631 | }; | ||
632 | |||
633 | static struct attribute_group dump_fcp_attr_group = { | ||
634 | .name = IPL_FCP_STR, | ||
635 | .attrs = dump_fcp_attrs, | ||
636 | }; | ||
637 | |||
638 | /* CCW dump device attributes */ | ||
639 | |||
640 | DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
641 | dump_block_ccw->ipl_info.ccw.devno); | ||
642 | |||
643 | static struct attribute *dump_ccw_attrs[] = { | ||
644 | &sys_dump_ccw_device_attr.attr, | ||
645 | NULL, | ||
646 | }; | ||
647 | |||
648 | static struct attribute_group dump_ccw_attr_group = { | ||
649 | .name = IPL_CCW_STR, | ||
650 | .attrs = dump_ccw_attrs, | ||
651 | }; | ||
652 | |||
653 | /* dump type */ | ||
654 | |||
655 | static int dump_set_type(enum dump_type type) | ||
656 | { | ||
657 | if (!(dump_capabilities & type)) | ||
658 | return -EINVAL; | ||
659 | switch(type) { | ||
660 | case DUMP_TYPE_CCW: | ||
661 | if (MACHINE_IS_VM) | ||
662 | dump_method = DUMP_METHOD_CCW_VM; | ||
663 | else if (diag308_set_works) | ||
664 | dump_method = DUMP_METHOD_CCW_DIAG; | ||
665 | else | ||
666 | dump_method = DUMP_METHOD_CCW_CIO; | ||
667 | break; | ||
668 | case DUMP_TYPE_FCP: | ||
669 | dump_method = DUMP_METHOD_FCP_DIAG; | ||
670 | break; | ||
671 | default: | ||
672 | dump_method = DUMP_METHOD_NONE; | ||
673 | } | ||
674 | dump_type = type; | ||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | static ssize_t dump_type_show(struct kobject *kobj, | ||
679 | struct kobj_attribute *attr, char *page) | ||
680 | { | ||
681 | return sprintf(page, "%s\n", dump_type_str(dump_type)); | ||
682 | } | ||
683 | |||
684 | static ssize_t dump_type_store(struct kobject *kobj, | ||
685 | struct kobj_attribute *attr, | ||
686 | const char *buf, size_t len) | ||
687 | { | ||
688 | int rc = -EINVAL; | ||
689 | |||
690 | if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0) | ||
691 | rc = dump_set_type(DUMP_TYPE_NONE); | ||
692 | else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0) | ||
693 | rc = dump_set_type(DUMP_TYPE_CCW); | ||
694 | else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) | ||
695 | rc = dump_set_type(DUMP_TYPE_FCP); | ||
696 | return (rc != 0) ? rc : len; | ||
697 | } | ||
698 | |||
699 | static struct kobj_attribute dump_type_attr = | ||
700 | __ATTR(dump_type, 0644, dump_type_show, dump_type_store); | ||
701 | |||
702 | static struct kset *dump_kset; | ||
703 | |||
704 | /* | ||
705 | * Shutdown actions section | ||
706 | */ | ||
707 | |||
708 | static struct kset *shutdown_actions_kset; | ||
709 | |||
710 | /* on panic */ | ||
711 | |||
712 | static ssize_t on_panic_show(struct kobject *kobj, | ||
713 | struct kobj_attribute *attr, char *page) | ||
714 | { | ||
715 | return sprintf(page, "%s\n", shutdown_action_str(on_panic_action)); | ||
716 | } | ||
717 | |||
718 | static ssize_t on_panic_store(struct kobject *kobj, | ||
719 | struct kobj_attribute *attr, | ||
720 | const char *buf, size_t len) | ||
721 | { | ||
722 | if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0) | ||
723 | on_panic_action = SHUTDOWN_REIPL; | ||
724 | else if (strncmp(buf, SHUTDOWN_DUMP_STR, | ||
725 | strlen(SHUTDOWN_DUMP_STR)) == 0) | ||
726 | on_panic_action = SHUTDOWN_DUMP; | ||
727 | else if (strncmp(buf, SHUTDOWN_STOP_STR, | ||
728 | strlen(SHUTDOWN_STOP_STR)) == 0) | ||
729 | on_panic_action = SHUTDOWN_STOP; | ||
730 | else | ||
731 | return -EINVAL; | ||
732 | |||
733 | return len; | ||
734 | } | ||
735 | |||
736 | static struct kobj_attribute on_panic_attr = | ||
737 | __ATTR(on_panic, 0644, on_panic_show, on_panic_store); | ||
738 | |||
739 | void do_reipl(void) | ||
740 | { | 656 | { |
741 | struct ccw_dev_id devid; | 657 | struct ccw_dev_id devid; |
742 | static char buf[100]; | 658 | static char buf[100]; |
@@ -745,8 +661,6 @@ void do_reipl(void) | |||
745 | switch (reipl_method) { | 661 | switch (reipl_method) { |
746 | case REIPL_METHOD_CCW_CIO: | 662 | case REIPL_METHOD_CCW_CIO: |
747 | devid.devno = reipl_block_ccw->ipl_info.ccw.devno; | 663 | devid.devno = reipl_block_ccw->ipl_info.ccw.devno; |
748 | if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno) | ||
749 | diag308(DIAG308_IPL, NULL); | ||
750 | devid.ssid = 0; | 664 | devid.ssid = 0; |
751 | reipl_ccw_dev(&devid); | 665 | reipl_ccw_dev(&devid); |
752 | break; | 666 | break; |
@@ -787,98 +701,6 @@ void do_reipl(void) | |||
787 | default: | 701 | default: |
788 | break; | 702 | break; |
789 | } | 703 | } |
790 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
791 | } | ||
792 | |||
793 | static void do_dump(void) | ||
794 | { | ||
795 | struct ccw_dev_id devid; | ||
796 | static char buf[100]; | ||
797 | |||
798 | switch (dump_method) { | ||
799 | case DUMP_METHOD_CCW_CIO: | ||
800 | smp_send_stop(); | ||
801 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; | ||
802 | devid.ssid = 0; | ||
803 | reipl_ccw_dev(&devid); | ||
804 | break; | ||
805 | case DUMP_METHOD_CCW_VM: | ||
806 | smp_send_stop(); | ||
807 | sprintf(buf, "STORE STATUS"); | ||
808 | __cpcmd(buf, NULL, 0, NULL); | ||
809 | sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); | ||
810 | __cpcmd(buf, NULL, 0, NULL); | ||
811 | break; | ||
812 | case DUMP_METHOD_CCW_DIAG: | ||
813 | diag308(DIAG308_SET, dump_block_ccw); | ||
814 | diag308(DIAG308_DUMP, NULL); | ||
815 | break; | ||
816 | case DUMP_METHOD_FCP_DIAG: | ||
817 | diag308(DIAG308_SET, dump_block_fcp); | ||
818 | diag308(DIAG308_DUMP, NULL); | ||
819 | break; | ||
820 | case DUMP_METHOD_NONE: | ||
821 | default: | ||
822 | return; | ||
823 | } | ||
824 | printk(KERN_EMERG "Dump failed!\n"); | ||
825 | } | ||
826 | |||
827 | /* init functions */ | ||
828 | |||
829 | static int __init ipl_register_fcp_files(void) | ||
830 | { | ||
831 | int rc; | ||
832 | |||
833 | rc = sysfs_create_group(&ipl_kset->kobj, | ||
834 | &ipl_fcp_attr_group); | ||
835 | if (rc) | ||
836 | goto out; | ||
837 | rc = sysfs_create_bin_file(&ipl_kset->kobj, | ||
838 | &ipl_parameter_attr); | ||
839 | if (rc) | ||
840 | goto out_ipl_parm; | ||
841 | rc = sysfs_create_bin_file(&ipl_kset->kobj, | ||
842 | &ipl_scp_data_attr); | ||
843 | if (!rc) | ||
844 | goto out; | ||
845 | |||
846 | sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); | ||
847 | |||
848 | out_ipl_parm: | ||
849 | sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group); | ||
850 | out: | ||
851 | return rc; | ||
852 | } | ||
853 | |||
854 | static int __init ipl_init(void) | ||
855 | { | ||
856 | int rc; | ||
857 | |||
858 | ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj); | ||
859 | if (!ipl_kset) | ||
860 | return -ENOMEM; | ||
861 | switch (ipl_info.type) { | ||
862 | case IPL_TYPE_CCW: | ||
863 | rc = sysfs_create_group(&ipl_kset->kobj, | ||
864 | &ipl_ccw_attr_group); | ||
865 | break; | ||
866 | case IPL_TYPE_FCP: | ||
867 | case IPL_TYPE_FCP_DUMP: | ||
868 | rc = ipl_register_fcp_files(); | ||
869 | break; | ||
870 | case IPL_TYPE_NSS: | ||
871 | rc = sysfs_create_group(&ipl_kset->kobj, | ||
872 | &ipl_nss_attr_group); | ||
873 | break; | ||
874 | default: | ||
875 | rc = sysfs_create_group(&ipl_kset->kobj, | ||
876 | &ipl_unknown_attr_group); | ||
877 | break; | ||
878 | } | ||
879 | if (rc) | ||
880 | kset_unregister(ipl_kset); | ||
881 | return rc; | ||
882 | } | 704 | } |
883 | 705 | ||
884 | static void __init reipl_probe(void) | 706 | static void __init reipl_probe(void) |
@@ -923,6 +745,7 @@ static int __init reipl_ccw_init(void) | |||
923 | reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; | 745 | reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; |
924 | reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; | 746 | reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; |
925 | reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; | 747 | reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; |
748 | reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID; | ||
926 | /* check if read scp info worked and set loadparm */ | 749 | /* check if read scp info worked and set loadparm */ |
927 | if (sclp_ipl_info.is_valid) | 750 | if (sclp_ipl_info.is_valid) |
928 | memcpy(reipl_block_ccw->ipl_info.ccw.load_param, | 751 | memcpy(reipl_block_ccw->ipl_info.ccw.load_param, |
@@ -931,8 +754,7 @@ static int __init reipl_ccw_init(void) | |||
931 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ | 754 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ |
932 | memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40, | 755 | memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40, |
933 | LOADPARM_LEN); | 756 | LOADPARM_LEN); |
934 | /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ | 757 | if (!MACHINE_IS_VM && !diag308_set_works) |
935 | if (!MACHINE_IS_VM) | ||
936 | sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; | 758 | sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; |
937 | if (ipl_info.type == IPL_TYPE_CCW) | 759 | if (ipl_info.type == IPL_TYPE_CCW) |
938 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; | 760 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; |
@@ -970,7 +792,7 @@ static int __init reipl_fcp_init(void) | |||
970 | return 0; | 792 | return 0; |
971 | } | 793 | } |
972 | 794 | ||
973 | static int __init reipl_init(void) | 795 | static int reipl_init(void) |
974 | { | 796 | { |
975 | int rc; | 797 | int rc; |
976 | 798 | ||
@@ -997,6 +819,140 @@ static int __init reipl_init(void) | |||
997 | return 0; | 819 | return 0; |
998 | } | 820 | } |
999 | 821 | ||
822 | static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR, | ||
823 | reipl_run, reipl_init}; | ||
824 | |||
825 | /* | ||
826 | * dump shutdown action: Dump Linux on shutdown. | ||
827 | */ | ||
828 | |||
829 | /* FCP dump device attributes */ | ||
830 | |||
831 | DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n", | ||
832 | dump_block_fcp->ipl_info.fcp.wwpn); | ||
833 | DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n", | ||
834 | dump_block_fcp->ipl_info.fcp.lun); | ||
835 | DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", | ||
836 | dump_block_fcp->ipl_info.fcp.bootprog); | ||
837 | DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n", | ||
838 | dump_block_fcp->ipl_info.fcp.br_lba); | ||
839 | DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
840 | dump_block_fcp->ipl_info.fcp.devno); | ||
841 | |||
842 | static struct attribute *dump_fcp_attrs[] = { | ||
843 | &sys_dump_fcp_device_attr.attr, | ||
844 | &sys_dump_fcp_wwpn_attr.attr, | ||
845 | &sys_dump_fcp_lun_attr.attr, | ||
846 | &sys_dump_fcp_bootprog_attr.attr, | ||
847 | &sys_dump_fcp_br_lba_attr.attr, | ||
848 | NULL, | ||
849 | }; | ||
850 | |||
851 | static struct attribute_group dump_fcp_attr_group = { | ||
852 | .name = IPL_FCP_STR, | ||
853 | .attrs = dump_fcp_attrs, | ||
854 | }; | ||
855 | |||
856 | /* CCW dump device attributes */ | ||
857 | |||
858 | DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
859 | dump_block_ccw->ipl_info.ccw.devno); | ||
860 | |||
861 | static struct attribute *dump_ccw_attrs[] = { | ||
862 | &sys_dump_ccw_device_attr.attr, | ||
863 | NULL, | ||
864 | }; | ||
865 | |||
866 | static struct attribute_group dump_ccw_attr_group = { | ||
867 | .name = IPL_CCW_STR, | ||
868 | .attrs = dump_ccw_attrs, | ||
869 | }; | ||
870 | |||
871 | /* dump type */ | ||
872 | |||
873 | static int dump_set_type(enum dump_type type) | ||
874 | { | ||
875 | if (!(dump_capabilities & type)) | ||
876 | return -EINVAL; | ||
877 | switch (type) { | ||
878 | case DUMP_TYPE_CCW: | ||
879 | if (diag308_set_works) | ||
880 | dump_method = DUMP_METHOD_CCW_DIAG; | ||
881 | else if (MACHINE_IS_VM) | ||
882 | dump_method = DUMP_METHOD_CCW_VM; | ||
883 | else | ||
884 | dump_method = DUMP_METHOD_CCW_CIO; | ||
885 | break; | ||
886 | case DUMP_TYPE_FCP: | ||
887 | dump_method = DUMP_METHOD_FCP_DIAG; | ||
888 | break; | ||
889 | default: | ||
890 | dump_method = DUMP_METHOD_NONE; | ||
891 | } | ||
892 | dump_type = type; | ||
893 | return 0; | ||
894 | } | ||
895 | |||
896 | static ssize_t dump_type_show(struct kobject *kobj, | ||
897 | struct kobj_attribute *attr, char *page) | ||
898 | { | ||
899 | return sprintf(page, "%s\n", dump_type_str(dump_type)); | ||
900 | } | ||
901 | |||
902 | static ssize_t dump_type_store(struct kobject *kobj, | ||
903 | struct kobj_attribute *attr, | ||
904 | const char *buf, size_t len) | ||
905 | { | ||
906 | int rc = -EINVAL; | ||
907 | |||
908 | if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0) | ||
909 | rc = dump_set_type(DUMP_TYPE_NONE); | ||
910 | else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0) | ||
911 | rc = dump_set_type(DUMP_TYPE_CCW); | ||
912 | else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) | ||
913 | rc = dump_set_type(DUMP_TYPE_FCP); | ||
914 | return (rc != 0) ? rc : len; | ||
915 | } | ||
916 | |||
917 | static struct kobj_attribute dump_type_attr = | ||
918 | __ATTR(dump_type, 0644, dump_type_show, dump_type_store); | ||
919 | |||
920 | static struct kset *dump_kset; | ||
921 | |||
922 | static void dump_run(struct shutdown_trigger *trigger) | ||
923 | { | ||
924 | struct ccw_dev_id devid; | ||
925 | static char buf[100]; | ||
926 | |||
927 | switch (dump_method) { | ||
928 | case DUMP_METHOD_CCW_CIO: | ||
929 | smp_send_stop(); | ||
930 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; | ||
931 | devid.ssid = 0; | ||
932 | reipl_ccw_dev(&devid); | ||
933 | break; | ||
934 | case DUMP_METHOD_CCW_VM: | ||
935 | smp_send_stop(); | ||
936 | sprintf(buf, "STORE STATUS"); | ||
937 | __cpcmd(buf, NULL, 0, NULL); | ||
938 | sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); | ||
939 | __cpcmd(buf, NULL, 0, NULL); | ||
940 | break; | ||
941 | case DUMP_METHOD_CCW_DIAG: | ||
942 | diag308(DIAG308_SET, dump_block_ccw); | ||
943 | diag308(DIAG308_DUMP, NULL); | ||
944 | break; | ||
945 | case DUMP_METHOD_FCP_DIAG: | ||
946 | diag308(DIAG308_SET, dump_block_fcp); | ||
947 | diag308(DIAG308_DUMP, NULL); | ||
948 | break; | ||
949 | case DUMP_METHOD_NONE: | ||
950 | default: | ||
951 | return; | ||
952 | } | ||
953 | printk(KERN_EMERG "Dump failed!\n"); | ||
954 | } | ||
955 | |||
1000 | static int __init dump_ccw_init(void) | 956 | static int __init dump_ccw_init(void) |
1001 | { | 957 | { |
1002 | int rc; | 958 | int rc; |
@@ -1042,31 +998,14 @@ static int __init dump_fcp_init(void) | |||
1042 | return 0; | 998 | return 0; |
1043 | } | 999 | } |
1044 | 1000 | ||
1045 | #define SHUTDOWN_ON_PANIC_PRIO 0 | 1001 | static int dump_init(void) |
1046 | |||
1047 | static int shutdown_on_panic_notify(struct notifier_block *self, | ||
1048 | unsigned long event, void *data) | ||
1049 | { | ||
1050 | if (on_panic_action == SHUTDOWN_DUMP) | ||
1051 | do_dump(); | ||
1052 | else if (on_panic_action == SHUTDOWN_REIPL) | ||
1053 | do_reipl(); | ||
1054 | return NOTIFY_OK; | ||
1055 | } | ||
1056 | |||
1057 | static struct notifier_block shutdown_on_panic_nb = { | ||
1058 | .notifier_call = shutdown_on_panic_notify, | ||
1059 | .priority = SHUTDOWN_ON_PANIC_PRIO | ||
1060 | }; | ||
1061 | |||
1062 | static int __init dump_init(void) | ||
1063 | { | 1002 | { |
1064 | int rc; | 1003 | int rc; |
1065 | 1004 | ||
1066 | dump_kset = kset_create_and_add("dump", NULL, firmware_kobj); | 1005 | dump_kset = kset_create_and_add("dump", NULL, firmware_kobj); |
1067 | if (!dump_kset) | 1006 | if (!dump_kset) |
1068 | return -ENOMEM; | 1007 | return -ENOMEM; |
1069 | rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr); | 1008 | rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr); |
1070 | if (rc) { | 1009 | if (rc) { |
1071 | kset_unregister(dump_kset); | 1010 | kset_unregister(dump_kset); |
1072 | return rc; | 1011 | return rc; |
@@ -1081,47 +1020,381 @@ static int __init dump_init(void) | |||
1081 | return 0; | 1020 | return 0; |
1082 | } | 1021 | } |
1083 | 1022 | ||
1084 | static int __init shutdown_actions_init(void) | 1023 | static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR, |
1024 | dump_run, dump_init}; | ||
1025 | |||
1026 | /* | ||
1027 | * vmcmd shutdown action: Trigger vm command on shutdown. | ||
1028 | */ | ||
1029 | |||
1030 | static char vmcmd_on_reboot[128]; | ||
1031 | static char vmcmd_on_panic[128]; | ||
1032 | static char vmcmd_on_halt[128]; | ||
1033 | static char vmcmd_on_poff[128]; | ||
1034 | |||
1035 | DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot); | ||
1036 | DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic); | ||
1037 | DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt); | ||
1038 | DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff); | ||
1039 | |||
1040 | static struct attribute *vmcmd_attrs[] = { | ||
1041 | &sys_vmcmd_on_reboot_attr.attr, | ||
1042 | &sys_vmcmd_on_panic_attr.attr, | ||
1043 | &sys_vmcmd_on_halt_attr.attr, | ||
1044 | &sys_vmcmd_on_poff_attr.attr, | ||
1045 | NULL, | ||
1046 | }; | ||
1047 | |||
1048 | static struct attribute_group vmcmd_attr_group = { | ||
1049 | .attrs = vmcmd_attrs, | ||
1050 | }; | ||
1051 | |||
1052 | static struct kset *vmcmd_kset; | ||
1053 | |||
1054 | static void vmcmd_run(struct shutdown_trigger *trigger) | ||
1055 | { | ||
1056 | char *cmd, *next_cmd; | ||
1057 | |||
1058 | if (strcmp(trigger->name, ON_REIPL_STR) == 0) | ||
1059 | cmd = vmcmd_on_reboot; | ||
1060 | else if (strcmp(trigger->name, ON_PANIC_STR) == 0) | ||
1061 | cmd = vmcmd_on_panic; | ||
1062 | else if (strcmp(trigger->name, ON_HALT_STR) == 0) | ||
1063 | cmd = vmcmd_on_halt; | ||
1064 | else if (strcmp(trigger->name, ON_POFF_STR) == 0) | ||
1065 | cmd = vmcmd_on_poff; | ||
1066 | else | ||
1067 | return; | ||
1068 | |||
1069 | if (strlen(cmd) == 0) | ||
1070 | return; | ||
1071 | do { | ||
1072 | next_cmd = strchr(cmd, '\n'); | ||
1073 | if (next_cmd) { | ||
1074 | next_cmd[0] = 0; | ||
1075 | next_cmd += 1; | ||
1076 | } | ||
1077 | __cpcmd(cmd, NULL, 0, NULL); | ||
1078 | cmd = next_cmd; | ||
1079 | } while (cmd != NULL); | ||
1080 | } | ||
1081 | |||
1082 | static int vmcmd_init(void) | ||
1085 | { | 1083 | { |
1086 | int rc; | 1084 | if (!MACHINE_IS_VM) |
1085 | return -ENOTSUPP; | ||
1086 | vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); | ||
1087 | if (!vmcmd_kset) | ||
1088 | return -ENOMEM; | ||
1089 | return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group); | ||
1090 | } | ||
1091 | |||
1092 | static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR, | ||
1093 | vmcmd_run, vmcmd_init}; | ||
1094 | |||
1095 | /* | ||
1096 | * stop shutdown action: Stop Linux on shutdown. | ||
1097 | */ | ||
1098 | |||
1099 | static void stop_run(struct shutdown_trigger *trigger) | ||
1100 | { | ||
1101 | if (strcmp(trigger->name, ON_PANIC_STR) == 0) | ||
1102 | disabled_wait((unsigned long) __builtin_return_address(0)); | ||
1103 | else { | ||
1104 | signal_processor(smp_processor_id(), sigp_stop); | ||
1105 | for (;;); | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, | ||
1110 | stop_run, NULL}; | ||
1111 | |||
1112 | /* action list */ | ||
1113 | |||
1114 | static struct shutdown_action *shutdown_actions_list[] = { | ||
1115 | &ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action}; | ||
1116 | #define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *)) | ||
1117 | |||
1118 | /* | ||
1119 | * Trigger section | ||
1120 | */ | ||
1121 | |||
1122 | static struct kset *shutdown_actions_kset; | ||
1123 | |||
1124 | static int set_trigger(const char *buf, struct shutdown_trigger *trigger, | ||
1125 | size_t len) | ||
1126 | { | ||
1127 | int i; | ||
1128 | for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) { | ||
1129 | if (!shutdown_actions_list[i]) | ||
1130 | continue; | ||
1131 | if (strncmp(buf, shutdown_actions_list[i]->name, | ||
1132 | strlen(shutdown_actions_list[i]->name)) == 0) { | ||
1133 | trigger->action = shutdown_actions_list[i]; | ||
1134 | return len; | ||
1135 | } | ||
1136 | } | ||
1137 | return -EINVAL; | ||
1138 | } | ||
1139 | |||
1140 | /* on reipl */ | ||
1141 | |||
1142 | static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR, | ||
1143 | &reipl_action}; | ||
1144 | |||
1145 | static ssize_t on_reboot_show(struct kobject *kobj, | ||
1146 | struct kobj_attribute *attr, char *page) | ||
1147 | { | ||
1148 | return sprintf(page, "%s\n", on_reboot_trigger.action->name); | ||
1149 | } | ||
1150 | |||
1151 | static ssize_t on_reboot_store(struct kobject *kobj, | ||
1152 | struct kobj_attribute *attr, | ||
1153 | const char *buf, size_t len) | ||
1154 | { | ||
1155 | return set_trigger(buf, &on_reboot_trigger, len); | ||
1156 | } | ||
1157 | |||
1158 | static struct kobj_attribute on_reboot_attr = | ||
1159 | __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store); | ||
1160 | |||
1161 | static void do_machine_restart(char *__unused) | ||
1162 | { | ||
1163 | smp_send_stop(); | ||
1164 | on_reboot_trigger.action->fn(&on_reboot_trigger); | ||
1165 | reipl_run(NULL); | ||
1166 | } | ||
1167 | void (*_machine_restart)(char *command) = do_machine_restart; | ||
1168 | |||
1169 | /* on panic */ | ||
1170 | |||
1171 | static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action}; | ||
1172 | |||
1173 | static ssize_t on_panic_show(struct kobject *kobj, | ||
1174 | struct kobj_attribute *attr, char *page) | ||
1175 | { | ||
1176 | return sprintf(page, "%s\n", on_panic_trigger.action->name); | ||
1177 | } | ||
1087 | 1178 | ||
1179 | static ssize_t on_panic_store(struct kobject *kobj, | ||
1180 | struct kobj_attribute *attr, | ||
1181 | const char *buf, size_t len) | ||
1182 | { | ||
1183 | return set_trigger(buf, &on_panic_trigger, len); | ||
1184 | } | ||
1185 | |||
1186 | static struct kobj_attribute on_panic_attr = | ||
1187 | __ATTR(on_panic, 0644, on_panic_show, on_panic_store); | ||
1188 | |||
1189 | static void do_panic(void) | ||
1190 | { | ||
1191 | on_panic_trigger.action->fn(&on_panic_trigger); | ||
1192 | stop_run(&on_panic_trigger); | ||
1193 | } | ||
1194 | |||
1195 | /* on halt */ | ||
1196 | |||
1197 | static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; | ||
1198 | |||
1199 | static ssize_t on_halt_show(struct kobject *kobj, | ||
1200 | struct kobj_attribute *attr, char *page) | ||
1201 | { | ||
1202 | return sprintf(page, "%s\n", on_halt_trigger.action->name); | ||
1203 | } | ||
1204 | |||
1205 | static ssize_t on_halt_store(struct kobject *kobj, | ||
1206 | struct kobj_attribute *attr, | ||
1207 | const char *buf, size_t len) | ||
1208 | { | ||
1209 | return set_trigger(buf, &on_halt_trigger, len); | ||
1210 | } | ||
1211 | |||
1212 | static struct kobj_attribute on_halt_attr = | ||
1213 | __ATTR(on_halt, 0644, on_halt_show, on_halt_store); | ||
1214 | |||
1215 | |||
1216 | static void do_machine_halt(void) | ||
1217 | { | ||
1218 | smp_send_stop(); | ||
1219 | on_halt_trigger.action->fn(&on_halt_trigger); | ||
1220 | stop_run(&on_halt_trigger); | ||
1221 | } | ||
1222 | void (*_machine_halt)(void) = do_machine_halt; | ||
1223 | |||
1224 | /* on power off */ | ||
1225 | |||
1226 | static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action}; | ||
1227 | |||
1228 | static ssize_t on_poff_show(struct kobject *kobj, | ||
1229 | struct kobj_attribute *attr, char *page) | ||
1230 | { | ||
1231 | return sprintf(page, "%s\n", on_poff_trigger.action->name); | ||
1232 | } | ||
1233 | |||
1234 | static ssize_t on_poff_store(struct kobject *kobj, | ||
1235 | struct kobj_attribute *attr, | ||
1236 | const char *buf, size_t len) | ||
1237 | { | ||
1238 | return set_trigger(buf, &on_poff_trigger, len); | ||
1239 | } | ||
1240 | |||
1241 | static struct kobj_attribute on_poff_attr = | ||
1242 | __ATTR(on_poff, 0644, on_poff_show, on_poff_store); | ||
1243 | |||
1244 | |||
1245 | static void do_machine_power_off(void) | ||
1246 | { | ||
1247 | smp_send_stop(); | ||
1248 | on_poff_trigger.action->fn(&on_poff_trigger); | ||
1249 | stop_run(&on_poff_trigger); | ||
1250 | } | ||
1251 | void (*_machine_power_off)(void) = do_machine_power_off; | ||
1252 | |||
1253 | static void __init shutdown_triggers_init(void) | ||
1254 | { | ||
1088 | shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL, | 1255 | shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL, |
1089 | firmware_kobj); | 1256 | firmware_kobj); |
1090 | if (!shutdown_actions_kset) | 1257 | if (!shutdown_actions_kset) |
1091 | return -ENOMEM; | 1258 | goto fail; |
1092 | rc = sysfs_create_file(&shutdown_actions_kset->kobj, &on_panic_attr); | 1259 | if (sysfs_create_file(&shutdown_actions_kset->kobj, |
1093 | if (rc) { | 1260 | &on_reboot_attr.attr)) |
1094 | kset_unregister(shutdown_actions_kset); | 1261 | goto fail; |
1095 | return rc; | 1262 | if (sysfs_create_file(&shutdown_actions_kset->kobj, |
1263 | &on_panic_attr.attr)) | ||
1264 | goto fail; | ||
1265 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1266 | &on_halt_attr.attr)) | ||
1267 | goto fail; | ||
1268 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1269 | &on_poff_attr.attr)) | ||
1270 | goto fail; | ||
1271 | |||
1272 | return; | ||
1273 | fail: | ||
1274 | panic("shutdown_triggers_init failed\n"); | ||
1275 | } | ||
1276 | |||
1277 | static void __init shutdown_actions_init(void) | ||
1278 | { | ||
1279 | int i; | ||
1280 | |||
1281 | for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) { | ||
1282 | if (!shutdown_actions_list[i]->init) | ||
1283 | continue; | ||
1284 | if (shutdown_actions_list[i]->init()) | ||
1285 | shutdown_actions_list[i] = NULL; | ||
1096 | } | 1286 | } |
1097 | atomic_notifier_chain_register(&panic_notifier_list, | ||
1098 | &shutdown_on_panic_nb); | ||
1099 | return 0; | ||
1100 | } | 1287 | } |
1101 | 1288 | ||
1102 | static int __init s390_ipl_init(void) | 1289 | static int __init s390_ipl_init(void) |
1103 | { | 1290 | { |
1104 | int rc; | ||
1105 | |||
1106 | sclp_get_ipl_info(&sclp_ipl_info); | ||
1107 | reipl_probe(); | 1291 | reipl_probe(); |
1108 | rc = ipl_init(); | 1292 | sclp_get_ipl_info(&sclp_ipl_info); |
1109 | if (rc) | 1293 | shutdown_actions_init(); |
1110 | return rc; | 1294 | shutdown_triggers_init(); |
1111 | rc = reipl_init(); | ||
1112 | if (rc) | ||
1113 | return rc; | ||
1114 | rc = dump_init(); | ||
1115 | if (rc) | ||
1116 | return rc; | ||
1117 | rc = shutdown_actions_init(); | ||
1118 | if (rc) | ||
1119 | return rc; | ||
1120 | return 0; | 1295 | return 0; |
1121 | } | 1296 | } |
1122 | 1297 | ||
1123 | __initcall(s390_ipl_init); | 1298 | __initcall(s390_ipl_init); |
1124 | 1299 | ||
1300 | static void __init strncpy_skip_quote(char *dst, char *src, int n) | ||
1301 | { | ||
1302 | int sx, dx; | ||
1303 | |||
1304 | dx = 0; | ||
1305 | for (sx = 0; src[sx] != 0; sx++) { | ||
1306 | if (src[sx] == '"') | ||
1307 | continue; | ||
1308 | dst[dx++] = src[sx]; | ||
1309 | if (dx >= n) | ||
1310 | break; | ||
1311 | } | ||
1312 | } | ||
1313 | |||
1314 | static int __init vmcmd_on_reboot_setup(char *str) | ||
1315 | { | ||
1316 | if (!MACHINE_IS_VM) | ||
1317 | return 1; | ||
1318 | strncpy_skip_quote(vmcmd_on_reboot, str, 127); | ||
1319 | vmcmd_on_reboot[127] = 0; | ||
1320 | on_reboot_trigger.action = &vmcmd_action; | ||
1321 | return 1; | ||
1322 | } | ||
1323 | __setup("vmreboot=", vmcmd_on_reboot_setup); | ||
1324 | |||
1325 | static int __init vmcmd_on_panic_setup(char *str) | ||
1326 | { | ||
1327 | if (!MACHINE_IS_VM) | ||
1328 | return 1; | ||
1329 | strncpy_skip_quote(vmcmd_on_panic, str, 127); | ||
1330 | vmcmd_on_panic[127] = 0; | ||
1331 | on_panic_trigger.action = &vmcmd_action; | ||
1332 | return 1; | ||
1333 | } | ||
1334 | __setup("vmpanic=", vmcmd_on_panic_setup); | ||
1335 | |||
1336 | static int __init vmcmd_on_halt_setup(char *str) | ||
1337 | { | ||
1338 | if (!MACHINE_IS_VM) | ||
1339 | return 1; | ||
1340 | strncpy_skip_quote(vmcmd_on_halt, str, 127); | ||
1341 | vmcmd_on_halt[127] = 0; | ||
1342 | on_halt_trigger.action = &vmcmd_action; | ||
1343 | return 1; | ||
1344 | } | ||
1345 | __setup("vmhalt=", vmcmd_on_halt_setup); | ||
1346 | |||
1347 | static int __init vmcmd_on_poff_setup(char *str) | ||
1348 | { | ||
1349 | if (!MACHINE_IS_VM) | ||
1350 | return 1; | ||
1351 | strncpy_skip_quote(vmcmd_on_poff, str, 127); | ||
1352 | vmcmd_on_poff[127] = 0; | ||
1353 | on_poff_trigger.action = &vmcmd_action; | ||
1354 | return 1; | ||
1355 | } | ||
1356 | __setup("vmpoff=", vmcmd_on_poff_setup); | ||
1357 | |||
1358 | static int on_panic_notify(struct notifier_block *self, | ||
1359 | unsigned long event, void *data) | ||
1360 | { | ||
1361 | do_panic(); | ||
1362 | return NOTIFY_OK; | ||
1363 | } | ||
1364 | |||
1365 | static struct notifier_block on_panic_nb = { | ||
1366 | .notifier_call = on_panic_notify, | ||
1367 | .priority = 0, | ||
1368 | }; | ||
1369 | |||
1370 | void __init setup_ipl(void) | ||
1371 | { | ||
1372 | ipl_info.type = get_ipl_type(); | ||
1373 | switch (ipl_info.type) { | ||
1374 | case IPL_TYPE_CCW: | ||
1375 | ipl_info.data.ccw.dev_id.devno = ipl_devno; | ||
1376 | ipl_info.data.ccw.dev_id.ssid = 0; | ||
1377 | break; | ||
1378 | case IPL_TYPE_FCP: | ||
1379 | case IPL_TYPE_FCP_DUMP: | ||
1380 | ipl_info.data.fcp.dev_id.devno = | ||
1381 | IPL_PARMBLOCK_START->ipl_info.fcp.devno; | ||
1382 | ipl_info.data.fcp.dev_id.ssid = 0; | ||
1383 | ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; | ||
1384 | ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; | ||
1385 | break; | ||
1386 | case IPL_TYPE_NSS: | ||
1387 | strncpy(ipl_info.data.nss.name, kernel_nss_name, | ||
1388 | sizeof(ipl_info.data.nss.name)); | ||
1389 | break; | ||
1390 | case IPL_TYPE_UNKNOWN: | ||
1391 | default: | ||
1392 | /* We have no info to copy */ | ||
1393 | break; | ||
1394 | } | ||
1395 | atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); | ||
1396 | } | ||
1397 | |||
1125 | void __init ipl_save_parameters(void) | 1398 | void __init ipl_save_parameters(void) |
1126 | { | 1399 | { |
1127 | struct cio_iplinfo iplinfo; | 1400 | struct cio_iplinfo iplinfo; |
@@ -1202,3 +1475,4 @@ void s390_reset_system(void) | |||
1202 | 1475 | ||
1203 | do_reset_calls(); | 1476 | do_reset_calls(); |
1204 | } | 1477 | } |
1478 | |||
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 29f7884b4ffa..0e7aca039307 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
38 | #include <linux/notifier.h> | 38 | #include <linux/notifier.h> |
39 | 39 | #include <linux/utsname.h> | |
40 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
41 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
42 | #include <asm/system.h> | 42 | #include <asm/system.h> |
@@ -182,13 +182,15 @@ void cpu_idle(void) | |||
182 | 182 | ||
183 | void show_regs(struct pt_regs *regs) | 183 | void show_regs(struct pt_regs *regs) |
184 | { | 184 | { |
185 | struct task_struct *tsk = current; | 185 | print_modules(); |
186 | 186 | printk("CPU: %d %s %s %.*s\n", | |
187 | printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted()); | 187 | task_thread_info(current)->cpu, print_tainted(), |
188 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | 188 | init_utsname()->release, |
189 | current->comm, task_pid_nr(current), (void *) tsk, | 189 | (int)strcspn(init_utsname()->version, " "), |
190 | (void *) tsk->thread.ksp); | 190 | init_utsname()->version); |
191 | 191 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | |
192 | current->comm, current->pid, current, | ||
193 | (void *) current->thread.ksp); | ||
192 | show_registers(regs); | 194 | show_registers(regs); |
193 | /* Show stack backtrace if pt_regs is from kernel mode */ | 195 | /* Show stack backtrace if pt_regs is from kernel mode */ |
194 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 196 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 1d81bf9488ae..6e036bae9875 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -86,13 +86,13 @@ FixPerRegisters(struct task_struct *task) | |||
86 | per_info->control_regs.bits.storage_alt_space_ctl = 0; | 86 | per_info->control_regs.bits.storage_alt_space_ctl = 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | static void set_single_step(struct task_struct *task) | 89 | void user_enable_single_step(struct task_struct *task) |
90 | { | 90 | { |
91 | task->thread.per_info.single_step = 1; | 91 | task->thread.per_info.single_step = 1; |
92 | FixPerRegisters(task); | 92 | FixPerRegisters(task); |
93 | } | 93 | } |
94 | 94 | ||
95 | static void clear_single_step(struct task_struct *task) | 95 | void user_disable_single_step(struct task_struct *task) |
96 | { | 96 | { |
97 | task->thread.per_info.single_step = 0; | 97 | task->thread.per_info.single_step = 0; |
98 | FixPerRegisters(task); | 98 | FixPerRegisters(task); |
@@ -107,7 +107,7 @@ void | |||
107 | ptrace_disable(struct task_struct *child) | 107 | ptrace_disable(struct task_struct *child) |
108 | { | 108 | { |
109 | /* make sure the single step bit is not set. */ | 109 | /* make sure the single step bit is not set. */ |
110 | clear_single_step(child); | 110 | user_disable_single_step(child); |
111 | } | 111 | } |
112 | 112 | ||
113 | #ifndef CONFIG_64BIT | 113 | #ifndef CONFIG_64BIT |
@@ -651,7 +651,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data) | |||
651 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 651 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
652 | child->exit_code = data; | 652 | child->exit_code = data; |
653 | /* make sure the single step bit is not set. */ | 653 | /* make sure the single step bit is not set. */ |
654 | clear_single_step(child); | 654 | user_disable_single_step(child); |
655 | wake_up_process(child); | 655 | wake_up_process(child); |
656 | return 0; | 656 | return 0; |
657 | 657 | ||
@@ -665,7 +665,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data) | |||
665 | return 0; | 665 | return 0; |
666 | child->exit_code = SIGKILL; | 666 | child->exit_code = SIGKILL; |
667 | /* make sure the single step bit is not set. */ | 667 | /* make sure the single step bit is not set. */ |
668 | clear_single_step(child); | 668 | user_disable_single_step(child); |
669 | wake_up_process(child); | 669 | wake_up_process(child); |
670 | return 0; | 670 | return 0; |
671 | 671 | ||
@@ -675,10 +675,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data) | |||
675 | return -EIO; | 675 | return -EIO; |
676 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 676 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
677 | child->exit_code = data; | 677 | child->exit_code = data; |
678 | if (data) | 678 | user_enable_single_step(child); |
679 | set_tsk_thread_flag(child, TIF_SINGLE_STEP); | ||
680 | else | ||
681 | set_single_step(child); | ||
682 | /* give it a chance to run. */ | 679 | /* give it a chance to run. */ |
683 | wake_up_process(child); | 680 | wake_up_process(child); |
684 | return 0; | 681 | return 0; |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 577aa7dd660e..766c783bd7a7 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -126,75 +126,6 @@ void __cpuinit cpu_init(void) | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * VM halt and poweroff setup routines | ||
130 | */ | ||
131 | char vmhalt_cmd[128] = ""; | ||
132 | char vmpoff_cmd[128] = ""; | ||
133 | static char vmpanic_cmd[128] = ""; | ||
134 | |||
135 | static void strncpy_skip_quote(char *dst, char *src, int n) | ||
136 | { | ||
137 | int sx, dx; | ||
138 | |||
139 | dx = 0; | ||
140 | for (sx = 0; src[sx] != 0; sx++) { | ||
141 | if (src[sx] == '"') continue; | ||
142 | dst[dx++] = src[sx]; | ||
143 | if (dx >= n) break; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static int __init vmhalt_setup(char *str) | ||
148 | { | ||
149 | strncpy_skip_quote(vmhalt_cmd, str, 127); | ||
150 | vmhalt_cmd[127] = 0; | ||
151 | return 1; | ||
152 | } | ||
153 | |||
154 | __setup("vmhalt=", vmhalt_setup); | ||
155 | |||
156 | static int __init vmpoff_setup(char *str) | ||
157 | { | ||
158 | strncpy_skip_quote(vmpoff_cmd, str, 127); | ||
159 | vmpoff_cmd[127] = 0; | ||
160 | return 1; | ||
161 | } | ||
162 | |||
163 | __setup("vmpoff=", vmpoff_setup); | ||
164 | |||
165 | static int vmpanic_notify(struct notifier_block *self, unsigned long event, | ||
166 | void *data) | ||
167 | { | ||
168 | if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0) | ||
169 | cpcmd(vmpanic_cmd, NULL, 0, NULL); | ||
170 | |||
171 | return NOTIFY_OK; | ||
172 | } | ||
173 | |||
174 | #define PANIC_PRI_VMPANIC 0 | ||
175 | |||
176 | static struct notifier_block vmpanic_nb = { | ||
177 | .notifier_call = vmpanic_notify, | ||
178 | .priority = PANIC_PRI_VMPANIC | ||
179 | }; | ||
180 | |||
181 | static int __init vmpanic_setup(char *str) | ||
182 | { | ||
183 | static int register_done __initdata = 0; | ||
184 | |||
185 | strncpy_skip_quote(vmpanic_cmd, str, 127); | ||
186 | vmpanic_cmd[127] = 0; | ||
187 | if (!register_done) { | ||
188 | register_done = 1; | ||
189 | atomic_notifier_chain_register(&panic_notifier_list, | ||
190 | &vmpanic_nb); | ||
191 | } | ||
192 | return 1; | ||
193 | } | ||
194 | |||
195 | __setup("vmpanic=", vmpanic_setup); | ||
196 | |||
197 | /* | ||
198 | * condev= and conmode= setup parameter. | 129 | * condev= and conmode= setup parameter. |
199 | */ | 130 | */ |
200 | 131 | ||
@@ -308,38 +239,6 @@ static void __init setup_zfcpdump(unsigned int console_devno) | |||
308 | static inline void setup_zfcpdump(unsigned int console_devno) {} | 239 | static inline void setup_zfcpdump(unsigned int console_devno) {} |
309 | #endif /* CONFIG_ZFCPDUMP */ | 240 | #endif /* CONFIG_ZFCPDUMP */ |
310 | 241 | ||
311 | #ifdef CONFIG_SMP | ||
312 | void (*_machine_restart)(char *command) = machine_restart_smp; | ||
313 | void (*_machine_halt)(void) = machine_halt_smp; | ||
314 | void (*_machine_power_off)(void) = machine_power_off_smp; | ||
315 | #else | ||
316 | /* | ||
317 | * Reboot, halt and power_off routines for non SMP. | ||
318 | */ | ||
319 | static void do_machine_restart_nonsmp(char * __unused) | ||
320 | { | ||
321 | do_reipl(); | ||
322 | } | ||
323 | |||
324 | static void do_machine_halt_nonsmp(void) | ||
325 | { | ||
326 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) | ||
327 | __cpcmd(vmhalt_cmd, NULL, 0, NULL); | ||
328 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
329 | } | ||
330 | |||
331 | static void do_machine_power_off_nonsmp(void) | ||
332 | { | ||
333 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | ||
334 | __cpcmd(vmpoff_cmd, NULL, 0, NULL); | ||
335 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
336 | } | ||
337 | |||
338 | void (*_machine_restart)(char *command) = do_machine_restart_nonsmp; | ||
339 | void (*_machine_halt)(void) = do_machine_halt_nonsmp; | ||
340 | void (*_machine_power_off)(void) = do_machine_power_off_nonsmp; | ||
341 | #endif | ||
342 | |||
343 | /* | 242 | /* |
344 | * Reboot, halt and power_off stubs. They just call _machine_restart, | 243 | * Reboot, halt and power_off stubs. They just call _machine_restart, |
345 | * _machine_halt or _machine_power_off. | 244 | * _machine_halt or _machine_power_off. |
@@ -559,7 +458,9 @@ setup_resources(void) | |||
559 | data_resource.start = (unsigned long) &_etext; | 458 | data_resource.start = (unsigned long) &_etext; |
560 | data_resource.end = (unsigned long) &_edata - 1; | 459 | data_resource.end = (unsigned long) &_edata - 1; |
561 | 460 | ||
562 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 461 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
462 | if (!memory_chunk[i].size) | ||
463 | continue; | ||
563 | res = alloc_bootmem_low(sizeof(struct resource)); | 464 | res = alloc_bootmem_low(sizeof(struct resource)); |
564 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | 465 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; |
565 | switch (memory_chunk[i].type) { | 466 | switch (memory_chunk[i].type) { |
@@ -617,7 +518,7 @@ EXPORT_SYMBOL_GPL(real_memory_size); | |||
617 | static void __init setup_memory_end(void) | 518 | static void __init setup_memory_end(void) |
618 | { | 519 | { |
619 | unsigned long memory_size; | 520 | unsigned long memory_size; |
620 | unsigned long max_mem, max_phys; | 521 | unsigned long max_mem; |
621 | int i; | 522 | int i; |
622 | 523 | ||
623 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) | 524 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) |
@@ -625,10 +526,31 @@ static void __init setup_memory_end(void) | |||
625 | memory_end = ZFCPDUMP_HSA_SIZE; | 526 | memory_end = ZFCPDUMP_HSA_SIZE; |
626 | #endif | 527 | #endif |
627 | memory_size = 0; | 528 | memory_size = 0; |
628 | max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; | ||
629 | memory_end &= PAGE_MASK; | 529 | memory_end &= PAGE_MASK; |
630 | 530 | ||
631 | max_mem = memory_end ? min(max_phys, memory_end) : max_phys; | 531 | max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START; |
532 | memory_end = min(max_mem, memory_end); | ||
533 | |||
534 | /* | ||
535 | * Make sure all chunks are MAX_ORDER aligned so we don't need the | ||
536 | * extra checks that HOLES_IN_ZONE would require. | ||
537 | */ | ||
538 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
539 | unsigned long start, end; | ||
540 | struct mem_chunk *chunk; | ||
541 | unsigned long align; | ||
542 | |||
543 | chunk = &memory_chunk[i]; | ||
544 | align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); | ||
545 | start = (chunk->addr + align - 1) & ~(align - 1); | ||
546 | end = (chunk->addr + chunk->size) & ~(align - 1); | ||
547 | if (start >= end) | ||
548 | memset(chunk, 0, sizeof(*chunk)); | ||
549 | else { | ||
550 | chunk->addr = start; | ||
551 | chunk->size = end - start; | ||
552 | } | ||
553 | } | ||
632 | 554 | ||
633 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 555 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
634 | struct mem_chunk *chunk = &memory_chunk[i]; | 556 | struct mem_chunk *chunk = &memory_chunk[i]; |
@@ -890,7 +812,7 @@ setup_arch(char **cmdline_p) | |||
890 | 812 | ||
891 | parse_early_param(); | 813 | parse_early_param(); |
892 | 814 | ||
893 | setup_ipl_info(); | 815 | setup_ipl(); |
894 | setup_memory_end(); | 816 | setup_memory_end(); |
895 | setup_addressing_mode(); | 817 | setup_addressing_mode(); |
896 | setup_memory(); | 818 | setup_memory(); |
@@ -899,7 +821,6 @@ setup_arch(char **cmdline_p) | |||
899 | 821 | ||
900 | cpu_init(); | 822 | cpu_init(); |
901 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; | 823 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; |
902 | smp_setup_cpu_possible_map(); | ||
903 | 824 | ||
904 | /* | 825 | /* |
905 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). | 826 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). |
@@ -920,7 +841,7 @@ setup_arch(char **cmdline_p) | |||
920 | 841 | ||
921 | void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) | 842 | void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) |
922 | { | 843 | { |
923 | printk("cpu %d " | 844 | printk(KERN_INFO "cpu %d " |
924 | #ifdef CONFIG_SMP | 845 | #ifdef CONFIG_SMP |
925 | "phys_idx=%d " | 846 | "phys_idx=%d " |
926 | #endif | 847 | #endif |
@@ -996,7 +917,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos) | |||
996 | static void c_stop(struct seq_file *m, void *v) | 917 | static void c_stop(struct seq_file *m, void *v) |
997 | { | 918 | { |
998 | } | 919 | } |
999 | struct seq_operations cpuinfo_op = { | 920 | const struct seq_operations cpuinfo_op = { |
1000 | .start = c_start, | 921 | .start = c_start, |
1001 | .next = c_next, | 922 | .next = c_next, |
1002 | .stop = c_stop, | 923 | .stop = c_stop, |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index d264671c1b71..4449bf32cbf1 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -471,6 +471,7 @@ void do_signal(struct pt_regs *regs) | |||
471 | 471 | ||
472 | if (signr > 0) { | 472 | if (signr > 0) { |
473 | /* Whee! Actually deliver the signal. */ | 473 | /* Whee! Actually deliver the signal. */ |
474 | int ret; | ||
474 | #ifdef CONFIG_COMPAT | 475 | #ifdef CONFIG_COMPAT |
475 | if (test_thread_flag(TIF_31BIT)) { | 476 | if (test_thread_flag(TIF_31BIT)) { |
476 | extern int handle_signal32(unsigned long sig, | 477 | extern int handle_signal32(unsigned long sig, |
@@ -478,15 +479,12 @@ void do_signal(struct pt_regs *regs) | |||
478 | siginfo_t *info, | 479 | siginfo_t *info, |
479 | sigset_t *oldset, | 480 | sigset_t *oldset, |
480 | struct pt_regs *regs); | 481 | struct pt_regs *regs); |
481 | if (handle_signal32( | 482 | ret = handle_signal32(signr, &ka, &info, oldset, regs); |
482 | signr, &ka, &info, oldset, regs) == 0) { | ||
483 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
484 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
485 | } | ||
486 | return; | ||
487 | } | 483 | } |
484 | else | ||
488 | #endif | 485 | #endif |
489 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { | 486 | ret = handle_signal(signr, &ka, &info, oldset, regs); |
487 | if (!ret) { | ||
490 | /* | 488 | /* |
491 | * A signal was successfully delivered; the saved | 489 | * A signal was successfully delivered; the saved |
492 | * sigmask will have been stored in the signal frame, | 490 | * sigmask will have been stored in the signal frame, |
@@ -495,6 +493,14 @@ void do_signal(struct pt_regs *regs) | |||
495 | */ | 493 | */ |
496 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 494 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
497 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 495 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
496 | |||
497 | /* | ||
498 | * If we would have taken a single-step trap | ||
499 | * for a normal instruction, act like we took | ||
500 | * one for the handler setup. | ||
501 | */ | ||
502 | if (current->thread.per_info.single_step) | ||
503 | set_thread_flag(TIF_SINGLE_STEP); | ||
498 | } | 504 | } |
499 | return; | 505 | return; |
500 | } | 506 | } |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 264ea906db4c..aa37fa154512 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/tlbflush.h> | 42 | #include <asm/tlbflush.h> |
43 | #include <asm/timer.h> | 43 | #include <asm/timer.h> |
44 | #include <asm/lowcore.h> | 44 | #include <asm/lowcore.h> |
45 | #include <asm/sclp.h> | ||
45 | #include <asm/cpu.h> | 46 | #include <asm/cpu.h> |
46 | 47 | ||
47 | /* | 48 | /* |
@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr); | |||
53 | cpumask_t cpu_online_map = CPU_MASK_NONE; | 54 | cpumask_t cpu_online_map = CPU_MASK_NONE; |
54 | EXPORT_SYMBOL(cpu_online_map); | 55 | EXPORT_SYMBOL(cpu_online_map); |
55 | 56 | ||
56 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | 57 | cpumask_t cpu_possible_map = CPU_MASK_ALL; |
57 | EXPORT_SYMBOL(cpu_possible_map); | 58 | EXPORT_SYMBOL(cpu_possible_map); |
58 | 59 | ||
59 | static struct task_struct *current_set[NR_CPUS]; | 60 | static struct task_struct *current_set[NR_CPUS]; |
60 | 61 | ||
62 | static u8 smp_cpu_type; | ||
63 | static int smp_use_sigp_detection; | ||
64 | |||
65 | enum s390_cpu_state { | ||
66 | CPU_STATE_STANDBY, | ||
67 | CPU_STATE_CONFIGURED, | ||
68 | }; | ||
69 | |||
70 | #ifdef CONFIG_HOTPLUG_CPU | ||
71 | static DEFINE_MUTEX(smp_cpu_state_mutex); | ||
72 | #endif | ||
73 | static int smp_cpu_state[NR_CPUS]; | ||
74 | |||
75 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | ||
76 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | ||
77 | |||
61 | static void smp_ext_bitcall(int, ec_bit_sig); | 78 | static void smp_ext_bitcall(int, ec_bit_sig); |
62 | 79 | ||
63 | /* | 80 | /* |
@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
193 | } | 210 | } |
194 | EXPORT_SYMBOL(smp_call_function_single); | 211 | EXPORT_SYMBOL(smp_call_function_single); |
195 | 212 | ||
213 | /** | ||
214 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
215 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
216 | * @func: The function to run. This must be fast and non-blocking. | ||
217 | * @info: An arbitrary pointer to pass to the function. | ||
218 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
219 | * | ||
220 | * Returns 0 on success, else a negative status code. | ||
221 | * | ||
222 | * If @wait is true, then returns once @func has returned; otherwise | ||
223 | * it returns just before the target cpu calls @func. | ||
224 | * | ||
225 | * You must not call this function with disabled interrupts or from a | ||
226 | * hardware interrupt handler or from a bottom half handler. | ||
227 | */ | ||
228 | int | ||
229 | smp_call_function_mask(cpumask_t mask, | ||
230 | void (*func)(void *), void *info, | ||
231 | int wait) | ||
232 | { | ||
233 | preempt_disable(); | ||
234 | __smp_call_function_map(func, info, 0, wait, mask); | ||
235 | preempt_enable(); | ||
236 | return 0; | ||
237 | } | ||
238 | EXPORT_SYMBOL(smp_call_function_mask); | ||
239 | |||
196 | void smp_send_stop(void) | 240 | void smp_send_stop(void) |
197 | { | 241 | { |
198 | int cpu, rc; | 242 | int cpu, rc; |
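As a usage sketch only for the newly exported smp_call_function_mask(): the caller below and its do_flush() callback are hypothetical and not part of this patch. It builds a mask of the other online cpus (the mask must not include the current cpu, per the comment above) and waits for the callback to finish everywhere, from process context:

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    /* hypothetical callback: must be fast and non-blocking */
    static void do_flush(void *info)
    {
            /* per-cpu work goes here */
    }

    /* hypothetical caller, process context */
    static void flush_all_others(void)
    {
            cpumask_t mask;
            int cpu;

            cpu = get_cpu();                /* pin this cpu while the mask is built */
            mask = cpu_online_map;
            cpu_clear(cpu, mask);           /* the mask must not include the current cpu */
            smp_call_function_mask(mask, do_flush, NULL, 1);  /* wait=1: block until done */
            put_cpu();
    }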
@@ -217,33 +261,6 @@ void smp_send_stop(void) | |||
217 | } | 261 | } |
218 | 262 | ||
219 | /* | 263 | /* |
220 | * Reboot, halt and power_off routines for SMP. | ||
221 | */ | ||
222 | void machine_restart_smp(char *__unused) | ||
223 | { | ||
224 | smp_send_stop(); | ||
225 | do_reipl(); | ||
226 | } | ||
227 | |||
228 | void machine_halt_smp(void) | ||
229 | { | ||
230 | smp_send_stop(); | ||
231 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) | ||
232 | __cpcmd(vmhalt_cmd, NULL, 0, NULL); | ||
233 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
234 | for (;;); | ||
235 | } | ||
236 | |||
237 | void machine_power_off_smp(void) | ||
238 | { | ||
239 | smp_send_stop(); | ||
240 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | ||
241 | __cpcmd(vmpoff_cmd, NULL, 0, NULL); | ||
242 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
243 | for (;;); | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * This is the main routine where commands issued by other | 264 | * This is the main routine where commands issued by other |
248 | * cpus are handled. | 265 | * cpus are handled. |
249 | */ | 266 | */ |
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
355 | } | 372 | } |
356 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 373 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
357 | 374 | ||
375 | /* | ||
376 | * In early ipl state a temporary logical cpu number is needed, so the sigp | ||
377 | * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on | ||
378 | * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1. | ||
379 | */ | ||
380 | #define CPU_INIT_NO 1 | ||
381 | |||
358 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) | 382 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) |
359 | 383 | ||
360 | /* | 384 | /* |
@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | |||
375 | "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); | 399 | "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); |
376 | return; | 400 | return; |
377 | } | 401 | } |
378 | zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area)); | 402 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); |
379 | __cpu_logical_map[1] = (__u16) phy_cpu; | 403 | __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; |
380 | while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy) | 404 | while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) == |
405 | sigp_busy) | ||
381 | cpu_relax(); | 406 | cpu_relax(); |
382 | memcpy(zfcpdump_save_areas[cpu], | 407 | memcpy(zfcpdump_save_areas[cpu], |
383 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, | 408 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, |
@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | |||
397 | 422 | ||
398 | #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ | 423 | #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ |
399 | 424 | ||
400 | /* | 425 | static int cpu_stopped(int cpu) |
401 | * Lets check how many CPUs we have. | ||
402 | */ | ||
403 | static unsigned int __init smp_count_cpus(void) | ||
404 | { | 426 | { |
405 | unsigned int cpu, num_cpus; | 427 | __u32 status; |
406 | __u16 boot_cpu_addr; | ||
407 | 428 | ||
408 | /* | 429 | /* Check for stopped state */ |
409 | * cpu 0 is the boot cpu. See smp_prepare_boot_cpu. | 430 | if (signal_processor_ps(&status, 0, cpu, sigp_sense) == |
410 | */ | 431 | sigp_status_stored) { |
432 | if (status & 0x40) | ||
433 | return 1; | ||
434 | } | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int cpu_known(int cpu_id) | ||
439 | { | ||
440 | int cpu; | ||
441 | |||
442 | for_each_present_cpu(cpu) { | ||
443 | if (__cpu_logical_map[cpu] == cpu_id) | ||
444 | return 1; | ||
445 | } | ||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | static int smp_rescan_cpus_sigp(cpumask_t avail) | ||
450 | { | ||
451 | int cpu_id, logical_cpu; | ||
452 | |||
453 | logical_cpu = first_cpu(avail); | ||
454 | if (logical_cpu == NR_CPUS) | ||
455 | return 0; | ||
456 | for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { | ||
457 | if (cpu_known(cpu_id)) | ||
458 | continue; | ||
459 | __cpu_logical_map[logical_cpu] = cpu_id; | ||
460 | if (!cpu_stopped(logical_cpu)) | ||
461 | continue; | ||
462 | cpu_set(logical_cpu, cpu_present_map); | ||
463 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | ||
464 | logical_cpu = next_cpu(logical_cpu, avail); | ||
465 | if (logical_cpu == NR_CPUS) | ||
466 | break; | ||
467 | } | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int smp_rescan_cpus_sclp(cpumask_t avail) | ||
472 | { | ||
473 | struct sclp_cpu_info *info; | ||
474 | int cpu_id, logical_cpu, cpu; | ||
475 | int rc; | ||
476 | |||
477 | logical_cpu = first_cpu(avail); | ||
478 | if (logical_cpu == NR_CPUS) | ||
479 | return 0; | ||
480 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
481 | if (!info) | ||
482 | return -ENOMEM; | ||
483 | rc = sclp_get_cpu_info(info); | ||
484 | if (rc) | ||
485 | goto out; | ||
486 | for (cpu = 0; cpu < info->combined; cpu++) { | ||
487 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | ||
488 | continue; | ||
489 | cpu_id = info->cpu[cpu].address; | ||
490 | if (cpu_known(cpu_id)) | ||
491 | continue; | ||
492 | __cpu_logical_map[logical_cpu] = cpu_id; | ||
493 | cpu_set(logical_cpu, cpu_present_map); | ||
494 | if (cpu >= info->configured) | ||
495 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; | ||
496 | else | ||
497 | smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; | ||
498 | logical_cpu = next_cpu(logical_cpu, avail); | ||
499 | if (logical_cpu == NR_CPUS) | ||
500 | break; | ||
501 | } | ||
502 | out: | ||
503 | kfree(info); | ||
504 | return rc; | ||
505 | } | ||
506 | |||
507 | static int smp_rescan_cpus(void) | ||
508 | { | ||
509 | cpumask_t avail; | ||
510 | |||
511 | cpus_xor(avail, cpu_possible_map, cpu_present_map); | ||
512 | if (smp_use_sigp_detection) | ||
513 | return smp_rescan_cpus_sigp(avail); | ||
514 | else | ||
515 | return smp_rescan_cpus_sclp(avail); | ||
516 | } | ||
517 | |||
518 | static void __init smp_detect_cpus(void) | ||
519 | { | ||
520 | unsigned int cpu, c_cpus, s_cpus; | ||
521 | struct sclp_cpu_info *info; | ||
522 | u16 boot_cpu_addr, cpu_addr; | ||
523 | |||
524 | c_cpus = 1; | ||
525 | s_cpus = 0; | ||
411 | boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; | 526 | boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; |
412 | current_thread_info()->cpu = 0; | 527 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
413 | num_cpus = 1; | 528 | if (!info) |
414 | for (cpu = 0; cpu <= 65535; cpu++) { | 529 | panic("smp_detect_cpus failed to allocate memory\n"); |
415 | if ((__u16) cpu == boot_cpu_addr) | 530 | /* Use sigp detection algorithm if sclp doesn't work. */ |
531 | if (sclp_get_cpu_info(info)) { | ||
532 | smp_use_sigp_detection = 1; | ||
533 | for (cpu = 0; cpu <= 65535; cpu++) { | ||
534 | if (cpu == boot_cpu_addr) | ||
535 | continue; | ||
536 | __cpu_logical_map[CPU_INIT_NO] = cpu; | ||
537 | if (!cpu_stopped(CPU_INIT_NO)) | ||
538 | continue; | ||
539 | smp_get_save_area(c_cpus, cpu); | ||
540 | c_cpus++; | ||
541 | } | ||
542 | goto out; | ||
543 | } | ||
544 | |||
545 | if (info->has_cpu_type) { | ||
546 | for (cpu = 0; cpu < info->combined; cpu++) { | ||
547 | if (info->cpu[cpu].address == boot_cpu_addr) { | ||
548 | smp_cpu_type = info->cpu[cpu].type; | ||
549 | break; | ||
550 | } | ||
551 | } | ||
552 | } | ||
553 | |||
554 | for (cpu = 0; cpu < info->combined; cpu++) { | ||
555 | if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) | ||
556 | continue; | ||
557 | cpu_addr = info->cpu[cpu].address; | ||
558 | if (cpu_addr == boot_cpu_addr) | ||
416 | continue; | 559 | continue; |
417 | __cpu_logical_map[1] = (__u16) cpu; | 560 | __cpu_logical_map[CPU_INIT_NO] = cpu_addr; |
418 | if (signal_processor(1, sigp_sense) == sigp_not_operational) | 561 | if (!cpu_stopped(CPU_INIT_NO)) { |
562 | s_cpus++; | ||
419 | continue; | 563 | continue; |
420 | smp_get_save_area(num_cpus, cpu); | 564 | } |
421 | num_cpus++; | 565 | smp_get_save_area(c_cpus, cpu_addr); |
566 | c_cpus++; | ||
422 | } | 567 | } |
423 | printk("Detected %d CPU's\n", (int) num_cpus); | 568 | out: |
424 | printk("Boot cpu address %2X\n", boot_cpu_addr); | 569 | kfree(info); |
425 | return num_cpus; | 570 | printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus); |
571 | get_online_cpus(); | ||
572 | smp_rescan_cpus(); | ||
573 | put_online_cpus(); | ||
426 | } | 574 | } |
427 | 575 | ||
428 | /* | 576 | /* |
@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
453 | return 0; | 601 | return 0; |
454 | } | 602 | } |
455 | 603 | ||
456 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | ||
457 | |||
458 | static void __init smp_create_idle(unsigned int cpu) | 604 | static void __init smp_create_idle(unsigned int cpu) |
459 | { | 605 | { |
460 | struct task_struct *p; | 606 | struct task_struct *p; |
@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu) | |||
470 | spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock); | 616 | spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock); |
471 | } | 617 | } |
472 | 618 | ||
473 | static int cpu_stopped(int cpu) | 619 | static int __cpuinit smp_alloc_lowcore(int cpu) |
474 | { | 620 | { |
475 | __u32 status; | 621 | unsigned long async_stack, panic_stack; |
622 | struct _lowcore *lowcore; | ||
623 | int lc_order; | ||
624 | |||
625 | lc_order = sizeof(long) == 8 ? 1 : 0; | ||
626 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order); | ||
627 | if (!lowcore) | ||
628 | return -ENOMEM; | ||
629 | async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | ||
630 | if (!async_stack) | ||
631 | goto out_async_stack; | ||
632 | panic_stack = __get_free_page(GFP_KERNEL); | ||
633 | if (!panic_stack) | ||
634 | goto out_panic_stack; | ||
635 | |||
636 | *lowcore = S390_lowcore; | ||
637 | lowcore->async_stack = async_stack + ASYNC_SIZE; | ||
638 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | ||
476 | 639 | ||
477 | /* Check for stopped state */ | 640 | #ifndef CONFIG_64BIT |
478 | if (signal_processor_ps(&status, 0, cpu, sigp_sense) == | 641 | if (MACHINE_HAS_IEEE) { |
479 | sigp_status_stored) { | 642 | unsigned long save_area; |
480 | if (status & 0x40) | 643 | |
481 | return 1; | 644 | save_area = get_zeroed_page(GFP_KERNEL); |
645 | if (!save_area) | ||
646 | goto out_save_area; | ||
647 | lowcore->extended_save_area_addr = (u32) save_area; | ||
482 | } | 648 | } |
649 | #endif | ||
650 | lowcore_ptr[cpu] = lowcore; | ||
483 | return 0; | 651 | return 0; |
652 | |||
653 | #ifndef CONFIG_64BIT | ||
654 | out_save_area: | ||
655 | free_page(panic_stack); | ||
656 | #endif | ||
657 | out_panic_stack: | ||
658 | free_pages(async_stack, ASYNC_ORDER); | ||
659 | out_async_stack: | ||
660 | free_pages((unsigned long) lowcore, lc_order); | ||
661 | return -ENOMEM; | ||
484 | } | 662 | } |
485 | 663 | ||
486 | /* Upping and downing of CPUs */ | 664 | #ifdef CONFIG_HOTPLUG_CPU |
665 | static void smp_free_lowcore(int cpu) | ||
666 | { | ||
667 | struct _lowcore *lowcore; | ||
668 | int lc_order; | ||
669 | |||
670 | lc_order = sizeof(long) == 8 ? 1 : 0; | ||
671 | lowcore = lowcore_ptr[cpu]; | ||
672 | #ifndef CONFIG_64BIT | ||
673 | if (MACHINE_HAS_IEEE) | ||
674 | free_page((unsigned long) lowcore->extended_save_area_addr); | ||
675 | #endif | ||
676 | free_page(lowcore->panic_stack - PAGE_SIZE); | ||
677 | free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); | ||
678 | free_pages((unsigned long) lowcore, lc_order); | ||
679 | lowcore_ptr[cpu] = NULL; | ||
680 | } | ||
681 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
487 | 682 | ||
488 | int __cpu_up(unsigned int cpu) | 683 | /* Upping and downing of CPUs */ |
684 | int __cpuinit __cpu_up(unsigned int cpu) | ||
489 | { | 685 | { |
490 | struct task_struct *idle; | 686 | struct task_struct *idle; |
491 | struct _lowcore *cpu_lowcore; | 687 | struct _lowcore *cpu_lowcore; |
492 | struct stack_frame *sf; | 688 | struct stack_frame *sf; |
493 | sigp_ccode ccode; | 689 | sigp_ccode ccode; |
494 | int curr_cpu; | ||
495 | 690 | ||
496 | for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { | 691 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) |
497 | __cpu_logical_map[cpu] = (__u16) curr_cpu; | 692 | return -EIO; |
498 | if (cpu_stopped(cpu)) | 693 | if (smp_alloc_lowcore(cpu)) |
499 | break; | 694 | return -ENOMEM; |
500 | } | ||
501 | |||
502 | if (!cpu_stopped(cpu)) | ||
503 | return -ENODEV; | ||
504 | 695 | ||
505 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), | 696 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), |
506 | cpu, sigp_set_prefix); | 697 | cpu, sigp_set_prefix); |
@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu) | |||
515 | cpu_lowcore = lowcore_ptr[cpu]; | 706 | cpu_lowcore = lowcore_ptr[cpu]; |
516 | cpu_lowcore->kernel_stack = (unsigned long) | 707 | cpu_lowcore->kernel_stack = (unsigned long) |
517 | task_stack_page(idle) + THREAD_SIZE; | 708 | task_stack_page(idle) + THREAD_SIZE; |
709 | cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); | ||
518 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack | 710 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack |
519 | - sizeof(struct pt_regs) | 711 | - sizeof(struct pt_regs) |
520 | - sizeof(struct stack_frame)); | 712 | - sizeof(struct stack_frame)); |
@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu) | |||
528 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; | 720 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; |
529 | cpu_lowcore->current_task = (unsigned long) idle; | 721 | cpu_lowcore->current_task = (unsigned long) idle; |
530 | cpu_lowcore->cpu_data.cpu_nr = cpu; | 722 | cpu_lowcore->cpu_data.cpu_nr = cpu; |
723 | cpu_lowcore->softirq_pending = 0; | ||
724 | cpu_lowcore->ext_call_fast = 0; | ||
531 | eieio(); | 725 | eieio(); |
532 | 726 | ||
533 | while (signal_processor(cpu, sigp_restart) == sigp_busy) | 727 | while (signal_processor(cpu, sigp_restart) == sigp_busy) |
@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu) | |||
538 | return 0; | 732 | return 0; |
539 | } | 733 | } |
540 | 734 | ||
541 | static unsigned int __initdata additional_cpus; | 735 | static int __init setup_possible_cpus(char *s) |
542 | static unsigned int __initdata possible_cpus; | ||
543 | |||
544 | void __init smp_setup_cpu_possible_map(void) | ||
545 | { | 736 | { |
546 | unsigned int phy_cpus, pos_cpus, cpu; | 737 | int pcpus, cpu; |
547 | |||
548 | phy_cpus = smp_count_cpus(); | ||
549 | pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); | ||
550 | |||
551 | if (possible_cpus) | ||
552 | pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS); | ||
553 | 738 | ||
554 | for (cpu = 0; cpu < pos_cpus; cpu++) | 739 | pcpus = simple_strtoul(s, NULL, 0); |
740 | cpu_possible_map = cpumask_of_cpu(0); | ||
741 | for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++) | ||
555 | cpu_set(cpu, cpu_possible_map); | 742 | cpu_set(cpu, cpu_possible_map); |
556 | |||
557 | phy_cpus = min(phy_cpus, pos_cpus); | ||
558 | |||
559 | for (cpu = 0; cpu < phy_cpus; cpu++) | ||
560 | cpu_set(cpu, cpu_present_map); | ||
561 | } | ||
562 | |||
563 | #ifdef CONFIG_HOTPLUG_CPU | ||
564 | |||
565 | static int __init setup_additional_cpus(char *s) | ||
566 | { | ||
567 | additional_cpus = simple_strtoul(s, NULL, 0); | ||
568 | return 0; | ||
569 | } | ||
570 | early_param("additional_cpus", setup_additional_cpus); | ||
571 | |||
572 | static int __init setup_possible_cpus(char *s) | ||
573 | { | ||
574 | possible_cpus = simple_strtoul(s, NULL, 0); | ||
575 | return 0; | 743 | return 0; |
576 | } | 744 | } |
577 | early_param("possible_cpus", setup_possible_cpus); | 745 | early_param("possible_cpus", setup_possible_cpus); |
578 | 746 | ||
747 | #ifdef CONFIG_HOTPLUG_CPU | ||
748 | |||
579 | int __cpu_disable(void) | 749 | int __cpu_disable(void) |
580 | { | 750 | { |
581 | struct ec_creg_mask_parms cr_parms; | 751 | struct ec_creg_mask_parms cr_parms; |
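The reworked possible_cpus= handling above always keeps cpu 0 and then marks cpus 1..N-1 possible, capped at NR_CPUS. A user-space sketch of the same parsing, with NR_CPUS assumed to be 64 for illustration and "4" standing in for booting with possible_cpus=4:

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS 64                      /* assumed value, for illustration only */

    int main(void)
    {
            const char *arg = "4";          /* stands in for possible_cpus=4 on the command line */
            unsigned long pcpus = strtoul(arg, NULL, 0);
            int cpu, last = 0;              /* cpu 0 is always kept possible */

            for (cpu = 1; cpu < (int) pcpus && cpu < NR_CPUS; cpu++)
                    last = cpu;

            printf("possible_cpus=%s -> cpus 0-%d marked possible\n", arg, last);
            return 0;
    }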
@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu) | |||
612 | /* Wait until target cpu is down */ | 782 | /* Wait until target cpu is down */ |
613 | while (!smp_cpu_not_running(cpu)) | 783 | while (!smp_cpu_not_running(cpu)) |
614 | cpu_relax(); | 784 | cpu_relax(); |
615 | printk("Processor %d spun down\n", cpu); | 785 | smp_free_lowcore(cpu); |
786 | printk(KERN_INFO "Processor %d spun down\n", cpu); | ||
616 | } | 787 | } |
617 | 788 | ||
618 | void cpu_die(void) | 789 | void cpu_die(void) |
@@ -625,49 +796,19 @@ void cpu_die(void) | |||
625 | 796 | ||
626 | #endif /* CONFIG_HOTPLUG_CPU */ | 797 | #endif /* CONFIG_HOTPLUG_CPU */ |
627 | 798 | ||
628 | /* | ||
629 | * Cycle through the processors and setup structures. | ||
630 | */ | ||
631 | |||
632 | void __init smp_prepare_cpus(unsigned int max_cpus) | 799 | void __init smp_prepare_cpus(unsigned int max_cpus) |
633 | { | 800 | { |
634 | unsigned long stack; | ||
635 | unsigned int cpu; | 801 | unsigned int cpu; |
636 | int i; | 802 | |
803 | smp_detect_cpus(); | ||
637 | 804 | ||
638 | /* request the 0x1201 emergency signal external interrupt */ | 805 | /* request the 0x1201 emergency signal external interrupt */ |
639 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 806 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
640 | panic("Couldn't request external interrupt 0x1201"); | 807 | panic("Couldn't request external interrupt 0x1201"); |
641 | memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); | 808 | memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); |
642 | /* | ||
643 | * Initialize prefix pages and stacks for all possible cpus | ||
644 | */ | ||
645 | print_cpu_info(&S390_lowcore.cpu_data); | 809 | print_cpu_info(&S390_lowcore.cpu_data); |
810 | smp_alloc_lowcore(smp_processor_id()); | ||
646 | 811 | ||
647 | for_each_possible_cpu(i) { | ||
648 | lowcore_ptr[i] = (struct _lowcore *) | ||
649 | __get_free_pages(GFP_KERNEL | GFP_DMA, | ||
650 | sizeof(void*) == 8 ? 1 : 0); | ||
651 | stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | ||
652 | if (!lowcore_ptr[i] || !stack) | ||
653 | panic("smp_boot_cpus failed to allocate memory\n"); | ||
654 | |||
655 | *(lowcore_ptr[i]) = S390_lowcore; | ||
656 | lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE; | ||
657 | stack = __get_free_pages(GFP_KERNEL, 0); | ||
658 | if (!stack) | ||
659 | panic("smp_boot_cpus failed to allocate memory\n"); | ||
660 | lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE; | ||
661 | #ifndef CONFIG_64BIT | ||
662 | if (MACHINE_HAS_IEEE) { | ||
663 | lowcore_ptr[i]->extended_save_area_addr = | ||
664 | (__u32) __get_free_pages(GFP_KERNEL, 0); | ||
665 | if (!lowcore_ptr[i]->extended_save_area_addr) | ||
666 | panic("smp_boot_cpus failed to " | ||
667 | "allocate memory\n"); | ||
668 | } | ||
669 | #endif | ||
670 | } | ||
671 | #ifndef CONFIG_64BIT | 812 | #ifndef CONFIG_64BIT |
672 | if (MACHINE_HAS_IEEE) | 813 | if (MACHINE_HAS_IEEE) |
673 | ctl_set_bit(14, 29); /* enable extended save area */ | 814 | ctl_set_bit(14, 29); /* enable extended save area */ |
@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void) | |||
683 | { | 824 | { |
684 | BUG_ON(smp_processor_id() != 0); | 825 | BUG_ON(smp_processor_id() != 0); |
685 | 826 | ||
827 | current_thread_info()->cpu = 0; | ||
828 | cpu_set(0, cpu_present_map); | ||
686 | cpu_set(0, cpu_online_map); | 829 | cpu_set(0, cpu_online_map); |
687 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 830 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
688 | current_set[0] = current; | 831 | current_set[0] = current; |
832 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; | ||
689 | spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); | 833 | spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); |
690 | } | 834 | } |
691 | 835 | ||
692 | void __init smp_cpus_done(unsigned int max_cpus) | 836 | void __init smp_cpus_done(unsigned int max_cpus) |
693 | { | 837 | { |
694 | cpu_present_map = cpu_possible_map; | ||
695 | } | 838 | } |
696 | 839 | ||
697 | /* | 840 | /* |
@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier) | |||
705 | return 0; | 848 | return 0; |
706 | } | 849 | } |
707 | 850 | ||
708 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 851 | #ifdef CONFIG_HOTPLUG_CPU |
852 | static ssize_t cpu_configure_show(struct sys_device *dev, char *buf) | ||
853 | { | ||
854 | ssize_t count; | ||
855 | |||
856 | mutex_lock(&smp_cpu_state_mutex); | ||
857 | count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); | ||
858 | mutex_unlock(&smp_cpu_state_mutex); | ||
859 | return count; | ||
860 | } | ||
861 | |||
862 | static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf, | ||
863 | size_t count) | ||
864 | { | ||
865 | int cpu = dev->id; | ||
866 | int val, rc; | ||
867 | char delim; | ||
868 | |||
869 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | ||
870 | return -EINVAL; | ||
871 | if (val != 0 && val != 1) | ||
872 | return -EINVAL; | ||
873 | |||
874 | mutex_lock(&smp_cpu_state_mutex); | ||
875 | get_online_cpus(); | ||
876 | rc = -EBUSY; | ||
877 | if (cpu_online(cpu)) | ||
878 | goto out; | ||
879 | rc = 0; | ||
880 | switch (val) { | ||
881 | case 0: | ||
882 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { | ||
883 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); | ||
884 | if (!rc) | ||
885 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; | ||
886 | } | ||
887 | break; | ||
888 | case 1: | ||
889 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { | ||
890 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); | ||
891 | if (!rc) | ||
892 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; | ||
893 | } | ||
894 | break; | ||
895 | default: | ||
896 | break; | ||
897 | } | ||
898 | out: | ||
899 | put_online_cpus(); | ||
900 | mutex_unlock(&smp_cpu_state_mutex); | ||
901 | return rc ? rc : count; | ||
902 | } | ||
903 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); | ||
904 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
905 | |||
906 | static ssize_t show_cpu_address(struct sys_device *dev, char *buf) | ||
907 | { | ||
908 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); | ||
909 | } | ||
910 | static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL); | ||
911 | |||
912 | |||
913 | static struct attribute *cpu_common_attrs[] = { | ||
914 | #ifdef CONFIG_HOTPLUG_CPU | ||
915 | &attr_configure.attr, | ||
916 | #endif | ||
917 | &attr_address.attr, | ||
918 | NULL, | ||
919 | }; | ||
920 | |||
921 | static struct attribute_group cpu_common_attr_group = { | ||
922 | .attrs = cpu_common_attrs, | ||
923 | }; | ||
709 | 924 | ||
710 | static ssize_t show_capability(struct sys_device *dev, char *buf) | 925 | static ssize_t show_capability(struct sys_device *dev, char *buf) |
711 | { | 926 | { |
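The configure attribute above is created per cpu on the sysdev cpu class, so with the usual sysfs layout it appears as /sys/devices/system/cpu/cpuN/configure (the path and the cpu number below are assumptions for illustration). A small user-space sketch that moves a cpu to standby and back:

    #include <stdio.h>

    /* write "0" (deconfigure) or "1" (configure) to a cpu's configure file */
    static int set_configured(int cpu, int val)
    {
            char path[64];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/configure", cpu);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%d\n", val);
            return fclose(f);               /* write is flushed here; fails if rejected */
    }

    int main(void)
    {
            if (set_configured(2, 0))       /* rejected with EBUSY while cpu 2 is online */
                    perror("deconfigure cpu 2");
            if (set_configured(2, 1))
                    perror("configure cpu 2");
            return 0;
    }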
@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf) | |||
750 | } | 965 | } |
751 | static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); | 966 | static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); |
752 | 967 | ||
753 | static struct attribute *cpu_attrs[] = { | 968 | static struct attribute *cpu_online_attrs[] = { |
754 | &attr_capability.attr, | 969 | &attr_capability.attr, |
755 | &attr_idle_count.attr, | 970 | &attr_idle_count.attr, |
756 | &attr_idle_time_us.attr, | 971 | &attr_idle_time_us.attr, |
757 | NULL, | 972 | NULL, |
758 | }; | 973 | }; |
759 | 974 | ||
760 | static struct attribute_group cpu_attr_group = { | 975 | static struct attribute_group cpu_online_attr_group = { |
761 | .attrs = cpu_attrs, | 976 | .attrs = cpu_online_attrs, |
762 | }; | 977 | }; |
763 | 978 | ||
764 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, | 979 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, |
@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, | |||
778 | idle->idle_time = 0; | 993 | idle->idle_time = 0; |
779 | idle->idle_count = 0; | 994 | idle->idle_count = 0; |
780 | spin_unlock_irq(&idle->lock); | 995 | spin_unlock_irq(&idle->lock); |
781 | if (sysfs_create_group(&s->kobj, &cpu_attr_group)) | 996 | if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) |
782 | return NOTIFY_BAD; | 997 | return NOTIFY_BAD; |
783 | break; | 998 | break; |
784 | case CPU_DEAD: | 999 | case CPU_DEAD: |
785 | case CPU_DEAD_FROZEN: | 1000 | case CPU_DEAD_FROZEN: |
786 | sysfs_remove_group(&s->kobj, &cpu_attr_group); | 1001 | sysfs_remove_group(&s->kobj, &cpu_online_attr_group); |
787 | break; | 1002 | break; |
788 | } | 1003 | } |
789 | return NOTIFY_OK; | 1004 | return NOTIFY_OK; |
@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = { | |||
793 | .notifier_call = smp_cpu_notify, | 1008 | .notifier_call = smp_cpu_notify, |
794 | }; | 1009 | }; |
795 | 1010 | ||
1011 | static int smp_add_present_cpu(int cpu) | ||
1012 | { | ||
1013 | struct cpu *c = &per_cpu(cpu_devices, cpu); | ||
1014 | struct sys_device *s = &c->sysdev; | ||
1015 | int rc; | ||
1016 | |||
1017 | c->hotpluggable = 1; | ||
1018 | rc = register_cpu(c, cpu); | ||
1019 | if (rc) | ||
1020 | goto out; | ||
1021 | rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); | ||
1022 | if (rc) | ||
1023 | goto out_cpu; | ||
1024 | if (!cpu_online(cpu)) | ||
1025 | goto out; | ||
1026 | rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); | ||
1027 | if (!rc) | ||
1028 | return 0; | ||
1029 | sysfs_remove_group(&s->kobj, &cpu_common_attr_group); | ||
1030 | out_cpu: | ||
1031 | #ifdef CONFIG_HOTPLUG_CPU | ||
1032 | unregister_cpu(c); | ||
1033 | #endif | ||
1034 | out: | ||
1035 | return rc; | ||
1036 | } | ||
1037 | |||
1038 | #ifdef CONFIG_HOTPLUG_CPU | ||
1039 | static ssize_t rescan_store(struct sys_device *dev, const char *buf, | ||
1040 | size_t count) | ||
1041 | { | ||
1042 | cpumask_t newcpus; | ||
1043 | int cpu; | ||
1044 | int rc; | ||
1045 | |||
1046 | mutex_lock(&smp_cpu_state_mutex); | ||
1047 | get_online_cpus(); | ||
1048 | newcpus = cpu_present_map; | ||
1049 | rc = smp_rescan_cpus(); | ||
1050 | if (rc) | ||
1051 | goto out; | ||
1052 | cpus_andnot(newcpus, cpu_present_map, newcpus); | ||
1053 | for_each_cpu_mask(cpu, newcpus) { | ||
1054 | rc = smp_add_present_cpu(cpu); | ||
1055 | if (rc) | ||
1056 | cpu_clear(cpu, cpu_present_map); | ||
1057 | } | ||
1058 | rc = 0; | ||
1059 | out: | ||
1060 | put_online_cpus(); | ||
1061 | mutex_unlock(&smp_cpu_state_mutex); | ||
1062 | return rc ? rc : count; | ||
1063 | } | ||
1064 | static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); | ||
1065 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
1066 | |||
796 | static int __init topology_init(void) | 1067 | static int __init topology_init(void) |
797 | { | 1068 | { |
798 | int cpu; | 1069 | int cpu; |
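The rescan attribute, by contrast, is registered on the cpu sysdev class object itself, so it appears once rather than per cpu; assuming the usual layout that is /sys/devices/system/cpu/rescan, and any write triggers smp_rescan_cpus(). A minimal sketch:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/rescan", "w");

            if (!f) {
                    perror("open rescan");
                    return 1;
            }
            fputs("1\n", f);        /* the written value is ignored; the write itself rescans */
            return fclose(f) ? 1 : 0;
    }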
@@ -800,16 +1071,14 @@ static int __init topology_init(void) | |||
800 | 1071 | ||
801 | register_cpu_notifier(&smp_cpu_nb); | 1072 | register_cpu_notifier(&smp_cpu_nb); |
802 | 1073 | ||
803 | for_each_possible_cpu(cpu) { | 1074 | #ifdef CONFIG_HOTPLUG_CPU |
804 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 1075 | rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, |
805 | struct sys_device *s = &c->sysdev; | 1076 | &attr_rescan.attr); |
806 | 1077 | if (rc) | |
807 | c->hotpluggable = 1; | 1078 | return rc; |
808 | register_cpu(c, cpu); | 1079 | #endif |
809 | if (!cpu_online(cpu)) | 1080 | for_each_present_cpu(cpu) { |
810 | continue; | 1081 | rc = smp_add_present_cpu(cpu); |
811 | s = &c->sysdev; | ||
812 | rc = sysfs_create_group(&s->kobj, &cpu_attr_group); | ||
813 | if (rc) | 1082 | if (rc) |
814 | return rc; | 1083 | return rc; |
815 | } | 1084 | } |
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 8ed16a83fba7..52b8342c6bf2 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/reboot.h> | 31 | #include <linux/reboot.h> |
32 | #include <linux/kprobes.h> | 32 | #include <linux/kprobes.h> |
33 | #include <linux/bug.h> | 33 | #include <linux/bug.h> |
34 | #include <linux/utsname.h> | ||
34 | #include <asm/system.h> | 35 | #include <asm/system.h> |
35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
36 | #include <asm/io.h> | 37 | #include <asm/io.h> |
@@ -168,9 +169,16 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
168 | */ | 169 | */ |
169 | void dump_stack(void) | 170 | void dump_stack(void) |
170 | { | 171 | { |
172 | printk("CPU: %d %s %s %.*s\n", | ||
173 | task_thread_info(current)->cpu, print_tainted(), | ||
174 | init_utsname()->release, | ||
175 | (int)strcspn(init_utsname()->version, " "), | ||
176 | init_utsname()->version); | ||
177 | printk("Process %s (pid: %d, task: %p, ksp: %p)\n", | ||
178 | current->comm, current->pid, current, | ||
179 | (void *) current->thread.ksp); | ||
171 | show_stack(NULL, NULL); | 180 | show_stack(NULL, NULL); |
172 | } | 181 | } |
173 | |||
174 | EXPORT_SYMBOL(dump_stack); | 182 | EXPORT_SYMBOL(dump_stack); |
175 | 183 | ||
176 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) | 184 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) |
@@ -258,8 +266,14 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
258 | console_verbose(); | 266 | console_verbose(); |
259 | spin_lock_irq(&die_lock); | 267 | spin_lock_irq(&die_lock); |
260 | bust_spinlocks(1); | 268 | bust_spinlocks(1); |
261 | printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); | 269 | printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); |
262 | print_modules(); | 270 | #ifdef CONFIG_PREEMPT |
271 | printk("PREEMPT "); | ||
272 | #endif | ||
273 | #ifdef CONFIG_SMP | ||
274 | printk("SMP"); | ||
275 | #endif | ||
276 | printk("\n"); | ||
263 | notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); | 277 | notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); |
264 | show_regs(regs); | 278 | show_regs(regs); |
265 | bust_spinlocks(0); | 279 | bust_spinlocks(0); |
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 849120e3e28a..936159199346 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -17,6 +17,12 @@ ENTRY(_start) | |||
17 | jiffies = jiffies_64; | 17 | jiffies = jiffies_64; |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | PHDRS { | ||
21 | text PT_LOAD FLAGS(5); /* R_E */ | ||
22 | data PT_LOAD FLAGS(7); /* RWE */ | ||
23 | note PT_NOTE FLAGS(0); /* ___ */ | ||
24 | } | ||
25 | |||
20 | SECTIONS | 26 | SECTIONS |
21 | { | 27 | { |
22 | . = 0x00000000; | 28 | . = 0x00000000; |
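The FLAGS() numbers in the new PHDRS block are the ELF p_flags bits (PF_X = 1, PF_W = 2, PF_R = 4), so 5 means read+execute and 7 read+write+execute, matching the R_E / RWE comments. A tiny sketch for decoding them:

    #include <stdio.h>

    #define PF_X 0x1
    #define PF_W 0x2
    #define PF_R 0x4

    static void decode(unsigned int flags)
    {
            printf("%u -> %c%c%c\n", flags,
                   flags & PF_R ? 'R' : '_',
                   flags & PF_W ? 'W' : '_',
                   flags & PF_X ? 'E' : '_');
    }

    int main(void)
    {
            decode(5);      /* text segment: R_E */
            decode(7);      /* data segment: RWE */
            decode(0);      /* note segment: ___ */
            return 0;
    }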
@@ -33,6 +39,9 @@ SECTIONS | |||
33 | 39 | ||
34 | _etext = .; /* End of text section */ | 40 | _etext = .; /* End of text section */ |
35 | 41 | ||
42 | NOTES :text :note | ||
43 | BUG_TABLE :text | ||
44 | |||
36 | RODATA | 45 | RODATA |
37 | 46 | ||
38 | #ifdef CONFIG_SHARED_KERNEL | 47 | #ifdef CONFIG_SHARED_KERNEL |
@@ -49,9 +58,6 @@ SECTIONS | |||
49 | __stop___ex_table = .; | 58 | __stop___ex_table = .; |
50 | } | 59 | } |
51 | 60 | ||
52 | NOTES | ||
53 | BUG_TABLE | ||
54 | |||
55 | .data : { /* Data */ | 61 | .data : { /* Data */ |
56 | DATA_DATA | 62 | DATA_DATA |
57 | CONSTRUCTORS | 63 | CONSTRUCTORS |