author    Jiri Kosina <jkosina@suse.cz>  2011-11-13 14:55:35 -0500
committer Jiri Kosina <jkosina@suse.cz>  2011-11-13 14:55:53 -0500
commit    2290c0d06d82faee87b1ab2d9d4f7bf81ef64379 (patch)
tree      e075e4d5534193f28e6059904f61e5ca03958d3c /arch/s390
parent    4da669a2e3e5bc70b30a0465f3641528681b5f77 (diff)
parent    52e4c2a05256cb83cda12f3c2137ab1533344edb (diff)

Merge branch 'master' into for-next

Sync with Linus tree to have 157550ff ("mtd: add GPMI-NAND driver
in the config and Makefile") as I have patch depending on that one.

Diffstat (limited to 'arch/s390'):
 78 files changed, 2144 insertions(+), 461 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ce8eaaa65665..7014cfc66d2b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -90,6 +90,7 @@ config S390
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
 	select HAVE_RCU_TABLE_FREE if SMP
+	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
@@ -567,6 +568,16 @@ config KEXEC
 	  current kernel, and to start another kernel. It is like a reboot
 	  but is independent of hardware/microcode support.
 
+config CRASH_DUMP
+	bool "kernel crash dumps"
+	depends on 64BIT
+	help
+	  Generate crash dump after being started by kexec.
+	  Crash dump kernels are loaded in the main kernel with kexec-tools
+	  into a specially reserved region and then later executed after
+	  a crash by kdump/kexec.
+	  For more details see Documentation/kdump/kdump.txt
+
 config ZFCPDUMP
 	def_bool n
 	prompt "zfcpdump support"
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 028f23ea81d1..465eca756feb 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -61,7 +61,7 @@ static unsigned long free_mem_end_ptr;
 
 extern _sclp_print_early(const char *);
 
-int puts(const char *s)
+static int puts(const char *s)
 {
 	_sclp_print_early(s);
 	return 0;
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 48884f89ab92..bd37d09b9d3c 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -14,6 +14,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <linux/module.h>
 #include "sha.h"
 #include "crypt_s390.h"
 
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 29c82c640a88..6cf8e26b3137 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -68,7 +68,7 @@ CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 6fe874fc5f8e..481f4f76f664 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -108,9 +108,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, int mode)
 		ret->i_gid = hypfs_info->gid;
 		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
 		if (mode & S_IFDIR)
-			ret->i_nlink = 2;
-		else
-			ret->i_nlink = 1;
+			set_nlink(ret, 2);
 	}
 	return ret;
 }
@@ -361,7 +359,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
 	} else if (mode & S_IFDIR) {
 		inode->i_op = &simple_dir_inode_operations;
 		inode->i_fop = &simple_dir_operations;
-		parent->d_inode->i_nlink++;
+		inc_nlink(parent->d_inode);
 	} else
 		BUG();
 	inode->i_private = data;
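Note: the hypfs change above is part of a tree-wide conversion away from writing inode->i_nlink directly. A minimal sketch of the resulting pattern (illustrative names, not hypfs code) using the VFS helpers set_nlink() and inc_nlink():

#include <linux/fs.h>

/* Illustrative only: set up link counts for a newly created directory
 * inode the way the patched code now does it. */
static void example_init_dir_links(struct inode *dir, struct inode *parent)
{
	set_nlink(dir, 2);	/* "." plus the entry in the parent */
	inc_nlink(parent);	/* the new directory's ".." back-reference */
}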
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 623f2fb71774..9381c92cc779 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -11,6 +11,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <asm/fcx.h>
+#include <asm/irq.h>
 
 /* structs from asm/cio.h */
 struct irb;
@@ -127,6 +128,7 @@ enum uc_todo {
  * @restore: callback for restoring after hibernation
  * @uc_handler: callback for unit check handler
  * @driver: embedded device driver structure
+ * @int_class: interruption class to use for accounting interrupts
  */
 struct ccw_driver {
 	struct ccw_device_id *ids;
@@ -144,6 +146,7 @@ struct ccw_driver {
 	int (*restore)(struct ccw_device *);
 	enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
 	struct device_driver driver;
+	enum interruption_class int_class;
 };
 
 extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
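Note: the new int_class member lets a CCW device driver declare under which interruption class (see the asm/irq.h hunk below) its I/O interrupts are accounted. A hedged sketch of a driver declaration using the new field; the device ID and all names are made up:

static struct ccw_device_id example_ids[] = {
	{ CCW_DEVICE(0x3990, 0) },	/* hypothetical control-unit type */
	{ /* end of list */ },
};

static struct ccw_driver example_ccw_driver = {
	.driver = {
		.name	= "example_ccw",
		.owner	= THIS_MODULE,
	},
	.ids		= example_ids,
	.int_class	= IOINT_DAS,	/* account interrupts as DASD I/O */
};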
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index da359ca6fe55..2e49748b27da 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -12,6 +12,7 @@
 #define PSW32_MASK_IO		0x02000000UL
 #define PSW32_MASK_EXT		0x01000000UL
 #define PSW32_MASK_KEY		0x00F00000UL
+#define PSW32_MASK_BASE		0x00080000UL	/* Always one */
 #define PSW32_MASK_MCHECK	0x00040000UL
 #define PSW32_MASK_WAIT		0x00020000UL
 #define PSW32_MASK_PSTATE	0x00010000UL
@@ -19,21 +20,19 @@
 #define PSW32_MASK_CC		0x00003000UL
 #define PSW32_MASK_PM		0x00000f00UL
 
-#define PSW32_ADDR_AMODE31	0x80000000UL
+#define PSW32_MASK_USER		0x00003F00UL
+
+#define PSW32_ADDR_AMODE	0x80000000UL
 #define PSW32_ADDR_INSN		0x7FFFFFFFUL
 
-#define PSW32_BASE_BITS		0x00080000UL
+#define PSW32_DEFAULT_KEY	(((u32) PAGE_DEFAULT_ACC) << 20)
 
 #define PSW32_ASC_PRIMARY	0x00000000UL
 #define PSW32_ASC_ACCREG	0x00004000UL
 #define PSW32_ASC_SECONDARY	0x00008000UL
 #define PSW32_ASC_HOME		0x0000C000UL
 
-#define PSW32_MASK_MERGE(CURRENT,NEW) \
-	(((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
-	 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
-
-extern long psw32_user_bits;
+extern u32 psw32_user_bits;
 
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"s390\0\0\0\0"
@@ -131,7 +130,8 @@ struct compat_statfs {
 	compat_fsid_t	f_fsid;
 	s32		f_namelen;
 	s32		f_frsize;
-	s32		f_spare[6];
+	s32		f_flags;
+	s32		f_spare[5];
 };
 
 #define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 97cc4403fabf..6940abfbe1d9 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -168,5 +168,6 @@ enum diag308_rc {
 
 extern int diag308(unsigned long subcode, void *addr);
 extern void diag308_reset(void);
+extern void store_status(void);
 
 #endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index ba7b01c726a3..ba6d85f88d50 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -8,7 +8,8 @@ enum interruption_class {
 	EXTERNAL_INTERRUPT,
 	IO_INTERRUPT,
 	EXTINT_CLK,
-	EXTINT_IPI,
+	EXTINT_EXC,
+	EXTINT_EMS,
 	EXTINT_TMR,
 	EXTINT_TLA,
 	EXTINT_PFL,
@@ -17,8 +18,8 @@ enum interruption_class {
 	EXTINT_SCP,
 	EXTINT_IUC,
 	EXTINT_CPM,
+	IOINT_CIO,
 	IOINT_QAI,
-	IOINT_QDI,
 	IOINT_DAS,
 	IOINT_C15,
 	IOINT_C70,
@@ -28,6 +29,7 @@ enum interruption_class {
 	IOINT_CLW,
 	IOINT_CTC,
 	IOINT_APB,
+	IOINT_CSC,
 	NMI_NMI,
 	NR_IRQS,
 };
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index bb729b84a21e..cf4e47b0948c 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -30,9 +30,15 @@
 /* Not more than 2GB */
 #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
 
+/* Maximum address we can use for the crash control pages */
+#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
+
 /* Allocate one page for the pdp and the second for the code */
 #define KEXEC_CONTROL_PAGE_SIZE 4096
 
+/* Alignment of crashkernel memory */
+#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE
+
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 00ff00dfb24c..24e18473d926 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -119,6 +119,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_lctlg;
 	u32 exit_program_interruption;
 	u32 exit_instr_and_program;
+	u32 deliver_external_call;
 	u32 deliver_emergency_signal;
 	u32 deliver_service_signal;
 	u32 deliver_virtio_interrupt;
@@ -138,11 +139,13 @@ struct kvm_vcpu_stat {
 	u32 instruction_stfl;
 	u32 instruction_tprot;
 	u32 instruction_sigp_sense;
+	u32 instruction_sigp_external_call;
 	u32 instruction_sigp_emergency;
 	u32 instruction_sigp_stop;
 	u32 instruction_sigp_arch;
 	u32 instruction_sigp_prefix;
 	u32 instruction_sigp_restart;
+	u32 diagnose_10;
 	u32 diagnose_44;
 };
 
@@ -174,6 +177,10 @@ struct kvm_s390_prefix_info {
 	__u32 address;
 };
 
+struct kvm_s390_extcall_info {
+	__u16 code;
+};
+
 struct kvm_s390_emerg_info {
 	__u16 code;
 };
@@ -186,6 +193,7 @@ struct kvm_s390_interrupt_info {
 		struct kvm_s390_ext_info ext;
 		struct kvm_s390_pgm_info pgm;
 		struct kvm_s390_emerg_info emerg;
+		struct kvm_s390_extcall_info extcall;
 		struct kvm_s390_prefix_info prefix;
 	};
 };
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index e85c911aabf0..9e13c7d56cc1 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -151,10 +151,8 @@ struct _lowcore {
 	 */
 	__u32	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e04 */
-
-	/* 64 bit save area */
-	__u64	save_area_64;			/* 0x0e08 */
-	__u8	pad_0x0e10[0x0f00-0x0e10];	/* 0x0e10 */
+	__u32	vmcore_info;			/* 0x0e08 */
+	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */
@@ -290,9 +288,7 @@ struct _lowcore {
 	 */
 	__u64	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e08 */
-
-	/* 64 bit save area */
-	__u64	save_area_64;			/* 0x0e0c */
+	__u64	vmcore_info;			/* 0x0e0c */
 	__u8	pad_0x0e14[0x0f00-0x0e14];	/* 0x0e14 */
 
 	/* Extended facility list */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index accb372ddc7e..f7ec548c2b9d 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -177,6 +177,7 @@ static inline int page_test_and_clear_young(unsigned long pfn)
 struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
+void arch_set_page_states(int make_stable);
 
 static inline int devmem_is_allowed(unsigned long pfn)
 {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c0cb794bb365..34ede0ea85a9 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -696,7 +696,9 @@ void gmap_disable(struct gmap *gmap);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long length);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
+void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
 
 /*
  * Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index a4b6229e5d4b..5f33d37d032c 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -33,6 +33,8 @@ static inline void get_cpu_id(struct cpuid *ptr)
 
 extern void s390_adjust_jiffies(void);
 extern int get_cpu_capability(unsigned int *);
+extern const struct seq_operations cpuinfo_op;
+extern int sysctl_ieee_emulation_warnings;
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -118,17 +120,17 @@
 /*
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do { \
-	regs->psw.mask = psw_user_bits; \
+	regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
 	regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
 	regs->gprs[15] = new_stackp; \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do { \
-	regs->psw.mask = psw_user32_bits; \
+	regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
 	regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
 	regs->gprs[15] = new_stackp; \
 	crst_table_downgrade(current->mm, 1UL << 31); \
 } while (0)
 
 /* Forward declaration, a strange C thing */
@@ -187,7 +189,6 @@ static inline void __load_psw(psw_t psw)
  * Set PSW mask to specified value, while leaving the
  * PSW addr pointing to the next instruction.
  */
-
 static inline void __load_psw_mask (unsigned long mask)
 {
 	unsigned long addr;
@@ -212,26 +213,37 @@ static inline void __load_psw_mask (unsigned long mask)
 		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
 #endif /* __s390x__ */
 }
 
 /*
- * Function to stop a processor until an interruption occurred
+ * Rewind PSW instruction address by specified number of bytes.
  */
-static inline void enabled_wait(void)
+static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 {
-	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
-			PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY);
-}
+#ifndef __s390x__
+	if (psw.addr & PSW_ADDR_AMODE)
+		/* 31 bit mode */
+		return (psw.addr - ilc) | PSW_ADDR_AMODE;
+	/* 24 bit mode */
+	return (psw.addr - ilc) & ((1UL << 24) - 1);
+#else
+	unsigned long mask;
 
+	mask = (psw.mask & PSW_MASK_EA) ? -1UL :
+	       (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
+	       (1UL << 24) - 1;
+	return (psw.addr - ilc) & mask;
+#endif
+}
+
 /*
  * Function to drop a processor into disabled wait state
  */
-
 static inline void ATTRIB_NORET disabled_wait(unsigned long code)
 {
 	unsigned long ctl_buf;
 	psw_t dw_psw;
 
-	dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+	dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
 	dw_psw.addr = code;
 	/*
 	 * Store status and then load disabled wait psw,
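Note: the new __rewind_psw() helper steps a PSW's instruction address backwards while honouring the 24/31/64-bit addressing-mode bits (PSW_MASK_EA/PSW_MASK_BA) introduced by the same series. A hypothetical use, assuming `ilc` already holds the length in bytes of the instruction to back over:

/* Illustrative only: re-execute the instruction that caused an exception
 * by rewinding the PSW address held in the saved registers. */
static void example_restart_insn(struct pt_regs *regs, unsigned long ilc)
{
	regs->psw.addr = __rewind_psw(regs->psw, ilc);
}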
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 62fd80c9e98c..a65846340d51 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -230,17 +230,21 @@ typedef struct
 #define PSW_MASK_IO		0x02000000UL
 #define PSW_MASK_EXT		0x01000000UL
 #define PSW_MASK_KEY		0x00F00000UL
+#define PSW_MASK_BASE		0x00080000UL	/* always one */
 #define PSW_MASK_MCHECK		0x00040000UL
 #define PSW_MASK_WAIT		0x00020000UL
 #define PSW_MASK_PSTATE		0x00010000UL
 #define PSW_MASK_ASC		0x0000C000UL
 #define PSW_MASK_CC		0x00003000UL
 #define PSW_MASK_PM		0x00000F00UL
+#define PSW_MASK_EA		0x00000000UL
+#define PSW_MASK_BA		0x00000000UL
+
+#define PSW_MASK_USER		0x00003F00UL
 
 #define PSW_ADDR_AMODE		0x80000000UL
 #define PSW_ADDR_INSN		0x7FFFFFFFUL
 
-#define PSW_BASE_BITS		0x00080000UL
 #define PSW_DEFAULT_KEY		(((unsigned long) PAGE_DEFAULT_ACC) << 20)
 
 #define PSW_ASC_PRIMARY		0x00000000UL
@@ -254,6 +258,7 @@ typedef struct
 #define PSW_MASK_DAT		0x0400000000000000UL
 #define PSW_MASK_IO		0x0200000000000000UL
 #define PSW_MASK_EXT		0x0100000000000000UL
+#define PSW_MASK_BASE		0x0000000000000000UL
 #define PSW_MASK_KEY		0x00F0000000000000UL
 #define PSW_MASK_MCHECK		0x0004000000000000UL
 #define PSW_MASK_WAIT		0x0002000000000000UL
@@ -261,12 +266,14 @@ typedef struct
 #define PSW_MASK_ASC		0x0000C00000000000UL
 #define PSW_MASK_CC		0x0000300000000000UL
 #define PSW_MASK_PM		0x00000F0000000000UL
+#define PSW_MASK_EA		0x0000000100000000UL
+#define PSW_MASK_BA		0x0000000080000000UL
+
+#define PSW_MASK_USER		0x00003F0180000000UL
 
 #define PSW_ADDR_AMODE		0x0000000000000000UL
 #define PSW_ADDR_INSN		0xFFFFFFFFFFFFFFFFUL
 
-#define PSW_BASE_BITS		0x0000000180000000UL
-#define PSW_BASE32_BITS		0x0000000080000000UL
 #define PSW_DEFAULT_KEY		(((unsigned long) PAGE_DEFAULT_ACC) << 52)
 
 #define PSW_ASC_PRIMARY		0x0000000000000000UL
@@ -279,18 +286,7 @@ typedef struct
 #ifdef __KERNEL__
 extern long psw_kernel_bits;
 extern long psw_user_bits;
-#ifdef CONFIG_64BIT
-extern long psw_user32_bits;
 #endif
-#endif
-
-/* This macro merges a NEW PSW mask specified by the user into
-   the currently active PSW mask CURRENT, modifying only those
-   bits in CURRENT that the user may be allowed to change: this
-   is the condition code and the program mask bits. */
-#define PSW_MASK_MERGE(CURRENT,NEW) \
-	(((CURRENT) & ~(PSW_MASK_CC|PSW_MASK_PM)) | \
-	 ((NEW) & (PSW_MASK_CC|PSW_MASK_PM)))
 
 /*
  * The s390_regs structure is used to define the elf_gregset_t.
@@ -328,8 +324,7 @@ struct pt_regs
 	psw_t psw;
 	unsigned long gprs[NUM_GPRS];
 	unsigned long orig_gpr2;
-	unsigned short ilc;
-	unsigned short svcnr;
+	unsigned int svc_code;
 };
 
 /*
@@ -487,6 +482,8 @@ typedef struct
 #define PTRACE_POKETEXT_AREA	0x5004
 #define PTRACE_POKEDATA_AREA	0x5005
 #define PTRACE_GET_LAST_BREAK	0x5006
+#define PTRACE_PEEK_SYSTEM_CALL	0x5007
+#define PTRACE_POKE_SYSTEM_CALL	0x5008
 
 /*
 * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 15c97625df8d..e63d13dd3bf5 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -46,6 +46,8 @@ struct qdesfmt0 {
 	u32	 : 16;
 } __attribute__ ((packed));
 
+#define QDR_AC_MULTI_BUFFER_ENABLE 0x01
+
 /**
  * struct qdr - queue description record (QDR)
  * @qfmt: queue format
@@ -123,6 +125,40 @@ struct slibe {
 };
 
 /**
+ * struct qaob - queue asynchronous operation block
+ * @res0: reserved parameters
+ * @res1: reserved parameter
+ * @res2: reserved parameter
+ * @res3: reserved parameter
+ * @aorc: asynchronous operation return code
+ * @flags: internal flags
+ * @cbtbs: control block type
+ * @sb_count: number of storage blocks
+ * @sba: storage block element addresses
+ * @dcount: size of storage block elements
+ * @user0: user defineable value
+ * @res4: reserved paramater
+ * @user1: user defineable value
+ * @user2: user defineable value
+ */
+struct qaob {
+	u64 res0[6];
+	u8 res1;
+	u8 res2;
+	u8 res3;
+	u8 aorc;
+	u8 flags;
+	u16 cbtbs;
+	u8 sb_count;
+	u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
+	u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
+	u64 user0;
+	u64 res4[2];
+	u64 user1;
+	u64 user2;
+} __attribute__ ((packed, aligned(256)));
+
+/**
  * struct slib - storage list information block (SLIB)
  * @nsliba: next SLIB address (if any)
  * @sla: SL address
@@ -222,9 +258,46 @@ struct slsb {
 	u8 val[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(256)));
 
+#define CHSC_AC2_MULTI_BUFFER_AVAILABLE	0x0080
+#define CHSC_AC2_MULTI_BUFFER_ENABLED	0x0040
 #define CHSC_AC2_DATA_DIV_AVAILABLE	0x0010
 #define CHSC_AC2_DATA_DIV_ENABLED	0x0002
 
+/**
+ * struct qdio_outbuf_state - SBAL related asynchronous operation information
+ *   (for communication with upper layer programs)
+ *   (only required for use with completion queues)
+ * @flags: flags indicating state of buffer
+ * @aob: pointer to QAOB used for the particular SBAL
+ * @user: pointer to upper layer program's state information related to SBAL
+ *        (stored in user1 data of QAOB)
+ */
+struct qdio_outbuf_state {
+	u8 flags;
+	struct qaob *aob;
+	void *user;
+};
+
+#define QDIO_OUTBUF_STATE_FLAG_NONE	0x00
+#define QDIO_OUTBUF_STATE_FLAG_PENDING	0x01
+
+#define CHSC_AC1_INITIATE_INPUTQ	0x80
+
+
+/* qdio adapter-characteristics-1 flag */
+#define AC1_SIGA_INPUT_NEEDED		0x40	/* process input queues */
+#define AC1_SIGA_OUTPUT_NEEDED		0x20	/* process output queues */
+#define AC1_SIGA_SYNC_NEEDED		0x10	/* ask hypervisor to sync */
+#define AC1_AUTOMATIC_SYNC_ON_THININT	0x08	/* set by hypervisor */
+#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI	0x04	/* set by hypervisor */
+#define AC1_SC_QEBSM_AVAILABLE		0x02	/* available for subchannel */
+#define AC1_SC_QEBSM_ENABLED		0x01	/* enabled for subchannel */
+
+#define CHSC_AC2_DATA_DIV_AVAILABLE	0x0010
+#define CHSC_AC2_DATA_DIV_ENABLED	0x0002
+
+#define CHSC_AC3_FORMAT2_CQ_AVAILABLE	0x8000
+
 struct qdio_ssqd_desc {
 	u8 flags;
 	u8:8;
@@ -243,8 +316,7 @@ struct qdio_ssqd_desc {
 	u64 sch_token;
 	u8 mro;
 	u8 mri;
-	u8:8;
-	u8 sbalic;
+	u16 qdioac3;
 	u16:16;
 	u8:8;
 	u8 mmwc;
@@ -280,13 +352,16 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
 * @no_output_qs: number of output queues
 * @input_handler: handler to be called for input queues
 * @output_handler: handler to be called for output queues
+ * @queue_start_poll: polling handlers (one per input queue or NULL)
 * @int_parm: interruption parameter
 * @input_sbal_addr_array: address of no_input_qs * 128 pointers
 * @output_sbal_addr_array: address of no_output_qs * 128 pointers
+ * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
 */
 struct qdio_initialize {
 	struct ccw_device *cdev;
 	unsigned char q_format;
+	unsigned char qdr_ac;
 	unsigned char adapter_name[8];
 	unsigned int qib_param_field_format;
 	unsigned char *qib_param_field;
@@ -297,11 +372,12 @@ struct qdio_initialize {
 	unsigned int no_output_qs;
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
-	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
 	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
 	void **output_sbal_addr_array;
+	struct qdio_outbuf_state *output_sbal_state_array;
 };
 
 #define QDIO_STATE_INACTIVE	0x00000002 /* after qdio_cleanup */
@@ -316,6 +392,7 @@ struct qdio_initialize {
 extern int qdio_allocate(struct qdio_initialize *);
 extern int qdio_establish(struct qdio_initialize *);
 extern int qdio_activate(struct ccw_device *);
+extern void qdio_release_aob(struct qaob *);
 extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
 		   unsigned int);
 extern int qdio_start_irq(struct ccw_device *, int);
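Note: taken together, the qdio.h additions describe the new completion-queue (CQ) support: a driver opts in through qdr_ac, supplies per-buffer qdio_outbuf_state entries, and can register one polling handler per input queue now that queue_start_poll is an array of function pointers. A rough, hypothetical sketch of how a driver might fill in the new fields (everything outside this header is made up, and the omitted fields are assumed to be set as before):

static struct qdio_outbuf_state example_outbuf_states[128];	/* one per output SBAL */
static void (*example_poll_handlers[1])(struct ccw_device *, int, unsigned long);

static int example_setup_qdio(struct ccw_device *cdev)
{
	struct qdio_initialize init = {
		.cdev			 = cdev,
		.qdr_ac			 = QDR_AC_MULTI_BUFFER_ENABLE,
		.no_input_qs		 = 1,
		.no_output_qs		 = 1,
		.queue_start_poll	 = example_poll_handlers,
		.output_sbal_state_array = example_outbuf_states,
		/* handlers, SBAL address arrays, ... as before */
	};
	int rc;

	rc = qdio_allocate(&init);
	if (rc)
		return rc;
	return qdio_establish(&init);
}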
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index f584f4a52581..3d6ad4ad2a3f 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -17,5 +17,5 @@ struct reset_call {
 
 extern void register_reset_call(struct reset_call *reset);
 extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void);
+extern void s390_reset_system(void (*func)(void *), void *data);
 #endif /* _ASM_S390_RESET_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index d5e2ef10537d..5a099714df04 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -26,15 +26,21 @@
 #define IPL_DEVICE	(*(unsigned long *) (0x10404))
 #define INITRD_START	(*(unsigned long *) (0x1040C))
 #define INITRD_SIZE	(*(unsigned long *) (0x10414))
+#define OLDMEM_BASE	(*(unsigned long *) (0x1041C))
+#define OLDMEM_SIZE	(*(unsigned long *) (0x10424))
 #else /* __s390x__ */
 #define IPL_DEVICE	(*(unsigned long *) (0x10400))
 #define INITRD_START	(*(unsigned long *) (0x10408))
 #define INITRD_SIZE	(*(unsigned long *) (0x10410))
+#define OLDMEM_BASE	(*(unsigned long *) (0x10418))
+#define OLDMEM_SIZE	(*(unsigned long *) (0x10420))
 #endif /* __s390x__ */
 #define COMMAND_LINE	((char *) (0x10480))
 
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY  1
+#define CHUNK_OLDMEM	 4
+#define CHUNK_CRASHK	 5
 
 struct mem_chunk {
 	unsigned long addr;
@@ -48,6 +54,8 @@ extern int memory_end_set;
 extern unsigned long memory_end;
 
 void detect_memory_layout(struct mem_chunk chunk[]);
+void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
+		     unsigned long size, int type);
 
 #define PRIMARY_SPACE_MODE	0
 #define ACCESS_REGISTER_MODE	1
@@ -106,6 +114,7 @@ extern unsigned int user_mode;
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
+#define ZFCPDUMP_HSA_SIZE_MAX	(64UL<<20)
 
 /*
 * Console mode. Override with conmode=
@@ -134,10 +143,14 @@ extern char kernel_nss_name[];
 #define IPL_DEVICE	0x10404
 #define INITRD_START	0x1040C
 #define INITRD_SIZE	0x10414
+#define OLDMEM_BASE	0x1041C
+#define OLDMEM_SIZE	0x10424
 #else /* __s390x__ */
 #define IPL_DEVICE	0x10400
 #define INITRD_START	0x10408
 #define INITRD_SIZE	0x10410
+#define OLDMEM_BASE	0x10418
+#define OLDMEM_SIZE	0x10420
 #endif /* __s390x__ */
 #define COMMAND_LINE	0x10480
 
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 0addc6466d95..ca3f8814e361 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -72,6 +72,6 @@ extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
 
 #define UDIV_NEEDS_NORMALIZATION 0
 
-#define abort() return 0
+#define abort() BUG()
 
 #define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 045e009fc164..ab47a69fdf07 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -33,6 +33,7 @@ extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
 extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
 			      int from, int to);
+extern void smp_restart_with_online_cpu(void);
 extern void smp_restart_cpu(void);
 
 /*
@@ -64,6 +65,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 	func(data);
 }
 
+static inline void smp_restart_with_online_cpu(void)
+{
+}
+
 #define smp_vcpu_scheduled	(1)
 
 #endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 56612fc8186e..fd94dfec8d08 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -13,6 +13,8 @@
 
 #include <linux/smp.h>
 
+extern int spin_retry;
+
 static inline int
 _raw_compare_and_swap(volatile unsigned int *lock,
 		      unsigned int old, unsigned int new)
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 5c0246b955d8..b239ff53b189 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -13,6 +13,7 @@
 #define _ASM_SYSCALL_H	1
 
 #include <linux/sched.h>
+#include <linux/err.h>
 #include <asm/ptrace.h>
 
 /*
@@ -25,7 +26,8 @@ extern const unsigned int sys_call_table[];
 static inline long syscall_get_nr(struct task_struct *task,
 				  struct pt_regs *regs)
 {
-	return regs->svcnr ? regs->svcnr : -1;
+	return test_tsk_thread_flag(task, TIF_SYSCALL) ?
+		(regs->svc_code & 0xffff) : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
@@ -37,7 +39,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
 				     struct pt_regs *regs)
 {
-	return (regs->gprs[2] >= -4096UL) ? -regs->gprs[2] : 0;
+	return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0;
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
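Note: syscall_get_error() now relies on IS_ERR_VALUE() from the newly included linux/err.h instead of the open-coded -4096UL comparison, and it no longer negates the value, so callers see the conventional negative errno. For reference, a sketch of what that check amounts to (paraphrased; MAX_ERRNO is 4095 in linux/err.h):

#include <linux/err.h>

/* Illustrative only: an error return is any value in the topmost page of
 * the unsigned long range, i.e. -MAX_ERRNO .. -1 viewed as unsigned. */
static long example_syscall_error(unsigned long gpr2)
{
	return IS_ERR_VALUE(gpr2) ? (long) gpr2 : 0;
}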
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 6582f69f2389..ef573c1d71a7 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,6 +20,8 @@
 
 struct task_struct;
 
+extern int sysctl_userprocess_debug;
+
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_per_regs(struct task_struct *task);
 
@@ -114,6 +116,8 @@ extern void pfault_fini(void);
 extern void cmma_init(void);
 extern int memcpy_real(void *, void *, size_t);
 extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern int copy_to_user_real(void __user *dest, void *src, size_t count);
+extern int copy_from_user_real(void *dest, void __user *src, size_t count);
 
 #define finish_arch_switch(prev) do { \
 	set_fs(current->thread.mm_segment); \
@@ -210,8 +214,10 @@ __set_psw_mask(unsigned long mask)
 	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
 }
 
-#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
-#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
+#define local_mcck_enable() \
+	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+#define local_mcck_disable() \
+	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
 
 #ifdef CONFIG_SMP
 
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 1a5dbb6f1495..a23183423b14 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -48,6 +48,7 @@ struct thread_info {
 	unsigned int		cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	struct restart_block	restart_block;
+	unsigned int		system_call;
 	__u64			user_timer;
 	__u64			system_timer;
 	unsigned long		last_break;	/* last breaking-event-address. */
@@ -84,10 +85,10 @@ static inline struct thread_info *current_thread_info(void)
 /*
 * thread information flags bit numbers
 */
+#define TIF_SYSCALL		0	/* inside a system call */
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_RESTART_SVC		4	/* restart svc with new svc number */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -103,11 +104,11 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SINGLE_STEP		20	/* This task is single stepped */
 #define TIF_FREEZE		21	/* thread is freezing for suspend */
 
+#define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
-#define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -117,7 +118,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIE		(1<<TIF_SIE)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
-#define _TIF_SINGLE_STEP	(1<<TIF_FREEZE)
+#define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 88829a40af6f..d610bef9c5e9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -86,6 +86,17 @@ static inline void get_clock_ext(char *clk)
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
+static inline unsigned long long get_clock_fast(void)
+{
+	unsigned long long clk;
+
+	if (test_facility(25))
+		asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
+	else
+		clk = get_clock();
+	return clk;
+}
+
 static inline unsigned long long get_clock_xt(void)
 {
 	unsigned char clk[16];
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 304445382382..1d8648cf2fea 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -59,6 +59,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 }
 #else
 #define __tlb_flush_full(mm)	__tlb_flush_local()
+#define __tlb_flush_global()	__tlb_flush_local()
 #endif
 
 /*
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index df3732249baa..dd4f07640919 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_FUNCTION_TRACER)	+= $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2b45591e1582..751318765e2e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -45,8 +45,7 @@ int main(void)
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
 	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
-	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
-	DEFINE(__PT_SVCNR, offsetof(struct pt_regs, svcnr));
+	DEFINE(__PT_SVC_CODE, offsetof(struct pt_regs, svc_code));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -141,7 +140,6 @@ int main(void)
 	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
-	DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 255435663bf8..f8828d38fa6e 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -86,6 +86,8 @@ s390_base_pgm_handler_fn:
 ENTRY(diag308_reset)
 	larl	%r4,.Lctlregs		# Save control registers
 	stctg	%c0,%c15,0(%r4)
+	larl	%r4,.Lfpctl		# Floating point control register
+	stfpc	0(%r4)
 	larl	%r4,.Lrestart_psw	# Setup restart PSW at absolute 0
 	lghi	%r3,0
 	lg	%r4,0(%r4)		# Save PSW
@@ -99,6 +101,8 @@ ENTRY(diag308_reset)
 	sam64				# Switch to 64 bit addressing mode
 	larl	%r4,.Lctlregs		# Restore control registers
 	lctlg	%c0,%c15,0(%r4)
+	larl	%r4,.Lfpctl		# Restore floating point ctl register
+	lfpc	0(%r4)
 	br	%r14
 .align 16
 .Lrestart_psw:
@@ -110,6 +114,8 @@ ENTRY(diag308_reset)
 	.rept	16
 	.quad	0
 	.endr
+.Lfpctl:
+	.long	0
 .previous
 
 #else /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 53acaa86dd94..84a982898448 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -60,12 +60,9 @@ | |||
60 | 60 | ||
61 | #include "compat_linux.h" | 61 | #include "compat_linux.h" |
62 | 62 | ||
63 | long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | 63 | u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | |
64 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 64 | PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK | |
65 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | 65 | PSW32_MASK_PSTATE | PSW32_ASC_HOME; |
66 | long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | | ||
67 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
68 | PSW32_MASK_PSTATE); | ||
69 | 66 | ||
70 | /* For this source file, we want overflow handling. */ | 67 | /* For this source file, we want overflow handling. */ |
71 | 68 | ||
@@ -365,12 +362,7 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | |||
365 | if (set) { | 362 | if (set) { |
366 | if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) | 363 | if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) |
367 | return -EFAULT; | 364 | return -EFAULT; |
368 | switch (_NSIG_WORDS) { | 365 | s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); |
369 | case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); | ||
370 | case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); | ||
371 | case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); | ||
372 | case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); | ||
373 | } | ||
374 | } | 366 | } |
375 | set_fs (KERNEL_DS); | 367 | set_fs (KERNEL_DS); |
376 | ret = sys_rt_sigprocmask(how, | 368 | ret = sys_rt_sigprocmask(how, |
@@ -380,12 +372,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | |||
380 | set_fs (old_fs); | 372 | set_fs (old_fs); |
381 | if (ret) return ret; | 373 | if (ret) return ret; |
382 | if (oset) { | 374 | if (oset) { |
383 | switch (_NSIG_WORDS) { | 375 | s32.sig[1] = (s.sig[0] >> 32); |
384 | case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; | 376 | s32.sig[0] = s.sig[0]; |
385 | case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; | ||
386 | case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; | ||
387 | case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; | ||
388 | } | ||
389 | if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) | 377 | if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) |
390 | return -EFAULT; | 378 | return -EFAULT; |
391 | } | 379 | } |
@@ -404,12 +392,8 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, | |||
404 | ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); | 392 | ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); |
405 | set_fs (old_fs); | 393 | set_fs (old_fs); |
406 | if (!ret) { | 394 | if (!ret) { |
407 | switch (_NSIG_WORDS) { | 395 | s32.sig[1] = (s.sig[0] >> 32); |
408 | case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; | 396 | s32.sig[0] = s.sig[0]; |
409 | case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; | ||
410 | case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; | ||
411 | case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; | ||
412 | } | ||
413 | if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) | 397 | if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) |
414 | return -EFAULT; | 398 | return -EFAULT; |
415 | } | 399 | } |
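The _NSIG_WORDS switch statements removed above were dead generality: s390 has 64 signals, so the 64-bit kernel sigset_t is a single unsigned long and the 31-bit compat_sigset_t is exactly two 32-bit words. A minimal sketch of the conversion the simplified code performs in both directions (the helper names are illustrative, not part of the patch):

    /* compat (two 32-bit words) -> kernel (one 64-bit word) */
    static inline void sigset32_to_sigset(sigset_t *s, const compat_sigset_t *s32)
    {
            s->sig[0] = s32->sig[0] | ((unsigned long) s32->sig[1] << 32);
    }

    /* kernel -> compat */
    static inline void sigset_to_sigset32(compat_sigset_t *s32, const sigset_t *s)
    {
            s32->sig[1] = s->sig[0] >> 32;
            s32->sig[0] = s->sig[0];
    }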
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index a9a285b8c4ad..4f68c81d3ffa 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -141,7 +141,8 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
141 | break; | 141 | break; |
142 | case __SI_FAULT >> 16: | 142 | case __SI_FAULT >> 16: |
143 | err |= __get_user(tmp, &from->si_addr); | 143 | err |= __get_user(tmp, &from->si_addr); |
144 | to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN); | 144 | to->si_addr = (void __force __user *) |
145 | (u64) (tmp & PSW32_ADDR_INSN); | ||
145 | break; | 146 | break; |
146 | case __SI_POLL >> 16: | 147 | case __SI_POLL >> 16: |
147 | err |= __get_user(to->si_band, &from->si_band); | 148 | err |= __get_user(to->si_band, &from->si_band); |
@@ -213,16 +214,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | |||
213 | ret = get_user(sa_handler, &act->sa_handler); | 214 | ret = get_user(sa_handler, &act->sa_handler); |
214 | ret |= __copy_from_user(&set32, &act->sa_mask, | 215 | ret |= __copy_from_user(&set32, &act->sa_mask, |
215 | sizeof(compat_sigset_t)); | 216 | sizeof(compat_sigset_t)); |
216 | switch (_NSIG_WORDS) { | 217 | new_ka.sa.sa_mask.sig[0] = |
217 | case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | 218 | set32.sig[0] | (((long)set32.sig[1]) << 32); |
218 | | (((long)set32.sig[7]) << 32); | ||
219 | case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | ||
220 | | (((long)set32.sig[5]) << 32); | ||
221 | case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | ||
222 | | (((long)set32.sig[3]) << 32); | ||
223 | case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | ||
224 | | (((long)set32.sig[1]) << 32); | ||
225 | } | ||
226 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 219 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); |
227 | 220 | ||
228 | if (ret) | 221 | if (ret) |
@@ -233,20 +226,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | |||
233 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 226 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
234 | 227 | ||
235 | if (!ret && oact) { | 228 | if (!ret && oact) { |
236 | switch (_NSIG_WORDS) { | 229 | set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); |
237 | case 4: | 230 | set32.sig[0] = old_ka.sa.sa_mask.sig[0]; |
238 | set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); | ||
239 | set32.sig[6] = old_ka.sa.sa_mask.sig[3]; | ||
240 | case 3: | ||
241 | set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); | ||
242 | set32.sig[4] = old_ka.sa.sa_mask.sig[2]; | ||
243 | case 2: | ||
244 | set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); | ||
245 | set32.sig[2] = old_ka.sa.sa_mask.sig[1]; | ||
246 | case 1: | ||
247 | set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); | ||
248 | set32.sig[0] = old_ka.sa.sa_mask.sig[0]; | ||
249 | } | ||
250 | ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); | 231 | ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); |
251 | ret |= __copy_to_user(&oact->sa_mask, &set32, | 232 | ret |= __copy_to_user(&oact->sa_mask, &set32, |
252 | sizeof(compat_sigset_t)); | 233 | sizeof(compat_sigset_t)); |
@@ -300,9 +281,10 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) | |||
300 | _s390_regs_common32 regs32; | 281 | _s390_regs_common32 regs32; |
301 | int err, i; | 282 | int err, i; |
302 | 283 | ||
303 | regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits, | 284 | regs32.psw.mask = psw32_user_bits | |
304 | (__u32)(regs->psw.mask >> 32)); | 285 | ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER); |
305 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; | 286 | regs32.psw.addr = (__u32) regs->psw.addr | |
287 | (__u32)(regs->psw.mask & PSW_MASK_BA); | ||
306 | for (i = 0; i < NUM_GPRS; i++) | 288 | for (i = 0; i < NUM_GPRS; i++) |
307 | regs32.gprs[i] = (__u32) regs->gprs[i]; | 289 | regs32.gprs[i] = (__u32) regs->gprs[i]; |
308 | save_access_regs(current->thread.acrs); | 290 | save_access_regs(current->thread.acrs); |
@@ -327,8 +309,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | |||
327 | err = __copy_from_user(®s32, &sregs->regs, sizeof(regs32)); | 309 | err = __copy_from_user(®s32, &sregs->regs, sizeof(regs32)); |
328 | if (err) | 310 | if (err) |
329 | return err; | 311 | return err; |
330 | regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, | 312 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
331 | (__u64)regs32.psw.mask << 32); | 313 | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | |
314 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); | ||
332 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); | 315 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); |
333 | for (i = 0; i < NUM_GPRS; i++) | 316 | for (i = 0; i < NUM_GPRS; i++) |
334 | regs->gprs[i] = (__u64) regs32.gprs[i]; | 317 | regs->gprs[i] = (__u64) regs32.gprs[i]; |
@@ -342,7 +325,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | |||
342 | return err; | 325 | return err; |
343 | 326 | ||
344 | restore_fp_regs(¤t->thread.fp_regs); | 327 | restore_fp_regs(¤t->thread.fp_regs); |
345 | regs->svcnr = 0; /* disable syscall checks */ | 328 | clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ |
346 | return 0; | 329 | return 0; |
347 | } | 330 | } |
348 | 331 | ||
@@ -496,11 +479,11 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
496 | /* Set up to return from userspace. If provided, use a stub | 479 | /* Set up to return from userspace. If provided, use a stub |
497 | already in userspace. */ | 480 | already in userspace. */ |
498 | if (ka->sa.sa_flags & SA_RESTORER) { | 481 | if (ka->sa.sa_flags & SA_RESTORER) { |
499 | regs->gprs[14] = (__u64) ka->sa.sa_restorer; | 482 | regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE; |
500 | } else { | 483 | } else { |
501 | regs->gprs[14] = (__u64) frame->retcode; | 484 | regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE; |
502 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, | 485 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, |
503 | (u16 __user *)(frame->retcode))) | 486 | (u16 __force __user *)(frame->retcode))) |
504 | goto give_sigsegv; | 487 | goto give_sigsegv; |
505 | } | 488 | } |
506 | 489 | ||
@@ -509,11 +492,12 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
509 | goto give_sigsegv; | 492 | goto give_sigsegv; |
510 | 493 | ||
511 | /* Set up registers for signal handler */ | 494 | /* Set up registers for signal handler */ |
512 | regs->gprs[15] = (__u64) frame; | 495 | regs->gprs[15] = (__force __u64) frame; |
513 | regs->psw.addr = (__u64) ka->sa.sa_handler; | 496 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ |
497 | regs->psw.addr = (__force __u64) ka->sa.sa_handler; | ||
514 | 498 | ||
515 | regs->gprs[2] = map_signal(sig); | 499 | regs->gprs[2] = map_signal(sig); |
516 | regs->gprs[3] = (__u64) &frame->sc; | 500 | regs->gprs[3] = (__force __u64) &frame->sc; |
517 | 501 | ||
518 | /* We forgot to include these in the sigcontext. | 502 | /* We forgot to include these in the sigcontext. |
519 | To avoid breaking binary compatibility, they are passed as args. */ | 503 | To avoid breaking binary compatibility, they are passed as args. */ |
@@ -521,7 +505,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
521 | regs->gprs[5] = current->thread.prot_addr; | 505 | regs->gprs[5] = current->thread.prot_addr; |
522 | 506 | ||
523 | /* Place signal number on stack to allow backtrace from handler. */ | 507 | /* Place signal number on stack to allow backtrace from handler. */ |
524 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) | 508 | if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo)) |
525 | goto give_sigsegv; | 509 | goto give_sigsegv; |
526 | return 0; | 510 | return 0; |
527 | 511 | ||
@@ -564,20 +548,21 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
564 | } else { | 548 | } else { |
565 | regs->gprs[14] = (__u64) frame->retcode; | 549 | regs->gprs[14] = (__u64) frame->retcode; |
566 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | 550 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, |
567 | (u16 __user *)(frame->retcode)); | 551 | (u16 __force __user *)(frame->retcode)); |
568 | } | 552 | } |
569 | 553 | ||
570 | /* Set up backchain. */ | 554 | /* Set up backchain. */ |
571 | if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) | 555 | if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) |
572 | goto give_sigsegv; | 556 | goto give_sigsegv; |
573 | 557 | ||
574 | /* Set up registers for signal handler */ | 558 | /* Set up registers for signal handler */ |
575 | regs->gprs[15] = (__u64) frame; | 559 | regs->gprs[15] = (__force __u64) frame; |
560 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ | ||
576 | regs->psw.addr = (__u64) ka->sa.sa_handler; | 561 | regs->psw.addr = (__u64) ka->sa.sa_handler; |
577 | 562 | ||
578 | regs->gprs[2] = map_signal(sig); | 563 | regs->gprs[2] = map_signal(sig); |
579 | regs->gprs[3] = (__u64) &frame->info; | 564 | regs->gprs[3] = (__force __u64) &frame->info; |
580 | regs->gprs[4] = (__u64) &frame->uc; | 565 | regs->gprs[4] = (__force __u64) &frame->uc; |
581 | return 0; | 566 | return 0; |
582 | 567 | ||
583 | give_sigsegv: | 568 | give_sigsegv: |
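save_sigregs32()/restore_sigregs32() above stop using the old PSW32_MASK_MERGE() macro and merge the PSW by hand: the upper word of the 64-bit PSW mask is what a 31-bit program sees as its PSW mask, only the PSW32_MASK_USER bits may come from user space, and the 31-bit addressing mode travels as PSW_MASK_BA on the kernel side and PSW32_ADDR_AMODE in the compat frame. Roughly, as a C sketch assembled from the expressions in the hunks (variable names follow the hunks; this is not literal patch code):

    /* save: what the 31-bit signal frame gets to see */
    regs32.psw.mask = psw32_user_bits |
                      ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);

    /* restore: keep the kernel-owned bits, take only the user-controllable
     * bits and the addressing-mode bit back from the frame */
    regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                     ((__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32) |
                     (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);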
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 7526db6bf501..5006a1d9f5d0 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -1623,8 +1623,7 @@ ENTRY(sys_syncfs_wrapper) | |||
1623 | lgfr %r2,%r2 # int | 1623 | lgfr %r2,%r2 # int |
1624 | jg sys_syncfs | 1624 | jg sys_syncfs |
1625 | 1625 | ||
1626 | .globl sys_setns_wrapper | 1626 | ENTRY(sys_setns_wrapper) |
1627 | sys_setns_wrapper: | ||
1628 | lgfr %r2,%r2 # int | 1627 | lgfr %r2,%r2 # int |
1629 | lgfr %r3,%r3 # int | 1628 | lgfr %r3,%r3 # int |
1630 | jg sys_setns | 1629 | jg sys_setns |
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c new file mode 100644 index 000000000000..39f8fd4438fc --- /dev/null +++ b/arch/s390/kernel/crash_dump.c | |||
@@ -0,0 +1,426 @@ | |||
1 | /* | ||
2 | * S390 kdump implementation | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/crash_dump.h> | ||
9 | #include <asm/lowcore.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/gfp.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/crash_dump.h> | ||
15 | #include <linux/bootmem.h> | ||
16 | #include <linux/elf.h> | ||
17 | #include <asm/ipl.h> | ||
18 | |||
19 | #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) | ||
20 | #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) | ||
21 | #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) | ||
22 | |||
23 | /* | ||
24 | * Copy one page from "oldmem" | ||
25 | * | ||
26 | * For the kdump reserved memory this function performs a swap operation: | ||
27 | * - [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0, OLDMEM_SIZE]. | ||
28 | * - [0, OLDMEM_SIZE] is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE] | ||
29 | */ | ||
30 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | ||
31 | size_t csize, unsigned long offset, int userbuf) | ||
32 | { | ||
33 | unsigned long src; | ||
34 | |||
35 | if (!csize) | ||
36 | return 0; | ||
37 | |||
38 | src = (pfn << PAGE_SHIFT) + offset; | ||
39 | if (src < OLDMEM_SIZE) | ||
40 | src += OLDMEM_BASE; | ||
41 | else if (src > OLDMEM_BASE && | ||
42 | src < OLDMEM_BASE + OLDMEM_SIZE) | ||
43 | src -= OLDMEM_BASE; | ||
44 | if (userbuf) | ||
45 | copy_to_user_real((void __force __user *) buf, (void *) src, | ||
46 | csize); | ||
47 | else | ||
48 | memcpy_real(buf, (void *) src, csize); | ||
49 | return csize; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Copy memory from old kernel | ||
54 | */ | ||
55 | static int copy_from_oldmem(void *dest, void *src, size_t count) | ||
56 | { | ||
57 | unsigned long copied = 0; | ||
58 | int rc; | ||
59 | |||
60 | if ((unsigned long) src < OLDMEM_SIZE) { | ||
61 | copied = min(count, OLDMEM_SIZE - (unsigned long) src); | ||
62 | rc = memcpy_real(dest, src + OLDMEM_BASE, copied); | ||
63 | if (rc) | ||
64 | return rc; | ||
65 | } | ||
66 | return memcpy_real(dest + copied, src + copied, count - copied); | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Alloc memory and panic in case of ENOMEM | ||
71 | */ | ||
72 | static void *kzalloc_panic(int len) | ||
73 | { | ||
74 | void *rc; | ||
75 | |||
76 | rc = kzalloc(len, GFP_KERNEL); | ||
77 | if (!rc) | ||
78 | panic("s390 kdump kzalloc (%d) failed", len); | ||
79 | return rc; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Get memory layout and create hole for oldmem | ||
84 | */ | ||
85 | static struct mem_chunk *get_memory_layout(void) | ||
86 | { | ||
87 | struct mem_chunk *chunk_array; | ||
88 | |||
89 | chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk)); | ||
90 | detect_memory_layout(chunk_array); | ||
91 | create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK); | ||
92 | return chunk_array; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Initialize ELF note | ||
97 | */ | ||
98 | static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len, | ||
99 | const char *name) | ||
100 | { | ||
101 | Elf64_Nhdr *note; | ||
102 | u64 len; | ||
103 | |||
104 | note = (Elf64_Nhdr *)buf; | ||
105 | note->n_namesz = strlen(name) + 1; | ||
106 | note->n_descsz = d_len; | ||
107 | note->n_type = type; | ||
108 | len = sizeof(Elf64_Nhdr); | ||
109 | |||
110 | memcpy(buf + len, name, note->n_namesz); | ||
111 | len = roundup(len + note->n_namesz, 4); | ||
112 | |||
113 | memcpy(buf + len, desc, note->n_descsz); | ||
114 | len = roundup(len + note->n_descsz, 4); | ||
115 | |||
116 | return PTR_ADD(buf, len); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Initialize prstatus note | ||
121 | */ | ||
122 | static void *nt_prstatus(void *ptr, struct save_area *sa) | ||
123 | { | ||
124 | struct elf_prstatus nt_prstatus; | ||
125 | static int cpu_nr = 1; | ||
126 | |||
127 | memset(&nt_prstatus, 0, sizeof(nt_prstatus)); | ||
128 | memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs)); | ||
129 | memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw)); | ||
130 | memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs)); | ||
131 | nt_prstatus.pr_pid = cpu_nr; | ||
132 | cpu_nr++; | ||
133 | |||
134 | return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus), | ||
135 | "CORE"); | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Initialize fpregset (floating point) note | ||
140 | */ | ||
141 | static void *nt_fpregset(void *ptr, struct save_area *sa) | ||
142 | { | ||
143 | elf_fpregset_t nt_fpregset; | ||
144 | |||
145 | memset(&nt_fpregset, 0, sizeof(nt_fpregset)); | ||
146 | memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg)); | ||
147 | memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs)); | ||
148 | |||
149 | return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset), | ||
150 | "CORE"); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Initialize timer note | ||
155 | */ | ||
156 | static void *nt_s390_timer(void *ptr, struct save_area *sa) | ||
157 | { | ||
158 | return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer), | ||
159 | KEXEC_CORE_NOTE_NAME); | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Initialize TOD clock comparator note | ||
164 | */ | ||
165 | static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa) | ||
166 | { | ||
167 | return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp, | ||
168 | sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Initialize TOD programmable register note | ||
173 | */ | ||
174 | static void *nt_s390_tod_preg(void *ptr, struct save_area *sa) | ||
175 | { | ||
176 | return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg, | ||
177 | sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Initialize control register note | ||
182 | */ | ||
183 | static void *nt_s390_ctrs(void *ptr, struct save_area *sa) | ||
184 | { | ||
185 | return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs, | ||
186 | sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Initialize prefix register note | ||
191 | */ | ||
192 | static void *nt_s390_prefix(void *ptr, struct save_area *sa) | ||
193 | { | ||
194 | return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg, | ||
195 | sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME); | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * Fill ELF notes for one CPU with save area registers | ||
200 | */ | ||
201 | void *fill_cpu_elf_notes(void *ptr, struct save_area *sa) | ||
202 | { | ||
203 | ptr = nt_prstatus(ptr, sa); | ||
204 | ptr = nt_fpregset(ptr, sa); | ||
205 | ptr = nt_s390_timer(ptr, sa); | ||
206 | ptr = nt_s390_tod_cmp(ptr, sa); | ||
207 | ptr = nt_s390_tod_preg(ptr, sa); | ||
208 | ptr = nt_s390_ctrs(ptr, sa); | ||
209 | ptr = nt_s390_prefix(ptr, sa); | ||
210 | return ptr; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Initialize prpsinfo note (new kernel) | ||
215 | */ | ||
216 | static void *nt_prpsinfo(void *ptr) | ||
217 | { | ||
218 | struct elf_prpsinfo prpsinfo; | ||
219 | |||
220 | memset(&prpsinfo, 0, sizeof(prpsinfo)); | ||
221 | prpsinfo.pr_sname = 'R'; | ||
222 | strcpy(prpsinfo.pr_fname, "vmlinux"); | ||
223 | return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo), | ||
224 | KEXEC_CORE_NOTE_NAME); | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * Initialize vmcoreinfo note (new kernel) | ||
229 | */ | ||
230 | static void *nt_vmcoreinfo(void *ptr) | ||
231 | { | ||
232 | char nt_name[11], *vmcoreinfo; | ||
233 | Elf64_Nhdr note; | ||
234 | void *addr; | ||
235 | |||
236 | if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) | ||
237 | return ptr; | ||
238 | memset(nt_name, 0, sizeof(nt_name)); | ||
239 | if (copy_from_oldmem(¬e, addr, sizeof(note))) | ||
240 | return ptr; | ||
241 | if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1)) | ||
242 | return ptr; | ||
243 | if (strcmp(nt_name, "VMCOREINFO") != 0) | ||
244 | return ptr; | ||
245 | vmcoreinfo = kzalloc_panic(note.n_descsz + 1); | ||
246 | if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz)) | ||
247 | return ptr; | ||
248 | vmcoreinfo[note.n_descsz] = 0; | ||
249 | return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO"); | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Initialize ELF header (new kernel) | ||
254 | */ | ||
255 | static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) | ||
256 | { | ||
257 | memset(ehdr, 0, sizeof(*ehdr)); | ||
258 | memcpy(ehdr->e_ident, ELFMAG, SELFMAG); | ||
259 | ehdr->e_ident[EI_CLASS] = ELFCLASS64; | ||
260 | ehdr->e_ident[EI_DATA] = ELFDATA2MSB; | ||
261 | ehdr->e_ident[EI_VERSION] = EV_CURRENT; | ||
262 | memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD); | ||
263 | ehdr->e_type = ET_CORE; | ||
264 | ehdr->e_machine = EM_S390; | ||
265 | ehdr->e_version = EV_CURRENT; | ||
266 | ehdr->e_phoff = sizeof(Elf64_Ehdr); | ||
267 | ehdr->e_ehsize = sizeof(Elf64_Ehdr); | ||
268 | ehdr->e_phentsize = sizeof(Elf64_Phdr); | ||
269 | ehdr->e_phnum = mem_chunk_cnt + 1; | ||
270 | return ehdr + 1; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Return CPU count for ELF header (new kernel) | ||
275 | */ | ||
276 | static int get_cpu_cnt(void) | ||
277 | { | ||
278 | int i, cpus = 0; | ||
279 | |||
280 | for (i = 0; zfcpdump_save_areas[i]; i++) { | ||
281 | if (zfcpdump_save_areas[i]->pref_reg == 0) | ||
282 | continue; | ||
283 | cpus++; | ||
284 | } | ||
285 | return cpus; | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * Return memory chunk count for ELF header (new kernel) | ||
290 | */ | ||
291 | static int get_mem_chunk_cnt(void) | ||
292 | { | ||
293 | struct mem_chunk *chunk_array, *mem_chunk; | ||
294 | int i, cnt = 0; | ||
295 | |||
296 | chunk_array = get_memory_layout(); | ||
297 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
298 | mem_chunk = &chunk_array[i]; | ||
299 | if (chunk_array[i].type != CHUNK_READ_WRITE && | ||
300 | chunk_array[i].type != CHUNK_READ_ONLY) | ||
301 | continue; | ||
302 | if (mem_chunk->size == 0) | ||
303 | continue; | ||
304 | cnt++; | ||
305 | } | ||
306 | kfree(chunk_array); | ||
307 | return cnt; | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Relocate pointer in order to allow the vmcore code to access the data | ||
312 | */ | ||
313 | static inline unsigned long relocate(unsigned long addr) | ||
314 | { | ||
315 | return OLDMEM_BASE + addr; | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Initialize ELF loads (new kernel) | ||
320 | */ | ||
321 | static int loads_init(Elf64_Phdr *phdr, u64 loads_offset) | ||
322 | { | ||
323 | struct mem_chunk *chunk_array, *mem_chunk; | ||
324 | int i; | ||
325 | |||
326 | chunk_array = get_memory_layout(); | ||
327 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
328 | mem_chunk = &chunk_array[i]; | ||
329 | if (mem_chunk->size == 0) | ||
330 | break; | ||
331 | if (chunk_array[i].type != CHUNK_READ_WRITE && | ||
332 | chunk_array[i].type != CHUNK_READ_ONLY) | ||
333 | continue; | ||
334 | else | ||
335 | phdr->p_filesz = mem_chunk->size; | ||
336 | phdr->p_type = PT_LOAD; | ||
337 | phdr->p_offset = mem_chunk->addr; | ||
338 | phdr->p_vaddr = mem_chunk->addr; | ||
339 | phdr->p_paddr = mem_chunk->addr; | ||
340 | phdr->p_memsz = mem_chunk->size; | ||
341 | phdr->p_flags = PF_R | PF_W | PF_X; | ||
342 | phdr->p_align = PAGE_SIZE; | ||
343 | phdr++; | ||
344 | } | ||
345 | kfree(chunk_array); | ||
346 | return i; | ||
347 | } | ||
348 | |||
349 | /* | ||
350 | * Initialize notes (new kernel) | ||
351 | */ | ||
352 | static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) | ||
353 | { | ||
354 | struct save_area *sa; | ||
355 | void *ptr_start = ptr; | ||
356 | int i; | ||
357 | |||
358 | ptr = nt_prpsinfo(ptr); | ||
359 | |||
360 | for (i = 0; zfcpdump_save_areas[i]; i++) { | ||
361 | sa = zfcpdump_save_areas[i]; | ||
362 | if (sa->pref_reg == 0) | ||
363 | continue; | ||
364 | ptr = fill_cpu_elf_notes(ptr, sa); | ||
365 | } | ||
366 | ptr = nt_vmcoreinfo(ptr); | ||
367 | memset(phdr, 0, sizeof(*phdr)); | ||
368 | phdr->p_type = PT_NOTE; | ||
369 | phdr->p_offset = relocate(notes_offset); | ||
370 | phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start); | ||
371 | phdr->p_memsz = phdr->p_filesz; | ||
372 | return ptr; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Create ELF core header (new kernel) | ||
377 | */ | ||
378 | static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz) | ||
379 | { | ||
380 | Elf64_Phdr *phdr_notes, *phdr_loads; | ||
381 | int mem_chunk_cnt; | ||
382 | void *ptr, *hdr; | ||
383 | u32 alloc_size; | ||
384 | u64 hdr_off; | ||
385 | |||
386 | mem_chunk_cnt = get_mem_chunk_cnt(); | ||
387 | |||
388 | alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + | ||
389 | mem_chunk_cnt * sizeof(Elf64_Phdr); | ||
390 | hdr = kzalloc_panic(alloc_size); | ||
391 | /* Init elf header */ | ||
392 | ptr = ehdr_init(hdr, mem_chunk_cnt); | ||
393 | /* Init program headers */ | ||
394 | phdr_notes = ptr; | ||
395 | ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr)); | ||
396 | phdr_loads = ptr; | ||
397 | ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt); | ||
398 | /* Init notes */ | ||
399 | hdr_off = PTR_DIFF(ptr, hdr); | ||
400 | ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off); | ||
401 | /* Init loads */ | ||
402 | hdr_off = PTR_DIFF(ptr, hdr); | ||
403 | loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off); | ||
404 | *elfcorebuf_sz = hdr_off; | ||
405 | *elfcorebuf = (void *) relocate((unsigned long) hdr); | ||
406 | BUG_ON(*elfcorebuf_sz > alloc_size); | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * Create kdump ELF core header in new kernel, if it has not been passed via | ||
411 | * the "elfcorehdr" kernel parameter | ||
412 | */ | ||
413 | static int setup_kdump_elfcorehdr(void) | ||
414 | { | ||
415 | size_t elfcorebuf_sz; | ||
416 | char *elfcorebuf; | ||
417 | |||
418 | if (!OLDMEM_BASE || is_kdump_kernel()) | ||
419 | return -EINVAL; | ||
420 | s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz); | ||
421 | elfcorehdr_addr = (unsigned long long) elfcorebuf; | ||
422 | elfcorehdr_size = elfcorebuf_sz; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | subsys_initcall(setup_kdump_elfcorehdr); | ||
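nt_init() above packs each register set into a standard ELF note: an Elf64_Nhdr, then the NUL-terminated owner name padded to 4 bytes, then the descriptor padded to 4 bytes. A small user-space sketch of the resulting size, assuming the usual <elf.h> definitions (the helper itself is illustrative, not part of the patch):

    #include <elf.h>
    #include <string.h>

    /* bytes one note occupies in the PT_NOTE segment laid out by nt_init() */
    static size_t nt_size(const char *name, size_t d_len)
    {
            size_t len = sizeof(Elf64_Nhdr);        /* 12 bytes */

            len += (strlen(name) + 1 + 3) & ~3UL;   /* name incl. NUL, 4-byte aligned */
            len += (d_len + 3) & ~3UL;              /* descriptor, 4-byte aligned */
            return len;
    }

The "addr + 24" in nt_vmcoreinfo() is the same layout read back from the old kernel: 12 bytes of Elf64_Nhdr plus "VMCOREINFO" with its NUL (11 bytes) rounded up to 12, so the descriptor starts 24 bytes into the note.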
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index f297456dba7a..37394b3413e2 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -252,7 +252,7 @@ static noinline __init void setup_lowcore_early(void) | |||
252 | { | 252 | { |
253 | psw_t psw; | 253 | psw_t psw; |
254 | 254 | ||
255 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 255 | psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; |
256 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; | 256 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; |
257 | S390_lowcore.external_new_psw = psw; | 257 | S390_lowcore.external_new_psw = psw; |
258 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 258 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 02ec8fe7d03f..b13157057e02 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -43,16 +43,15 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52 | |||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | 43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 |
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 | 44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 |
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | 45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 |
46 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | 46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE |
47 | SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR | ||
48 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | 47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE |
49 | 48 | ||
50 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 49 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
51 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) | 50 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
52 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 51 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
53 | _TIF_MCCK_PENDING) | 52 | _TIF_MCCK_PENDING) |
54 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 53 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
55 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) | 54 | _TIF_SYSCALL_TRACEPOINT) |
56 | 55 | ||
57 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 56 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
58 | STACK_SIZE = 1 << STACK_SHIFT | 57 | STACK_SIZE = 1 << STACK_SHIFT |
@@ -228,9 +227,10 @@ ENTRY(system_call) | |||
228 | sysc_saveall: | 227 | sysc_saveall: |
229 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 228 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
230 | CREATE_STACK_FRAME __LC_SAVE_AREA | 229 | CREATE_STACK_FRAME __LC_SAVE_AREA |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
232 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
233 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 230 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
232 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
233 | oi __TI_flags+3(%r12),_TIF_SYSCALL | ||
234 | sysc_vtime: | 234 | sysc_vtime: |
235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
236 | sysc_stime: | 236 | sysc_stime: |
@@ -239,17 +239,17 @@ sysc_update: | |||
239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
240 | sysc_do_svc: | 240 | sysc_do_svc: |
241 | xr %r7,%r7 | 241 | xr %r7,%r7 |
242 | icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0 | 242 | icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0 |
243 | bnz BASED(sysc_nr_ok) # svc number > 0 | 243 | bnz BASED(sysc_nr_ok) # svc number > 0 |
244 | # svc 0: system call number in %r1 | 244 | # svc 0: system call number in %r1 |
245 | cl %r1,BASED(.Lnr_syscalls) | 245 | cl %r1,BASED(.Lnr_syscalls) |
246 | bnl BASED(sysc_nr_ok) | 246 | bnl BASED(sysc_nr_ok) |
247 | sth %r1,SP_SVCNR(%r15) | 247 | sth %r1,SP_SVC_CODE+2(%r15) |
248 | lr %r7,%r1 # copy svc number to %r7 | 248 | lr %r7,%r1 # copy svc number to %r7 |
249 | sysc_nr_ok: | 249 | sysc_nr_ok: |
250 | sll %r7,2 # svc number *4 | 250 | sll %r7,2 # svc number *4 |
251 | l %r10,BASED(.Lsysc_table) | 251 | l %r10,BASED(.Lsysc_table) |
252 | tm __TI_flags+2(%r12),_TIF_SYSCALL | 252 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | 253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) |
254 | l %r8,0(%r7,%r10) # get system call addr. | 254 | l %r8,0(%r7,%r10) # get system call addr. |
255 | bnz BASED(sysc_tracesys) | 255 | bnz BASED(sysc_tracesys) |
@@ -259,23 +259,19 @@ sysc_nr_ok: | |||
259 | sysc_return: | 259 | sysc_return: |
260 | LOCKDEP_SYS_EXIT | 260 | LOCKDEP_SYS_EXIT |
261 | sysc_tif: | 261 | sysc_tif: |
262 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
263 | bno BASED(sysc_restore) | ||
262 | tm __TI_flags+3(%r12),_TIF_WORK_SVC | 264 | tm __TI_flags+3(%r12),_TIF_WORK_SVC |
263 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 265 | bnz BASED(sysc_work) # there is work to do (signals etc.) |
266 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL | ||
264 | sysc_restore: | 267 | sysc_restore: |
265 | RESTORE_ALL __LC_RETURN_PSW,1 | 268 | RESTORE_ALL __LC_RETURN_PSW,1 |
266 | sysc_done: | 269 | sysc_done: |
267 | 270 | ||
268 | # | 271 | # |
269 | # There is work to do, but first we need to check if we return to userspace. | ||
270 | # | ||
271 | sysc_work: | ||
272 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
273 | bno BASED(sysc_restore) | ||
274 | |||
275 | # | ||
276 | # One of the work bits is on. Find out which one. | 272 | # One of the work bits is on. Find out which one. |
277 | # | 273 | # |
278 | sysc_work_tif: | 274 | sysc_work: |
279 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 275 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
280 | bo BASED(sysc_mcck_pending) | 276 | bo BASED(sysc_mcck_pending) |
281 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 277 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
@@ -284,8 +280,6 @@ sysc_work_tif: | |||
284 | bo BASED(sysc_sigpending) | 280 | bo BASED(sysc_sigpending) |
285 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 281 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
286 | bo BASED(sysc_notify_resume) | 282 | bo BASED(sysc_notify_resume) |
287 | tm __TI_flags+3(%r12),_TIF_RESTART_SVC | ||
288 | bo BASED(sysc_restart) | ||
289 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 283 | tm __TI_flags+3(%r12),_TIF_PER_TRAP |
290 | bo BASED(sysc_singlestep) | 284 | bo BASED(sysc_singlestep) |
291 | b BASED(sysc_return) # beware of critical section cleanup | 285 | b BASED(sysc_return) # beware of critical section cleanup |
@@ -314,11 +308,14 @@ sysc_sigpending: | |||
314 | la %r2,SP_PTREGS(%r15) # load pt_regs | 308 | la %r2,SP_PTREGS(%r15) # load pt_regs |
315 | l %r1,BASED(.Ldo_signal) | 309 | l %r1,BASED(.Ldo_signal) |
316 | basr %r14,%r1 # call do_signal | 310 | basr %r14,%r1 # call do_signal |
317 | tm __TI_flags+3(%r12),_TIF_RESTART_SVC | 311 | tm __TI_flags+3(%r12),_TIF_SYSCALL |
318 | bo BASED(sysc_restart) | 312 | bno BASED(sysc_return) |
319 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 313 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
320 | bo BASED(sysc_singlestep) | 314 | xr %r7,%r7 # svc 0 returns -ENOSYS |
321 | b BASED(sysc_return) | 315 | clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2) |
316 | bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0 | ||
317 | icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number | ||
318 | b BASED(sysc_nr_ok) # restart svc | ||
322 | 319 | ||
323 | # | 320 | # |
324 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 321 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
@@ -329,24 +326,11 @@ sysc_notify_resume: | |||
329 | la %r14,BASED(sysc_return) | 326 | la %r14,BASED(sysc_return) |
330 | br %r1 # call do_notify_resume | 327 | br %r1 # call do_notify_resume |
331 | 328 | ||
332 | |||
333 | # | ||
334 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
335 | # | ||
336 | sysc_restart: | ||
337 | ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
338 | l %r7,SP_R2(%r15) # load new svc number | ||
339 | mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
340 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
341 | sth %r7,SP_SVCNR(%r15) | ||
342 | b BASED(sysc_nr_ok) # restart svc | ||
343 | |||
344 | # | 329 | # |
345 | # _TIF_PER_TRAP is set, call do_per_trap | 330 | # _TIF_PER_TRAP is set, call do_per_trap |
346 | # | 331 | # |
347 | sysc_singlestep: | 332 | sysc_singlestep: |
348 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 333 | ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
349 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
350 | la %r2,SP_PTREGS(%r15) # address of register-save area | 334 | la %r2,SP_PTREGS(%r15) # address of register-save area |
351 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 335 | l %r1,BASED(.Lhandle_per) # load adr. of per handler |
352 | la %r14,BASED(sysc_return) # load adr. of system return | 336 | la %r14,BASED(sysc_return) # load adr. of system return |
@@ -361,7 +345,7 @@ sysc_tracesys: | |||
361 | la %r2,SP_PTREGS(%r15) # load pt_regs | 345 | la %r2,SP_PTREGS(%r15) # load pt_regs |
362 | la %r3,0 | 346 | la %r3,0 |
363 | xr %r0,%r0 | 347 | xr %r0,%r0 |
364 | icm %r0,3,SP_SVCNR(%r15) | 348 | icm %r0,3,SP_SVC_CODE(%r15) |
365 | st %r0,SP_R2(%r15) | 349 | st %r0,SP_R2(%r15) |
366 | basr %r14,%r1 | 350 | basr %r14,%r1 |
367 | cl %r2,BASED(.Lnr_syscalls) | 351 | cl %r2,BASED(.Lnr_syscalls) |
@@ -376,7 +360,7 @@ sysc_tracego: | |||
376 | basr %r14,%r8 # call sys_xxx | 360 | basr %r14,%r8 # call sys_xxx |
377 | st %r2,SP_R2(%r15) # store return value | 361 | st %r2,SP_R2(%r15) # store return value |
378 | sysc_tracenogo: | 362 | sysc_tracenogo: |
379 | tm __TI_flags+2(%r12),_TIF_SYSCALL | 363 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
380 | bz BASED(sysc_return) | 364 | bz BASED(sysc_return) |
381 | l %r1,BASED(.Ltrace_exit) | 365 | l %r1,BASED(.Ltrace_exit) |
382 | la %r2,SP_PTREGS(%r15) # load pt_regs | 366 | la %r2,SP_PTREGS(%r15) # load pt_regs |
@@ -454,7 +438,6 @@ ENTRY(pgm_check_handler) | |||
454 | bnz BASED(pgm_per) # got per exception -> special case | 438 | bnz BASED(pgm_per) # got per exception -> special case |
455 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 439 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
456 | CREATE_STACK_FRAME __LC_SAVE_AREA | 440 | CREATE_STACK_FRAME __LC_SAVE_AREA |
457 | xc SP_ILC(4,%r15),SP_ILC(%r15) | ||
458 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW | 441 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW |
459 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 442 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
460 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 443 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
@@ -530,9 +513,10 @@ pgm_exit2: | |||
530 | pgm_svcper: | 513 | pgm_svcper: |
531 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 514 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
532 | CREATE_STACK_FRAME __LC_SAVE_AREA | 515 | CREATE_STACK_FRAME __LC_SAVE_AREA |
533 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
534 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
535 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 516 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
517 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
518 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
519 | oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | ||
536 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 520 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
537 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 521 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
538 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 522 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
@@ -540,7 +524,6 @@ pgm_svcper: | |||
540 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | 524 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE |
541 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS | 525 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS |
542 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | 526 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID |
543 | oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | ||
544 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 527 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
545 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | 528 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
546 | b BASED(sysc_do_svc) | 529 | b BASED(sysc_do_svc) |
@@ -550,7 +533,6 @@ pgm_svcper: | |||
550 | # | 533 | # |
551 | kernel_per: | 534 | kernel_per: |
552 | REENABLE_IRQS | 535 | REENABLE_IRQS |
553 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) | ||
554 | la %r2,SP_PTREGS(%r15) # address of register-save area | 536 | la %r2,SP_PTREGS(%r15) # address of register-save area |
555 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 537 | l %r1,BASED(.Lhandle_per) # load adr. of per handler |
556 | basr %r14,%r1 # branch to do_single_step | 538 | basr %r14,%r1 # branch to do_single_step |
@@ -853,13 +835,13 @@ restart_go: | |||
853 | # PSW restart interrupt handler | 835 | # PSW restart interrupt handler |
854 | # | 836 | # |
855 | ENTRY(psw_restart_int_handler) | 837 | ENTRY(psw_restart_int_handler) |
856 | st %r15,__LC_SAVE_AREA_64(%r0) # save r15 | 838 | st %r15,__LC_SAVE_AREA+48(%r0) # save r15 |
857 | basr %r15,0 | 839 | basr %r15,0 |
858 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack | 840 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack |
859 | l %r15,0(%r15) | 841 | l %r15,0(%r15) |
860 | ahi %r15,-SP_SIZE # make room for pt_regs | 842 | ahi %r15,-SP_SIZE # make room for pt_regs |
861 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 843 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack |
862 | mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack | 844 | mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack |
863 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw | 845 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw |
864 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 846 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 |
865 | basr %r14,0 | 847 | basr %r14,0 |
@@ -965,9 +947,11 @@ cleanup_system_call: | |||
965 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 947 | s %r15,BASED(.Lc_spsize) # make room for registers & psw |
966 | st %r15,12(%r12) | 948 | st %r15,12(%r12) |
967 | CREATE_STACK_FRAME __LC_SAVE_AREA | 949 | CREATE_STACK_FRAME __LC_SAVE_AREA |
968 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
969 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
970 | mvc 0(4,%r12),__LC_THREAD_INFO | 950 | mvc 0(4,%r12),__LC_THREAD_INFO |
951 | l %r12,__LC_THREAD_INFO | ||
952 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
953 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
954 | oi __TI_flags+3(%r12),_TIF_SYSCALL | ||
971 | cleanup_vtime: | 955 | cleanup_vtime: |
972 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | 956 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) |
973 | bhe BASED(cleanup_stime) | 957 | bhe BASED(cleanup_stime) |
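The removed sysc_restart path and the _TIF_RESTART_SVC flag are replaced by a leaner scheme: _TIF_SYSCALL is set on every system-call entry, cleared on the normal exit path, and the tm instruction after the do_signal() call only restarts when do_signal() has left _TIF_SYSCALL set, re-dispatching straight from the saved svc code. In rough C-like pseudocode mirroring the assembly above (not literal kernel code):

    do_signal(regs);
    if (!test_thread_flag(TIF_SYSCALL))
            goto sysc_return;                /* nothing left to restart */
    /* reload %r2-%r6, the original syscall arguments, from the stack frame */
    nr = regs->svc_code & 0xffff;            /* low halfword at SP_SVC_CODE+2 */
    if (nr >= NR_syscalls)
            nr = 0;                          /* out of range: run svc 0, returns -ENOSYS */
    goto sysc_nr_ok;                         /* re-dispatch the system call */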
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 66729eb7bbc5..ef8fb1d6e8d7 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h | |||
@@ -5,24 +5,33 @@ | |||
5 | #include <linux/signal.h> | 5 | #include <linux/signal.h> |
6 | #include <asm/ptrace.h> | 6 | #include <asm/ptrace.h> |
7 | 7 | ||
8 | |||
9 | extern void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); | ||
10 | extern void *restart_stack; | ||
11 | |||
12 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); | ||
13 | asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); | ||
14 | |||
8 | void do_protection_exception(struct pt_regs *, long, unsigned long); | 15 | void do_protection_exception(struct pt_regs *, long, unsigned long); |
9 | void do_dat_exception(struct pt_regs *, long, unsigned long); | 16 | void do_dat_exception(struct pt_regs *, long, unsigned long); |
10 | void do_asce_exception(struct pt_regs *, long, unsigned long); | 17 | void do_asce_exception(struct pt_regs *, long, unsigned long); |
11 | 18 | ||
12 | extern int sysctl_userprocess_debug; | ||
13 | |||
14 | void do_per_trap(struct pt_regs *regs); | 19 | void do_per_trap(struct pt_regs *regs); |
15 | void syscall_trace(struct pt_regs *regs, int entryexit); | 20 | void syscall_trace(struct pt_regs *regs, int entryexit); |
16 | void kernel_stack_overflow(struct pt_regs * regs); | 21 | void kernel_stack_overflow(struct pt_regs * regs); |
17 | void do_signal(struct pt_regs *regs); | 22 | void do_signal(struct pt_regs *regs); |
18 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, | 23 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, |
19 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); | 24 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); |
25 | void do_notify_resume(struct pt_regs *regs); | ||
20 | 26 | ||
21 | void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); | 27 | void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); |
28 | void do_restart(void); | ||
22 | int __cpuinit start_secondary(void *cpuvoid); | 29 | int __cpuinit start_secondary(void *cpuvoid); |
23 | void __init startup_init(void); | 30 | void __init startup_init(void); |
24 | void die(const char * str, struct pt_regs * regs, long err); | 31 | void die(const char * str, struct pt_regs * regs, long err); |
25 | 32 | ||
33 | void __init time_init(void); | ||
34 | |||
26 | struct s390_mmap_arg_struct; | 35 | struct s390_mmap_arg_struct; |
27 | struct fadvise64_64_args; | 36 | struct fadvise64_64_args; |
28 | struct old_sigaction; | 37 | struct old_sigaction; |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 713da0760538..83a93747e2fd 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -43,19 +43,18 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104 | |||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 | 43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 |
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 | 44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 |
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | 45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 |
46 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | 46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE |
47 | SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR | ||
48 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | 47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE |
49 | 48 | ||
50 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 49 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
51 | STACK_SIZE = 1 << STACK_SHIFT | 50 | STACK_SIZE = 1 << STACK_SHIFT |
52 | 51 | ||
53 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 52 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
54 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) | 53 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
55 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
56 | _TIF_MCCK_PENDING) | 55 | _TIF_MCCK_PENDING) |
57 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 56 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
58 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) | 57 | _TIF_SYSCALL_TRACEPOINT) |
59 | _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) | 58 | _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) |
60 | 59 | ||
61 | #define BASED(name) name-system_call(%r13) | 60 | #define BASED(name) name-system_call(%r13) |
@@ -249,9 +248,10 @@ ENTRY(system_call) | |||
249 | sysc_saveall: | 248 | sysc_saveall: |
250 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 249 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
251 | CREATE_STACK_FRAME __LC_SAVE_AREA | 250 | CREATE_STACK_FRAME __LC_SAVE_AREA |
252 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
253 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
254 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 251 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
252 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
253 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
254 | oi __TI_flags+7(%r12),_TIF_SYSCALL | ||
255 | sysc_vtime: | 255 | sysc_vtime: |
256 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 256 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
257 | sysc_stime: | 257 | sysc_stime: |
@@ -260,14 +260,14 @@ sysc_update: | |||
260 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 260 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
261 | LAST_BREAK | 261 | LAST_BREAK |
262 | sysc_do_svc: | 262 | sysc_do_svc: |
263 | llgh %r7,SP_SVCNR(%r15) | 263 | llgh %r7,SP_SVC_CODE+2(%r15) |
264 | slag %r7,%r7,2 # shift and test for svc 0 | 264 | slag %r7,%r7,2 # shift and test for svc 0 |
265 | jnz sysc_nr_ok | 265 | jnz sysc_nr_ok |
266 | # svc 0: system call number in %r1 | 266 | # svc 0: system call number in %r1 |
267 | llgfr %r1,%r1 # clear high word in r1 | 267 | llgfr %r1,%r1 # clear high word in r1 |
268 | cghi %r1,NR_syscalls | 268 | cghi %r1,NR_syscalls |
269 | jnl sysc_nr_ok | 269 | jnl sysc_nr_ok |
270 | sth %r1,SP_SVCNR(%r15) | 270 | sth %r1,SP_SVC_CODE+2(%r15) |
271 | slag %r7,%r1,2 # shift and test for svc 0 | 271 | slag %r7,%r1,2 # shift and test for svc 0 |
272 | sysc_nr_ok: | 272 | sysc_nr_ok: |
273 | larl %r10,sys_call_table | 273 | larl %r10,sys_call_table |
@@ -277,7 +277,7 @@ sysc_nr_ok: | |||
277 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls | 277 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls |
278 | sysc_noemu: | 278 | sysc_noemu: |
279 | #endif | 279 | #endif |
280 | tm __TI_flags+6(%r12),_TIF_SYSCALL | 280 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
281 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | 281 | mvc SP_ARGS(8,%r15),SP_R7(%r15) |
282 | lgf %r8,0(%r7,%r10) # load address of system call routine | 282 | lgf %r8,0(%r7,%r10) # load address of system call routine |
283 | jnz sysc_tracesys | 283 | jnz sysc_tracesys |
@@ -287,23 +287,19 @@ sysc_noemu: | |||
287 | sysc_return: | 287 | sysc_return: |
288 | LOCKDEP_SYS_EXIT | 288 | LOCKDEP_SYS_EXIT |
289 | sysc_tif: | 289 | sysc_tif: |
290 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
291 | jno sysc_restore | ||
290 | tm __TI_flags+7(%r12),_TIF_WORK_SVC | 292 | tm __TI_flags+7(%r12),_TIF_WORK_SVC |
291 | jnz sysc_work # there is work to do (signals etc.) | 293 | jnz sysc_work # there is work to do (signals etc.) |
294 | ni __TI_flags+7(%r12),255-_TIF_SYSCALL | ||
292 | sysc_restore: | 295 | sysc_restore: |
293 | RESTORE_ALL __LC_RETURN_PSW,1 | 296 | RESTORE_ALL __LC_RETURN_PSW,1 |
294 | sysc_done: | 297 | sysc_done: |
295 | 298 | ||
296 | # | 299 | # |
297 | # There is work to do, but first we need to check if we return to userspace. | ||
298 | # | ||
299 | sysc_work: | ||
300 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
301 | jno sysc_restore | ||
302 | |||
303 | # | ||
304 | # One of the work bits is on. Find out which one. | 300 | # One of the work bits is on. Find out which one. |
305 | # | 301 | # |
306 | sysc_work_tif: | 302 | sysc_work: |
307 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | 303 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING |
308 | jo sysc_mcck_pending | 304 | jo sysc_mcck_pending |
309 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 305 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
@@ -312,8 +308,6 @@ sysc_work_tif: | |||
312 | jo sysc_sigpending | 308 | jo sysc_sigpending |
313 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 309 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
314 | jo sysc_notify_resume | 310 | jo sysc_notify_resume |
315 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC | ||
316 | jo sysc_restart | ||
317 | tm __TI_flags+7(%r12),_TIF_PER_TRAP | 311 | tm __TI_flags+7(%r12),_TIF_PER_TRAP |
318 | jo sysc_singlestep | 312 | jo sysc_singlestep |
319 | j sysc_return # beware of critical section cleanup | 313 | j sysc_return # beware of critical section cleanup |
@@ -339,11 +333,15 @@ sysc_sigpending: | |||
339 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 333 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP |
340 | la %r2,SP_PTREGS(%r15) # load pt_regs | 334 | la %r2,SP_PTREGS(%r15) # load pt_regs |
341 | brasl %r14,do_signal # call do_signal | 335 | brasl %r14,do_signal # call do_signal |
342 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC | 336 | tm __TI_flags+7(%r12),_TIF_SYSCALL |
343 | jo sysc_restart | 337 | jno sysc_return |
344 | tm __TI_flags+7(%r12),_TIF_PER_TRAP | 338 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
345 | jo sysc_singlestep | 339 | lghi %r7,0 # svc 0 returns -ENOSYS |
346 | j sysc_return | 340 | lh %r1,SP_SVC_CODE+2(%r15) # load new svc number |
341 | cghi %r1,NR_syscalls | ||
342 | jnl sysc_nr_ok # invalid svc number -> do svc 0 | ||
343 | slag %r7,%r1,2 | ||
344 | j sysc_nr_ok # restart svc | ||
347 | 345 | ||
348 | # | 346 | # |
349 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 347 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
@@ -354,23 +352,10 @@ sysc_notify_resume: | |||
354 | jg do_notify_resume # call do_notify_resume | 352 | jg do_notify_resume # call do_notify_resume |
355 | 353 | ||
356 | # | 354 | # |
357 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
358 | # | ||
359 | sysc_restart: | ||
360 | ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
361 | lg %r7,SP_R2(%r15) # load new svc number | ||
362 | mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
363 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
364 | sth %r7,SP_SVCNR(%r15) | ||
365 | slag %r7,%r7,2 | ||
366 | j sysc_nr_ok # restart svc | ||
367 | |||
368 | # | ||
369 | # _TIF_PER_TRAP is set, call do_per_trap | 355 | # _TIF_PER_TRAP is set, call do_per_trap |
370 | # | 356 | # |
371 | sysc_singlestep: | 357 | sysc_singlestep: |
372 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 358 | ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
373 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
374 | la %r2,SP_PTREGS(%r15) # address of register-save area | 359 | la %r2,SP_PTREGS(%r15) # address of register-save area |
375 | larl %r14,sysc_return # load adr. of system return | 360 | larl %r14,sysc_return # load adr. of system return |
376 | jg do_per_trap | 361 | jg do_per_trap |
@@ -382,7 +367,7 @@ sysc_singlestep: | |||
382 | sysc_tracesys: | 367 | sysc_tracesys: |
383 | la %r2,SP_PTREGS(%r15) # load pt_regs | 368 | la %r2,SP_PTREGS(%r15) # load pt_regs |
384 | la %r3,0 | 369 | la %r3,0 |
385 | llgh %r0,SP_SVCNR(%r15) | 370 | llgh %r0,SP_SVC_CODE+2(%r15) |
386 | stg %r0,SP_R2(%r15) | 371 | stg %r0,SP_R2(%r15) |
387 | brasl %r14,do_syscall_trace_enter | 372 | brasl %r14,do_syscall_trace_enter |
388 | lghi %r0,NR_syscalls | 373 | lghi %r0,NR_syscalls |
@@ -397,7 +382,7 @@ sysc_tracego: | |||
397 | basr %r14,%r8 # call sys_xxx | 382 | basr %r14,%r8 # call sys_xxx |
398 | stg %r2,SP_R2(%r15) # store return value | 383 | stg %r2,SP_R2(%r15) # store return value |
399 | sysc_tracenogo: | 384 | sysc_tracenogo: |
400 | tm __TI_flags+6(%r12),_TIF_SYSCALL | 385 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
401 | jz sysc_return | 386 | jz sysc_return |
402 | la %r2,SP_PTREGS(%r15) # load pt_regs | 387 | la %r2,SP_PTREGS(%r15) # load pt_regs |
403 | larl %r14,sysc_return # return point is sysc_return | 388 | larl %r14,sysc_return # return point is sysc_return |
@@ -470,7 +455,6 @@ ENTRY(pgm_check_handler) | |||
470 | jnz pgm_per # got per exception -> special case | 455 | jnz pgm_per # got per exception -> special case |
471 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 456 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
472 | CREATE_STACK_FRAME __LC_SAVE_AREA | 457 | CREATE_STACK_FRAME __LC_SAVE_AREA |
473 | xc SP_ILC(4,%r15),SP_ILC(%r15) | ||
474 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW | 458 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW |
475 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 459 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
476 | HANDLE_SIE_INTERCEPT | 460 | HANDLE_SIE_INTERCEPT |
@@ -550,9 +534,10 @@ pgm_exit2: | |||
550 | pgm_svcper: | 534 | pgm_svcper: |
551 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 535 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
552 | CREATE_STACK_FRAME __LC_SAVE_AREA | 536 | CREATE_STACK_FRAME __LC_SAVE_AREA |
553 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
554 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
555 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 537 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
538 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
539 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
540 | oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | ||
556 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 541 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
557 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 542 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
558 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 543 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
@@ -561,7 +546,6 @@ pgm_svcper: | |||
561 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | 546 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE |
562 | mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS | 547 | mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS |
563 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | 548 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID |
564 | oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | ||
565 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 549 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
566 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | 550 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
567 | j sysc_do_svc | 551 | j sysc_do_svc |
@@ -571,7 +555,6 @@ pgm_svcper: | |||
571 | # | 555 | # |
572 | kernel_per: | 556 | kernel_per: |
573 | REENABLE_IRQS | 557 | REENABLE_IRQS |
574 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
575 | la %r2,SP_PTREGS(%r15) # address of register-save area | 558 | la %r2,SP_PTREGS(%r15) # address of register-save area |
576 | brasl %r14,do_per_trap | 559 | brasl %r14,do_per_trap |
577 | j pgm_exit | 560 | j pgm_exit |
@@ -869,12 +852,12 @@ restart_go: | |||
869 | # PSW restart interrupt handler | 852 | # PSW restart interrupt handler |
870 | # | 853 | # |
871 | ENTRY(psw_restart_int_handler) | 854 | ENTRY(psw_restart_int_handler) |
872 | stg %r15,__LC_SAVE_AREA_64(%r0) # save r15 | 855 | stg %r15,__LC_SAVE_AREA+120(%r0) # save r15 |
873 | larl %r15,restart_stack # load restart stack | 856 | larl %r15,restart_stack # load restart stack |
874 | lg %r15,0(%r15) | 857 | lg %r15,0(%r15) |
875 | aghi %r15,-SP_SIZE # make room for pt_regs | 858 | aghi %r15,-SP_SIZE # make room for pt_regs |
876 | stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 859 | stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack |
877 | mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack | 860 | mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack |
878 | mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw | 861 | mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw |
879 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 862 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 |
880 | brasl %r14,do_restart | 863 | brasl %r14,do_restart |
@@ -972,9 +955,11 @@ cleanup_system_call: | |||
972 | stg %r15,32(%r12) | 955 | stg %r15,32(%r12) |
973 | stg %r11,0(%r12) | 956 | stg %r11,0(%r12) |
974 | CREATE_STACK_FRAME __LC_SAVE_AREA | 957 | CREATE_STACK_FRAME __LC_SAVE_AREA |
975 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
976 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
977 | mvc 8(8,%r12),__LC_THREAD_INFO | 958 | mvc 8(8,%r12),__LC_THREAD_INFO |
959 | lg %r12,__LC_THREAD_INFO | ||
960 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
961 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
962 | oi __TI_flags+7(%r12),_TIF_SYSCALL | ||
978 | cleanup_vtime: | 963 | cleanup_vtime: |
979 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 964 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) |
980 | jhe cleanup_stime | 965 | jhe cleanup_stime |
@@ -1096,6 +1081,7 @@ sie_exit: | |||
1096 | lghi %r2,0 | 1081 | lghi %r2,0 |
1097 | br %r14 | 1082 | br %r14 |
1098 | sie_fault: | 1083 | sie_fault: |
1084 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
1099 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct | 1085 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct |
1100 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) | 1086 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) |
1101 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | 1087 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 2d781bab37bb..900068d2bf92 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -449,10 +449,28 @@ ENTRY(start) | |||
449 | # | 449 | # |
450 | .org 0x10000 | 450 | .org 0x10000 |
451 | ENTRY(startup) | 451 | ENTRY(startup) |
452 | j .Lep_startup_normal | ||
453 | .org 0x10008 | ||
454 | # | ||
455 | # This is a list of s390 kernel entry points. At address 0x1000f the number of | ||
456 | # valid entry points is stored. | ||
457 | # | ||
458 | # IMPORTANT: Do not change this table, it is s390 kernel ABI! | ||
459 | # | ||
460 | .ascii "S390EP" | ||
461 | .byte 0x00,0x01 | ||
462 | # | ||
463 | # kdump startup-code at 0x10010, running in 64 bit absolute addressing mode | ||
464 | # | ||
465 | .org 0x10010 | ||
466 | ENTRY(startup_kdump) | ||
467 | j .Lep_startup_kdump | ||
468 | .Lep_startup_normal: | ||
452 | basr %r13,0 # get base | 469 | basr %r13,0 # get base |
453 | .LPG0: | 470 | .LPG0: |
454 | xc 0x200(256),0x200 # partially clear lowcore | 471 | xc 0x200(256),0x200 # partially clear lowcore |
455 | xc 0x300(256),0x300 | 472 | xc 0x300(256),0x300 |
473 | xc 0xe00(256),0xe00 | ||
456 | stck __LC_LAST_UPDATE_CLOCK | 474 | stck __LC_LAST_UPDATE_CLOCK |
457 | spt 5f-.LPG0(%r13) | 475 | spt 5f-.LPG0(%r13) |
458 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) | 476 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) |
@@ -534,6 +552,8 @@ ENTRY(startup) | |||
534 | .align 8 | 552 | .align 8 |
535 | 5: .long 0x7fffffff,0xffffffff | 553 | 5: .long 0x7fffffff,0xffffffff |
536 | 554 | ||
555 | #include "head_kdump.S" | ||
556 | |||
537 | # | 557 | # |
538 | # params at 10400 (setup.h) | 558 | # params at 10400 (setup.h) |
539 | # | 559 | # |
@@ -541,6 +561,8 @@ ENTRY(startup) | |||
541 | .long 0,0 # IPL_DEVICE | 561 | .long 0,0 # IPL_DEVICE |
542 | .long 0,0 # INITRD_START | 562 | .long 0,0 # INITRD_START |
543 | .long 0,0 # INITRD_SIZE | 563 | .long 0,0 # INITRD_SIZE |
564 | .long 0,0 # OLDMEM_BASE | ||
565 | .long 0,0 # OLDMEM_SIZE | ||
544 | 566 | ||
545 | .org COMMAND_LINE | 567 | .org COMMAND_LINE |
546 | .byte "root=/dev/ram0 ro" | 568 | .byte "root=/dev/ram0 ro" |
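The entry-point table added above fixes the "S390EP" magic plus a version byte at 0x10008 and keeps the count of valid entry points at 0x1000f, so external tooling can detect the kdump entry at 0x10010 without parsing symbols. A standalone sketch of such a probe follows; the assumption that the raw image file starts at load address 0 (putting the magic at file offset 0x10008) is mine, the patch only defines the in-memory layout.

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned char buf[8];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <raw kernel image>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	/* "S390EP", version byte, number of valid entry points */
	if (fseek(f, 0x10008L, SEEK_SET) != 0 || fread(buf, 1, 8, f) != 8) {
		fprintf(stderr, "image too short\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	if (memcmp(buf, "S390EP", 6) == 0)
		printf("entry point table found, %u entry point(s) valid\n", buf[7]);
	else
		printf("no S390EP signature at offset 0x10008\n");
	return 0;
}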
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index f21954b44dc1..d3f1ab7d90ad 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -92,7 +92,7 @@ ENTRY(_stext) | |||
92 | .LPG3: | 92 | .LPG3: |
93 | # check control registers | 93 | # check control registers |
94 | stctl %c0,%c15,0(%r15) | 94 | stctl %c0,%c15,0(%r15) |
95 | oi 2(%r15),0x40 # enable sigp emergency signal | 95 | oi 2(%r15),0x60 # enable sigp emergency & external call |
96 | oi 0(%r15),0x10 # switch on low address protection | 96 | oi 0(%r15),0x10 # switch on low address protection |
97 | lctl %c0,%c15,0(%r15) | 97 | lctl %c0,%c15,0(%r15) |
98 | 98 | ||
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index ae5d492b069e..99348c0eaa41 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -90,7 +90,7 @@ ENTRY(_stext) | |||
90 | .LPG3: | 90 | .LPG3: |
91 | # check control registers | 91 | # check control registers |
92 | stctg %c0,%c15,0(%r15) | 92 | stctg %c0,%c15,0(%r15) |
93 | oi 6(%r15),0x40 # enable sigp emergency signal | 93 | oi 6(%r15),0x60 # enable sigp emergency & external call |
94 | oi 4(%r15),0x10 # switch on low address protection | 94 | oi 4(%r15),0x10 # switch on low address protection |
95 | lctlg %c0,%c15,0(%r15) | 95 | lctlg %c0,%c15,0(%r15) |
96 | 96 | ||
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S new file mode 100644 index 000000000000..e1ac3893e972 --- /dev/null +++ b/arch/s390/kernel/head_kdump.S | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * S390 kdump lowlevel functions (new kernel) | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #define DATAMOVER_ADDR 0x4000 | ||
9 | #define COPY_PAGE_ADDR 0x6000 | ||
10 | |||
11 | #ifdef CONFIG_CRASH_DUMP | ||
12 | |||
13 | # | ||
14 | # kdump entry (new kernel - not yet relocated) | ||
15 | # | ||
16 | # Note: This code has to be position independent | ||
17 | # | ||
18 | |||
19 | .align 2 | ||
20 | .Lep_startup_kdump: | ||
21 | lhi %r1,2 # mode 2 = esame (dump) | ||
22 | sigp %r1,%r0,0x12 # Switch to esame mode | ||
23 | sam64 # Switch to 64 bit addressing | ||
24 | basr %r13,0 | ||
25 | .Lbase: | ||
26 | larl %r2,.Lbase_addr # Check, if we have been | ||
27 | lg %r2,0(%r2) # already relocated: | ||
28 | clgr %r2,%r13 # | ||
29 | jne .Lrelocate # No : Start data mover | ||
30 | lghi %r2,0 # Yes: Start kdump kernel | ||
31 | brasl %r14,startup_kdump_relocated | ||
32 | |||
33 | .Lrelocate: | ||
34 | larl %r4,startup | ||
35 | lg %r2,0x418(%r4) # Get kdump base | ||
36 | lg %r3,0x420(%r4) # Get kdump size | ||
37 | |||
38 | larl %r10,.Lcopy_start # Source of data mover | ||
39 | lghi %r8,DATAMOVER_ADDR # Target of data mover | ||
40 | mvc 0(256,%r8),0(%r10) # Copy data mover code | ||
41 | |||
42 | agr %r8,%r2 # Copy data mover to | ||
43 | mvc 0(256,%r8),0(%r10) # reserved mem | ||
44 | |||
45 | lghi %r14,DATAMOVER_ADDR # Jump to copied data mover | ||
46 | basr %r14,%r14 | ||
47 | .Lbase_addr: | ||
48 | .quad .Lbase | ||
49 | |||
50 | # | ||
51 | # kdump data mover code (runs at address DATAMOVER_ADDR) | ||
52 | # | ||
53 | # r2: kdump base address | ||
54 | # r3: kdump size | ||
55 | # | ||
56 | .Lcopy_start: | ||
57 | basr %r13,0 # Base | ||
58 | 0: | ||
59 | lgr %r11,%r2 # Save kdump base address | ||
60 | lgr %r12,%r2 | ||
61 | agr %r12,%r3 # Compute kdump end address | ||
62 | |||
63 | lghi %r5,0 | ||
64 | lghi %r10,COPY_PAGE_ADDR # Load copy page address | ||
65 | 1: | ||
66 | mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp | ||
67 | mvc 0(256,%r5),0(%r11) # Copy new kernel to old | ||
68 | mvc 0(256,%r11),0(%r10) # Copy tmp to new | ||
69 | aghi %r11,256 | ||
70 | aghi %r5,256 | ||
71 | clgr %r11,%r12 | ||
72 | jl 1b | ||
73 | |||
74 | lg %r14,.Lstartup_kdump-0b(%r13) | ||
75 | basr %r14,%r14 # Start relocated kernel | ||
76 | .Lstartup_kdump: | ||
77 | .long 0x00000000,0x00000000 + startup_kdump_relocated | ||
78 | .Lcopy_end: | ||
79 | |||
80 | # | ||
81 | # Startup of kdump (relocated new kernel) | ||
82 | # | ||
83 | .align 2 | ||
84 | startup_kdump_relocated: | ||
85 | basr %r13,0 | ||
86 | 0: | ||
87 | mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW | ||
88 | mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW | ||
89 | lhi %r1,1 # Start new kernel | ||
90 | diag %r1,%r1,0x308 # with diag 308 | ||
91 | |||
92 | .Lno_diag308: # No diag 308 | ||
93 | sam31 # Switch to 31 bit addr mode | ||
94 | sr %r1,%r1 # Erase register r1 | ||
95 | sr %r2,%r2 # Erase register r2 | ||
96 | sigp %r1,%r2,0x12 # Switch to 31 bit arch mode | ||
97 | lpsw 0 # Start new kernel... | ||
98 | .align 8 | ||
99 | .Lrestart_psw: | ||
100 | .long 0x00080000,0x80000000 + startup | ||
101 | .Lpgm_psw: | ||
102 | .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308 | ||
103 | #else | ||
104 | .align 2 | ||
105 | .Lep_startup_kdump: | ||
106 | #ifdef CONFIG_64BIT | ||
107 | larl %r13,startup_kdump_crash | ||
108 | lpswe 0(%r13) | ||
109 | .align 8 | ||
110 | startup_kdump_crash: | ||
111 | .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash | ||
112 | #else | ||
113 | basr %r13,0 | ||
114 | 0: lpsw startup_kdump_crash-0b(%r13) | ||
115 | .align 8 | ||
116 | startup_kdump_crash: | ||
117 | .long 0x000a0000,0x00000000 + startup_kdump_crash | ||
118 | #endif /* CONFIG_64BIT */ | ||
119 | #endif /* CONFIG_CRASH_DUMP */ | ||
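The .Lcopy_start data mover above exchanges the crashed kernel's low memory with the kdump kernel that was loaded into the reserved region, 256 bytes at a time through the bounce page at COPY_PAGE_ADDR. A minimal userspace sketch of that exchange loop, with made-up buffer sizes standing in for the two memory regions:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 256	/* the data mover moves 256 bytes per MVC */

static void swap_regions(unsigned char *low, unsigned char *reserved, size_t size)
{
	unsigned char tmp[BLOCK_SIZE];	/* stands in for COPY_PAGE_ADDR */
	size_t off;

	for (off = 0; off < size; off += BLOCK_SIZE) {
		memcpy(tmp, low + off, BLOCK_SIZE);             /* old kernel -> tmp      */
		memcpy(low + off, reserved + off, BLOCK_SIZE);  /* new kernel -> low mem  */
		memcpy(reserved + off, tmp, BLOCK_SIZE);        /* old kernel -> reserved */
	}
}

int main(void)
{
	static unsigned char low[4 * BLOCK_SIZE], reserved[4 * BLOCK_SIZE];

	memset(low, 0xAA, sizeof(low));			/* pretend: crashed kernel      */
	memset(reserved, 0xBB, sizeof(reserved));	/* pretend: loaded kdump kernel */
	swap_regions(low, reserved, sizeof(low));
	printf("low now starts with 0x%02x, reserved with 0x%02x\n", low[0], reserved[0]);
	return 0;
}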
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 48c710206366..affa8e68124a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/gfp.h> | 18 | #include <linux/gfp.h> |
19 | #include <linux/crash_dump.h> | ||
19 | #include <asm/ipl.h> | 20 | #include <asm/ipl.h> |
20 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
21 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
@@ -26,6 +27,7 @@ | |||
26 | #include <asm/sclp.h> | 27 | #include <asm/sclp.h> |
27 | #include <asm/sigp.h> | 28 | #include <asm/sigp.h> |
28 | #include <asm/checksum.h> | 29 | #include <asm/checksum.h> |
30 | #include "entry.h" | ||
29 | 31 | ||
30 | #define IPL_PARM_BLOCK_VERSION 0 | 32 | #define IPL_PARM_BLOCK_VERSION 0 |
31 | 33 | ||
@@ -275,8 +277,8 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
275 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); | 277 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); |
276 | 278 | ||
277 | /* VM IPL PARM routines */ | 279 | /* VM IPL PARM routines */ |
278 | size_t reipl_get_ascii_vmparm(char *dest, size_t size, | 280 | static size_t reipl_get_ascii_vmparm(char *dest, size_t size, |
279 | const struct ipl_parameter_block *ipb) | 281 | const struct ipl_parameter_block *ipb) |
280 | { | 282 | { |
281 | int i; | 283 | int i; |
282 | size_t len; | 284 | size_t len; |
@@ -338,8 +340,8 @@ static size_t scpdata_length(const char* buf, size_t count) | |||
338 | return count; | 340 | return count; |
339 | } | 341 | } |
340 | 342 | ||
341 | size_t reipl_append_ascii_scpdata(char *dest, size_t size, | 343 | static size_t reipl_append_ascii_scpdata(char *dest, size_t size, |
342 | const struct ipl_parameter_block *ipb) | 344 | const struct ipl_parameter_block *ipb) |
343 | { | 345 | { |
344 | size_t count; | 346 | size_t count; |
345 | size_t i; | 347 | size_t i; |
@@ -1738,7 +1740,11 @@ static struct kobj_attribute on_restart_attr = | |||
1738 | 1740 | ||
1739 | void do_restart(void) | 1741 | void do_restart(void) |
1740 | { | 1742 | { |
1743 | smp_restart_with_online_cpu(); | ||
1741 | smp_send_stop(); | 1744 | smp_send_stop(); |
1745 | #ifdef CONFIG_CRASH_DUMP | ||
1746 | crash_kexec(NULL); | ||
1747 | #endif | ||
1742 | on_restart_trigger.action->fn(&on_restart_trigger); | 1748 | on_restart_trigger.action->fn(&on_restart_trigger); |
1743 | stop_run(&on_restart_trigger); | 1749 | stop_run(&on_restart_trigger); |
1744 | } | 1750 | } |
@@ -2009,7 +2015,7 @@ static void do_reset_calls(void) | |||
2009 | 2015 | ||
2010 | u32 dump_prefix_page; | 2016 | u32 dump_prefix_page; |
2011 | 2017 | ||
2012 | void s390_reset_system(void) | 2018 | void s390_reset_system(void (*func)(void *), void *data) |
2013 | { | 2019 | { |
2014 | struct _lowcore *lc; | 2020 | struct _lowcore *lc; |
2015 | 2021 | ||
@@ -2028,15 +2034,19 @@ void s390_reset_system(void) | |||
2028 | __ctl_clear_bit(0,28); | 2034 | __ctl_clear_bit(0,28); |
2029 | 2035 | ||
2030 | /* Set new machine check handler */ | 2036 | /* Set new machine check handler */ |
2031 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 2037 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; |
2032 | S390_lowcore.mcck_new_psw.addr = | 2038 | S390_lowcore.mcck_new_psw.addr = |
2033 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; | 2039 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; |
2034 | 2040 | ||
2035 | /* Set new program check handler */ | 2041 | /* Set new program check handler */ |
2036 | S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 2042 | S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; |
2037 | S390_lowcore.program_new_psw.addr = | 2043 | S390_lowcore.program_new_psw.addr = |
2038 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 2044 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
2039 | 2045 | ||
2046 | /* Store status at absolute zero */ | ||
2047 | store_status(); | ||
2048 | |||
2040 | do_reset_calls(); | 2049 | do_reset_calls(); |
2050 | if (func) | ||
2051 | func(data); | ||
2041 | } | 2052 | } |
2042 | |||
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1f4050d45f78..b9a7fdd9c814 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -33,7 +33,8 @@ static const struct irq_class intrclass_names[] = { | |||
33 | {.name = "EXT" }, | 33 | {.name = "EXT" }, |
34 | {.name = "I/O" }, | 34 | {.name = "I/O" }, |
35 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, | 35 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, |
36 | {.name = "IPI", .desc = "[EXT] Signal Processor" }, | 36 | {.name = "EXC", .desc = "[EXT] External Call" }, |
37 | {.name = "EMS", .desc = "[EXT] Emergency Signal" }, | ||
37 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, | 38 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, |
38 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, | 39 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, |
39 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, | 40 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, |
@@ -42,8 +43,8 @@ static const struct irq_class intrclass_names[] = { | |||
42 | {.name = "SCP", .desc = "[EXT] Service Call" }, | 43 | {.name = "SCP", .desc = "[EXT] Service Call" }, |
43 | {.name = "IUC", .desc = "[EXT] IUCV" }, | 44 | {.name = "IUC", .desc = "[EXT] IUCV" }, |
44 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, | 45 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, |
46 | {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" }, | ||
45 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, | 47 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, |
46 | {.name = "QDI", .desc = "[I/O] QDIO Interrupt" }, | ||
47 | {.name = "DAS", .desc = "[I/O] DASD" }, | 48 | {.name = "DAS", .desc = "[I/O] DASD" }, |
48 | {.name = "C15", .desc = "[I/O] 3215" }, | 49 | {.name = "C15", .desc = "[I/O] 3215" }, |
49 | {.name = "C70", .desc = "[I/O] 3270" }, | 50 | {.name = "C70", .desc = "[I/O] 3270" }, |
@@ -53,6 +54,7 @@ static const struct irq_class intrclass_names[] = { | |||
53 | {.name = "CLW", .desc = "[I/O] CLAW" }, | 54 | {.name = "CLW", .desc = "[I/O] CLAW" }, |
54 | {.name = "CTC", .desc = "[I/O] CTC" }, | 55 | {.name = "CTC", .desc = "[I/O] CTC" }, |
55 | {.name = "APB", .desc = "[I/O] AP Bus" }, | 56 | {.name = "APB", .desc = "[I/O] AP Bus" }, |
57 | {.name = "CSC", .desc = "[I/O] CHSC Subchannel" }, | ||
56 | {.name = "NMI", .desc = "[NMI] Machine Check" }, | 58 | {.name = "NMI", .desc = "[NMI] Machine Check" }, |
57 | }; | 59 | }; |
58 | 60 | ||
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 44cc06bedf77..b987ab2c1541 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -18,26 +18,15 @@ struct insn { | |||
18 | } __packed; | 18 | } __packed; |
19 | 19 | ||
20 | struct insn_args { | 20 | struct insn_args { |
21 | unsigned long *target; | 21 | struct jump_entry *entry; |
22 | struct insn *insn; | 22 | enum jump_label_type type; |
23 | ssize_t size; | ||
24 | }; | 23 | }; |
25 | 24 | ||
26 | static int __arch_jump_label_transform(void *data) | 25 | static void __jump_label_transform(struct jump_entry *entry, |
26 | enum jump_label_type type) | ||
27 | { | 27 | { |
28 | struct insn_args *args = data; | ||
29 | int rc; | ||
30 | |||
31 | rc = probe_kernel_write(args->target, args->insn, args->size); | ||
32 | WARN_ON_ONCE(rc < 0); | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | void arch_jump_label_transform(struct jump_entry *entry, | ||
37 | enum jump_label_type type) | ||
38 | { | ||
39 | struct insn_args args; | ||
40 | struct insn insn; | 28 | struct insn insn; |
29 | int rc; | ||
41 | 30 | ||
42 | if (type == JUMP_LABEL_ENABLE) { | 31 | if (type == JUMP_LABEL_ENABLE) { |
43 | /* brcl 15,offset */ | 32 | /* brcl 15,offset */ |
@@ -49,11 +38,33 @@ void arch_jump_label_transform(struct jump_entry *entry, | |||
49 | insn.offset = 0; | 38 | insn.offset = 0; |
50 | } | 39 | } |
51 | 40 | ||
52 | args.target = (void *) entry->code; | 41 | rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE); |
53 | args.insn = &insn; | 42 | WARN_ON_ONCE(rc < 0); |
54 | args.size = JUMP_LABEL_NOP_SIZE; | 43 | } |
55 | 44 | ||
56 | stop_machine(__arch_jump_label_transform, &args, NULL); | 45 | static int __sm_arch_jump_label_transform(void *data) |
46 | { | ||
47 | struct insn_args *args = data; | ||
48 | |||
49 | __jump_label_transform(args->entry, args->type); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | void arch_jump_label_transform(struct jump_entry *entry, | ||
54 | enum jump_label_type type) | ||
55 | { | ||
56 | struct insn_args args; | ||
57 | |||
58 | args.entry = entry; | ||
59 | args.type = type; | ||
60 | |||
61 | stop_machine(__sm_arch_jump_label_transform, &args, NULL); | ||
62 | } | ||
63 | |||
64 | void arch_jump_label_transform_static(struct jump_entry *entry, | ||
65 | enum jump_label_type type) | ||
66 | { | ||
67 | __jump_label_transform(entry, type); | ||
57 | } | 68 | } |
58 | 69 | ||
59 | #endif | 70 | #endif |
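The rework above moves the actual patching into __jump_label_transform() so it can run either under stop_machine() at runtime (arch_jump_label_transform) or directly during early boot (the new arch_jump_label_transform_static). The hunk only names "brcl 15,offset" and the nop; the 6-byte encodings and the halfword offset arithmetic in this sketch are my reading of the instruction format, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

struct insn {
	uint16_t opcode;
	int32_t offset;
} __attribute__((packed));	/* 6 bytes, matching JUMP_LABEL_NOP_SIZE */

static struct insn make_insn(int enable, long code, long target)
{
	struct insn insn;

	if (enable) {
		insn.opcode = 0xc0f4;			/* brcl 15,offset */
		insn.offset = (target - code) >> 1;	/* offset counted in halfwords */
	} else {
		insn.opcode = 0xc004;			/* brcl 0,0 acts as a nop */
		insn.offset = 0;
	}
	return insn;
}

int main(void)
{
	struct insn on = make_insn(1, 0x1000, 0x1400);
	struct insn off = make_insn(0, 0x1000, 0x1400);

	printf("enabled: opcode=%#x offset=%d\n", on.opcode, (int)on.offset);
	printf("nop:     opcode=%#x offset=%d\n", off.opcode, (int)off.offset);
	return 0;
}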
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 1d05d669107c..64b761aef004 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -635,7 +635,7 @@ void __kprobes jprobe_return(void) | |||
635 | asm volatile(".word 0x0002"); | 635 | asm volatile(".word 0x0002"); |
636 | } | 636 | } |
637 | 637 | ||
638 | void __kprobes jprobe_return_end(void) | 638 | static void __used __kprobes jprobe_return_end(void) |
639 | { | 639 | { |
640 | asm volatile("bcr 0,0"); | 640 | asm volatile("bcr 0,0"); |
641 | } | 641 | } |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b09b9c62573e..3cd0f25ab015 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -1,10 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/machine_kexec.c | 2 | * arch/s390/kernel/machine_kexec.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2005,2006 | 4 | * Copyright IBM Corp. 2005,2011 |
5 | * | 5 | * |
6 | * Author(s): Rolf Adelsberger, | 6 | * Author(s): Rolf Adelsberger, |
7 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
8 | * Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #include <linux/device.h> | 11 | #include <linux/device.h> |
@@ -21,12 +22,162 @@ | |||
21 | #include <asm/smp.h> | 22 | #include <asm/smp.h> |
22 | #include <asm/reset.h> | 23 | #include <asm/reset.h> |
23 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/diag.h> | ||
26 | #include <asm/asm-offsets.h> | ||
24 | 27 | ||
25 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); | 28 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); |
26 | 29 | ||
27 | extern const unsigned char relocate_kernel[]; | 30 | extern const unsigned char relocate_kernel[]; |
28 | extern const unsigned long long relocate_kernel_len; | 31 | extern const unsigned long long relocate_kernel_len; |
29 | 32 | ||
33 | #ifdef CONFIG_CRASH_DUMP | ||
34 | |||
35 | void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); | ||
36 | |||
37 | /* | ||
38 | * Create ELF notes for one CPU | ||
39 | */ | ||
40 | static void add_elf_notes(int cpu) | ||
41 | { | ||
42 | struct save_area *sa = (void *) 4608 + store_prefix(); | ||
43 | void *ptr; | ||
44 | |||
45 | memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa)); | ||
46 | ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); | ||
47 | ptr = fill_cpu_elf_notes(ptr, sa); | ||
48 | memset(ptr, 0, sizeof(struct elf_note)); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Store status of next available physical CPU | ||
53 | */ | ||
54 | static int store_status_next(int start_cpu, int this_cpu) | ||
55 | { | ||
56 | struct save_area *sa = (void *) 4608 + store_prefix(); | ||
57 | int cpu, rc; | ||
58 | |||
59 | for (cpu = start_cpu; cpu < 65536; cpu++) { | ||
60 | if (cpu == this_cpu) | ||
61 | continue; | ||
62 | do { | ||
63 | rc = raw_sigp(cpu, sigp_stop_and_store_status); | ||
64 | } while (rc == sigp_busy); | ||
65 | if (rc != sigp_order_code_accepted) | ||
66 | continue; | ||
67 | if (sa->pref_reg) | ||
68 | return cpu; | ||
69 | } | ||
70 | return -1; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Initialize CPU ELF notes | ||
75 | */ | ||
76 | void setup_regs(void) | ||
77 | { | ||
78 | unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; | ||
79 | int cpu, this_cpu, phys_cpu = 0, first = 1; | ||
80 | |||
81 | this_cpu = stap(); | ||
82 | |||
83 | if (!S390_lowcore.prefixreg_save_area) | ||
84 | first = 0; | ||
85 | for_each_online_cpu(cpu) { | ||
86 | if (first) { | ||
87 | add_elf_notes(cpu); | ||
88 | first = 0; | ||
89 | continue; | ||
90 | } | ||
91 | phys_cpu = store_status_next(phys_cpu, this_cpu); | ||
92 | if (phys_cpu == -1) | ||
93 | break; | ||
94 | add_elf_notes(cpu); | ||
95 | phys_cpu++; | ||
96 | } | ||
97 | /* Copy dump CPU store status info to absolute zero */ | ||
98 | memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); | ||
99 | } | ||
100 | |||
101 | #endif | ||
102 | |||
103 | /* | ||
104 | * Start kdump: We expect here that a store status has been done on our CPU | ||
105 | */ | ||
106 | static void __do_machine_kdump(void *image) | ||
107 | { | ||
108 | #ifdef CONFIG_CRASH_DUMP | ||
109 | int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; | ||
110 | |||
111 | __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); | ||
112 | setup_regs(); | ||
113 | start_kdump(1); | ||
114 | #endif | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Check if kdump checksums are valid: We call purgatory with parameter "0" | ||
119 | */ | ||
120 | static int kdump_csum_valid(struct kimage *image) | ||
121 | { | ||
122 | #ifdef CONFIG_CRASH_DUMP | ||
123 | int (*start_kdump)(int) = (void *)image->start; | ||
124 | int rc; | ||
125 | |||
126 | __arch_local_irq_stnsm(0xfb); /* disable DAT */ | ||
127 | rc = start_kdump(0); | ||
128 | __arch_local_irq_stosm(0x04); /* enable DAT */ | ||
129 | return rc ? 0 : -EINVAL; | ||
130 | #else | ||
131 | return -EINVAL; | ||
132 | #endif | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Map or unmap crashkernel memory | ||
137 | */ | ||
138 | static void crash_map_pages(int enable) | ||
139 | { | ||
140 | unsigned long size = resource_size(&crashk_res); | ||
141 | |||
142 | BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN || | ||
143 | size % KEXEC_CRASH_MEM_ALIGN); | ||
144 | if (enable) | ||
145 | vmem_add_mapping(crashk_res.start, size); | ||
146 | else | ||
147 | vmem_remove_mapping(crashk_res.start, size); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Map crashkernel memory | ||
152 | */ | ||
153 | void crash_map_reserved_pages(void) | ||
154 | { | ||
155 | crash_map_pages(1); | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Unmap crashkernel memory | ||
160 | */ | ||
161 | void crash_unmap_reserved_pages(void) | ||
162 | { | ||
163 | crash_map_pages(0); | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * Give back memory to hypervisor before new kdump is loaded | ||
168 | */ | ||
169 | static int machine_kexec_prepare_kdump(void) | ||
170 | { | ||
171 | #ifdef CONFIG_CRASH_DUMP | ||
172 | if (MACHINE_IS_VM) | ||
173 | diag10_range(PFN_DOWN(crashk_res.start), | ||
174 | PFN_DOWN(crashk_res.end - crashk_res.start + 1)); | ||
175 | return 0; | ||
176 | #else | ||
177 | return -EINVAL; | ||
178 | #endif | ||
179 | } | ||
180 | |||
30 | int machine_kexec_prepare(struct kimage *image) | 181 | int machine_kexec_prepare(struct kimage *image) |
31 | { | 182 | { |
32 | void *reboot_code_buffer; | 183 | void *reboot_code_buffer; |
@@ -35,6 +186,9 @@ int machine_kexec_prepare(struct kimage *image) | |||
35 | if (ipl_flags & IPL_NSS_VALID) | 186 | if (ipl_flags & IPL_NSS_VALID) |
36 | return -ENOSYS; | 187 | return -ENOSYS; |
37 | 188 | ||
189 | if (image->type == KEXEC_TYPE_CRASH) | ||
190 | return machine_kexec_prepare_kdump(); | ||
191 | |||
38 | /* We don't support anything but the default image type for now. */ | 192 | /* We don't support anything but the default image type for now. */ |
39 | if (image->type != KEXEC_TYPE_DEFAULT) | 193 | if (image->type != KEXEC_TYPE_DEFAULT) |
40 | return -EINVAL; | 194 | return -EINVAL; |
@@ -51,27 +205,53 @@ void machine_kexec_cleanup(struct kimage *image) | |||
51 | { | 205 | { |
52 | } | 206 | } |
53 | 207 | ||
208 | void arch_crash_save_vmcoreinfo(void) | ||
209 | { | ||
210 | VMCOREINFO_SYMBOL(lowcore_ptr); | ||
211 | VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); | ||
212 | } | ||
213 | |||
54 | void machine_shutdown(void) | 214 | void machine_shutdown(void) |
55 | { | 215 | { |
56 | } | 216 | } |
57 | 217 | ||
58 | static void __machine_kexec(void *data) | 218 | /* |
219 | * Do normal kexec | ||
220 | */ | ||
221 | static void __do_machine_kexec(void *data) | ||
59 | { | 222 | { |
60 | relocate_kernel_t data_mover; | 223 | relocate_kernel_t data_mover; |
61 | struct kimage *image = data; | 224 | struct kimage *image = data; |
62 | 225 | ||
63 | pfault_fini(); | ||
64 | s390_reset_system(); | ||
65 | |||
66 | data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); | 226 | data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); |
67 | 227 | ||
68 | /* Call the moving routine */ | 228 | /* Call the moving routine */ |
69 | (*data_mover)(&image->head, image->start); | 229 | (*data_mover)(&image->head, image->start); |
70 | for (;;); | ||
71 | } | 230 | } |
72 | 231 | ||
232 | /* | ||
233 | * Reset system and call either kdump or normal kexec | ||
234 | */ | ||
235 | static void __machine_kexec(void *data) | ||
236 | { | ||
237 | struct kimage *image = data; | ||
238 | |||
239 | pfault_fini(); | ||
240 | if (image->type == KEXEC_TYPE_CRASH) | ||
241 | s390_reset_system(__do_machine_kdump, data); | ||
242 | else | ||
243 | s390_reset_system(__do_machine_kexec, data); | ||
244 | disabled_wait((unsigned long) __builtin_return_address(0)); | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Do either kdump or normal kexec. In case of kdump we first ask | ||
249 | * purgatory, if kdump checksums are valid. | ||
250 | */ | ||
73 | void machine_kexec(struct kimage *image) | 251 | void machine_kexec(struct kimage *image) |
74 | { | 252 | { |
253 | if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image)) | ||
254 | return; | ||
75 | tracer_disable(); | 255 | tracer_disable(); |
76 | smp_send_stop(); | 256 | smp_send_stop(); |
77 | smp_switch_to_ipl_cpu(__machine_kexec, image); | 257 | smp_switch_to_ipl_cpu(__machine_kexec, image); |
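With s390_reset_system() now taking a continuation, __machine_kexec() can send both image types through the same reset path and branch into either __do_machine_kdump() or __do_machine_kexec() afterwards. A compilable userspace model of that dispatch (only the shape of the call flow is taken from the patch, the function bodies are placeholders):

#include <stdio.h>

struct kimage { int type; };
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH   1

/* models s390_reset_system(func, data): common reset, then continue */
static void reset_system(void (*func)(void *), void *data)
{
	puts("common system reset done");
	if (func)
		func(data);
}

static void do_machine_kdump(void *data) { (void)data; puts("-> kdump entry"); }
static void do_machine_kexec(void *data) { (void)data; puts("-> normal kexec"); }

static void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH)
		reset_system(do_machine_kdump, image);
	else
		reset_system(do_machine_kexec, image);
}

int main(void)
{
	struct kimage crash = { KEXEC_TYPE_CRASH };
	struct kimage normal = { KEXEC_TYPE_DEFAULT };

	machine_kexec(&crash);
	machine_kexec(&normal);
	return 0;
}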
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c index 0fbe4e32f7ba..19b4568f4cee 100644 --- a/arch/s390/kernel/mem_detect.c +++ b/arch/s390/kernel/mem_detect.c | |||
@@ -62,3 +62,72 @@ void detect_memory_layout(struct mem_chunk chunk[]) | |||
62 | arch_local_irq_restore(flags); | 62 | arch_local_irq_restore(flags); |
63 | } | 63 | } |
64 | EXPORT_SYMBOL(detect_memory_layout); | 64 | EXPORT_SYMBOL(detect_memory_layout); |
65 | |||
66 | /* | ||
67 | * Create memory hole with given address, size, and type | ||
68 | */ | ||
69 | void create_mem_hole(struct mem_chunk chunks[], unsigned long addr, | ||
70 | unsigned long size, int type) | ||
71 | { | ||
72 | unsigned long start, end, new_size; | ||
73 | int i; | ||
74 | |||
75 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
76 | if (chunks[i].size == 0) | ||
77 | continue; | ||
78 | if (addr + size < chunks[i].addr) | ||
79 | continue; | ||
80 | if (addr >= chunks[i].addr + chunks[i].size) | ||
81 | continue; | ||
82 | start = max(addr, chunks[i].addr); | ||
83 | end = min(addr + size, chunks[i].addr + chunks[i].size); | ||
84 | new_size = end - start; | ||
85 | if (new_size == 0) | ||
86 | continue; | ||
87 | if (start == chunks[i].addr && | ||
88 | end == chunks[i].addr + chunks[i].size) { | ||
89 | /* Remove chunk */ | ||
90 | chunks[i].type = type; | ||
91 | } else if (start == chunks[i].addr) { | ||
92 | /* Make chunk smaller at start */ | ||
93 | if (i >= MEMORY_CHUNKS - 1) | ||
94 | panic("Unable to create memory hole"); | ||
95 | memmove(&chunks[i + 1], &chunks[i], | ||
96 | sizeof(struct mem_chunk) * | ||
97 | (MEMORY_CHUNKS - (i + 1))); | ||
98 | chunks[i + 1].addr = chunks[i].addr + new_size; | ||
99 | chunks[i + 1].size = chunks[i].size - new_size; | ||
100 | chunks[i].size = new_size; | ||
101 | chunks[i].type = type; | ||
102 | i += 1; | ||
103 | } else if (end == chunks[i].addr + chunks[i].size) { | ||
104 | /* Make chunk smaller at end */ | ||
105 | if (i >= MEMORY_CHUNKS - 1) | ||
106 | panic("Unable to create memory hole"); | ||
107 | memmove(&chunks[i + 1], &chunks[i], | ||
108 | sizeof(struct mem_chunk) * | ||
109 | (MEMORY_CHUNKS - (i + 1))); | ||
110 | chunks[i + 1].addr = start; | ||
111 | chunks[i + 1].size = new_size; | ||
112 | chunks[i + 1].type = type; | ||
113 | chunks[i].size -= new_size; | ||
114 | i += 1; | ||
115 | } else { | ||
116 | /* Create memory hole */ | ||
117 | if (i >= MEMORY_CHUNKS - 2) | ||
118 | panic("Unable to create memory hole"); | ||
119 | memmove(&chunks[i + 2], &chunks[i], | ||
120 | sizeof(struct mem_chunk) * | ||
121 | (MEMORY_CHUNKS - (i + 2))); | ||
122 | chunks[i + 1].addr = addr; | ||
123 | chunks[i + 1].size = size; | ||
124 | chunks[i + 1].type = type; | ||
125 | chunks[i + 2].addr = addr + size; | ||
126 | chunks[i + 2].size = | ||
127 | chunks[i].addr + chunks[i].size - (addr + size); | ||
128 | chunks[i + 2].type = chunks[i].type; | ||
129 | chunks[i].size = addr - chunks[i].addr; | ||
130 | i += 2; | ||
131 | } | ||
132 | } | ||
133 | } | ||
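create_mem_hole() above carves a typed hole out of the chunk array; in the worst case one chunk is split into three entries (head, hole, tail) and the remaining entries are shifted up. The sketch below reproduces only that three-way split for a hole strictly inside a single chunk; the struct layout and array size are simplified stand-ins for the kernel's mem_chunk definitions:

#include <stdio.h>
#include <string.h>

#define MAX_CHUNKS 8

struct chunk { unsigned long addr, size; int type; };

/* Split chunk i into head / hole / tail; returns the new chunk count. */
static int punch_hole(struct chunk *c, int n, unsigned long addr,
		      unsigned long size, int type)
{
	int i;

	for (i = 0; i < n; i++) {
		struct chunk *ck = &c[i];

		if (addr <= ck->addr || addr + size >= ck->addr + ck->size)
			continue;	/* only the strictly interior case here */
		if (n + 2 > MAX_CHUNKS)
			return -1;	/* kernel version panics instead */
		memmove(&c[i + 2], &c[i], sizeof(*c) * (n - i));
		c[i + 1].addr = addr;
		c[i + 1].size = size;
		c[i + 1].type = type;
		c[i + 2].addr = addr + size;
		c[i + 2].size = ck->addr + ck->size - (addr + size);
		c[i + 2].type = ck->type;
		c[i].size = addr - ck->addr;
		return n + 2;
	}
	return n;
}

int main(void)
{
	struct chunk chunks[MAX_CHUNKS] = { { 0, 1UL << 30, 0 } };
	int i, n = punch_hole(chunks, 1, 256UL << 20, 128UL << 20, 2);

	for (i = 0; i < n; i++)
		printf("chunk %d: addr=%#lx size=%#lx type=%d\n",
		       i, chunks[i].addr, chunks[i].size, chunks[i].type);
	return 0;
}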
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 541a7509faeb..9451b210a1b4 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/elfcore.h> | ||
15 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
16 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
17 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
@@ -117,7 +118,8 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
117 | struct pt_regs regs; | 118 | struct pt_regs regs; |
118 | 119 | ||
119 | memset(®s, 0, sizeof(regs)); | 120 | memset(®s, 0, sizeof(regs)); |
120 | regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; | 121 | regs.psw.mask = psw_kernel_bits | |
122 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
121 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; | 123 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; |
122 | regs.gprs[9] = (unsigned long) fn; | 124 | regs.gprs[9] = (unsigned long) fn; |
123 | regs.gprs[10] = (unsigned long) arg; | 125 | regs.gprs[10] = (unsigned long) arg; |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 311e9d712888..6e0073e43f54 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -74,7 +74,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
74 | 74 | ||
75 | static void *c_start(struct seq_file *m, loff_t *pos) | 75 | static void *c_start(struct seq_file *m, loff_t *pos) |
76 | { | 76 | { |
77 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | 77 | return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; |
78 | } | 78 | } |
79 | 79 | ||
80 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | 80 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index ef86ad243986..450931a45b68 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -42,34 +42,37 @@ enum s390_regset { | |||
42 | REGSET_GENERAL, | 42 | REGSET_GENERAL, |
43 | REGSET_FP, | 43 | REGSET_FP, |
44 | REGSET_LAST_BREAK, | 44 | REGSET_LAST_BREAK, |
45 | REGSET_SYSTEM_CALL, | ||
45 | REGSET_GENERAL_EXTENDED, | 46 | REGSET_GENERAL_EXTENDED, |
46 | }; | 47 | }; |
47 | 48 | ||
48 | void update_per_regs(struct task_struct *task) | 49 | void update_per_regs(struct task_struct *task) |
49 | { | 50 | { |
50 | static const struct per_regs per_single_step = { | ||
51 | .control = PER_EVENT_IFETCH, | ||
52 | .start = 0, | ||
53 | .end = PSW_ADDR_INSN, | ||
54 | }; | ||
55 | struct pt_regs *regs = task_pt_regs(task); | 51 | struct pt_regs *regs = task_pt_regs(task); |
56 | struct thread_struct *thread = &task->thread; | 52 | struct thread_struct *thread = &task->thread; |
57 | const struct per_regs *new; | 53 | struct per_regs old, new; |
58 | struct per_regs old; | 54 | |
59 | 55 | /* Copy user specified PER registers */ | |
60 | /* TIF_SINGLE_STEP overrides the user specified PER registers. */ | 56 | new.control = thread->per_user.control; |
61 | new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ? | 57 | new.start = thread->per_user.start; |
62 | &per_single_step : &thread->per_user; | 58 | new.end = thread->per_user.end; |
59 | |||
60 | /* merge TIF_SINGLE_STEP into user specified PER registers. */ | ||
61 | if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { | ||
62 | new.control |= PER_EVENT_IFETCH; | ||
63 | new.start = 0; | ||
64 | new.end = PSW_ADDR_INSN; | ||
65 | } | ||
63 | 66 | ||
64 | /* Take care of the PER enablement bit in the PSW. */ | 67 | /* Take care of the PER enablement bit in the PSW. */ |
65 | if (!(new->control & PER_EVENT_MASK)) { | 68 | if (!(new.control & PER_EVENT_MASK)) { |
66 | regs->psw.mask &= ~PSW_MASK_PER; | 69 | regs->psw.mask &= ~PSW_MASK_PER; |
67 | return; | 70 | return; |
68 | } | 71 | } |
69 | regs->psw.mask |= PSW_MASK_PER; | 72 | regs->psw.mask |= PSW_MASK_PER; |
70 | __ctl_store(old, 9, 11); | 73 | __ctl_store(old, 9, 11); |
71 | if (memcmp(new, &old, sizeof(struct per_regs)) != 0) | 74 | if (memcmp(&new, &old, sizeof(struct per_regs)) != 0) |
72 | __ctl_load(*new, 9, 11); | 75 | __ctl_load(new, 9, 11); |
73 | } | 76 | } |
74 | 77 | ||
75 | void user_enable_single_step(struct task_struct *task) | 78 | void user_enable_single_step(struct task_struct *task) |
@@ -166,8 +169,8 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
166 | */ | 169 | */ |
167 | tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); | 170 | tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); |
168 | if (addr == (addr_t) &dummy->regs.psw.mask) | 171 | if (addr == (addr_t) &dummy->regs.psw.mask) |
169 | /* Remove per bit from user psw. */ | 172 | /* Return a clean psw mask. */ |
170 | tmp &= ~PSW_MASK_PER; | 173 | tmp = psw_user_bits | (tmp & PSW_MASK_USER); |
171 | 174 | ||
172 | } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { | 175 | } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { |
173 | /* | 176 | /* |
@@ -289,18 +292,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
289 | * psw and gprs are stored on the stack | 292 | * psw and gprs are stored on the stack |
290 | */ | 293 | */ |
291 | if (addr == (addr_t) &dummy->regs.psw.mask && | 294 | if (addr == (addr_t) &dummy->regs.psw.mask && |
292 | #ifdef CONFIG_COMPAT | 295 | ((data & ~PSW_MASK_USER) != psw_user_bits || |
293 | data != PSW_MASK_MERGE(psw_user32_bits, data) && | 296 | ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) |
294 | #endif | ||
295 | data != PSW_MASK_MERGE(psw_user_bits, data)) | ||
296 | /* Invalid psw mask. */ | 297 | /* Invalid psw mask. */ |
297 | return -EINVAL; | 298 | return -EINVAL; |
298 | #ifndef CONFIG_64BIT | ||
299 | if (addr == (addr_t) &dummy->regs.psw.addr) | 299 | if (addr == (addr_t) &dummy->regs.psw.addr) |
300 | /* I'd like to reject addresses without the | 300 | /* |
301 | high order bit but older gdb's rely on it */ | 301 | * The debugger changed the instruction address, |
302 | data |= PSW_ADDR_AMODE; | 302 | * reset system call restart, see signal.c:do_signal |
303 | #endif | 303 | */ |
304 | task_thread_info(child)->system_call = 0; | ||
305 | |||
304 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; | 306 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; |
305 | 307 | ||
306 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { | 308 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { |
@@ -495,21 +497,21 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) | |||
495 | __u32 tmp; | 497 | __u32 tmp; |
496 | 498 | ||
497 | if (addr < (addr_t) &dummy32->regs.acrs) { | 499 | if (addr < (addr_t) &dummy32->regs.acrs) { |
500 | struct pt_regs *regs = task_pt_regs(child); | ||
498 | /* | 501 | /* |
499 | * psw and gprs are stored on the stack | 502 | * psw and gprs are stored on the stack |
500 | */ | 503 | */ |
501 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 504 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
502 | /* Fake a 31 bit psw mask. */ | 505 | /* Fake a 31 bit psw mask. */ |
503 | tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); | 506 | tmp = (__u32)(regs->psw.mask >> 32); |
504 | tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); | 507 | tmp = psw32_user_bits | (tmp & PSW32_MASK_USER); |
505 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 508 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
506 | /* Fake a 31 bit psw address. */ | 509 | /* Fake a 31 bit psw address. */ |
507 | tmp = (__u32) task_pt_regs(child)->psw.addr | | 510 | tmp = (__u32) regs->psw.addr | |
508 | PSW32_ADDR_AMODE31; | 511 | (__u32)(regs->psw.mask & PSW_MASK_BA); |
509 | } else { | 512 | } else { |
510 | /* gpr 0-15 */ | 513 | /* gpr 0-15 */ |
511 | tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw + | 514 | tmp = *(__u32 *)((addr_t) ®s->psw + addr*2 + 4); |
512 | addr*2 + 4); | ||
513 | } | 515 | } |
514 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | 516 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { |
515 | /* | 517 | /* |
@@ -594,24 +596,32 @@ static int __poke_user_compat(struct task_struct *child, | |||
594 | addr_t offset; | 596 | addr_t offset; |
595 | 597 | ||
596 | if (addr < (addr_t) &dummy32->regs.acrs) { | 598 | if (addr < (addr_t) &dummy32->regs.acrs) { |
599 | struct pt_regs *regs = task_pt_regs(child); | ||
597 | /* | 600 | /* |
598 | * psw, gprs, acrs and orig_gpr2 are stored on the stack | 601 | * psw, gprs, acrs and orig_gpr2 are stored on the stack |
599 | */ | 602 | */ |
600 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 603 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
601 | /* Build a 64 bit psw mask from 31 bit mask. */ | 604 | /* Build a 64 bit psw mask from 31 bit mask. */ |
602 | if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) | 605 | if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits) |
603 | /* Invalid psw mask. */ | 606 | /* Invalid psw mask. */ |
604 | return -EINVAL; | 607 | return -EINVAL; |
605 | task_pt_regs(child)->psw.mask = | 608 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
606 | PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); | 609 | (regs->psw.mask & PSW_MASK_BA) | |
610 | (__u64)(tmp & PSW32_MASK_USER) << 32; | ||
607 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 611 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
608 | /* Build a 64 bit psw address from 31 bit address. */ | 612 | /* Build a 64 bit psw address from 31 bit address. */ |
609 | task_pt_regs(child)->psw.addr = | 613 | regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; |
610 | (__u64) tmp & PSW32_ADDR_INSN; | 614 | /* Transfer 31 bit amode bit to psw mask. */ |
615 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | | ||
616 | (__u64)(tmp & PSW32_ADDR_AMODE); | ||
617 | /* | ||
618 | * The debugger changed the instruction address, | ||
619 | * reset system call restart, see signal.c:do_signal | ||
620 | */ | ||
621 | task_thread_info(child)->system_call = 0; | ||
611 | } else { | 622 | } else { |
612 | /* gpr 0-15 */ | 623 | /* gpr 0-15 */ |
613 | *(__u32*)((addr_t) &task_pt_regs(child)->psw | 624 | *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; |
614 | + addr*2 + 4) = tmp; | ||
615 | } | 625 | } |
616 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | 626 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { |
617 | /* | 627 | /* |
@@ -735,7 +745,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
735 | * debugger stored an invalid system call number. Skip | 745 | * debugger stored an invalid system call number. Skip |
736 | * the system call and the system call restart handling. | 746 | * the system call and the system call restart handling. |
737 | */ | 747 | */ |
738 | regs->svcnr = 0; | 748 | clear_thread_flag(TIF_SYSCALL); |
739 | ret = -1; | 749 | ret = -1; |
740 | } | 750 | } |
741 | 751 | ||
@@ -897,6 +907,26 @@ static int s390_last_break_get(struct task_struct *target, | |||
897 | 907 | ||
898 | #endif | 908 | #endif |
899 | 909 | ||
910 | static int s390_system_call_get(struct task_struct *target, | ||
911 | const struct user_regset *regset, | ||
912 | unsigned int pos, unsigned int count, | ||
913 | void *kbuf, void __user *ubuf) | ||
914 | { | ||
915 | unsigned int *data = &task_thread_info(target)->system_call; | ||
916 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
917 | data, 0, sizeof(unsigned int)); | ||
918 | } | ||
919 | |||
920 | static int s390_system_call_set(struct task_struct *target, | ||
921 | const struct user_regset *regset, | ||
922 | unsigned int pos, unsigned int count, | ||
923 | const void *kbuf, const void __user *ubuf) | ||
924 | { | ||
925 | unsigned int *data = &task_thread_info(target)->system_call; | ||
926 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
927 | data, 0, sizeof(unsigned int)); | ||
928 | } | ||
929 | |||
900 | static const struct user_regset s390_regsets[] = { | 930 | static const struct user_regset s390_regsets[] = { |
901 | [REGSET_GENERAL] = { | 931 | [REGSET_GENERAL] = { |
902 | .core_note_type = NT_PRSTATUS, | 932 | .core_note_type = NT_PRSTATUS, |
@@ -923,6 +953,14 @@ static const struct user_regset s390_regsets[] = { | |||
923 | .get = s390_last_break_get, | 953 | .get = s390_last_break_get, |
924 | }, | 954 | }, |
925 | #endif | 955 | #endif |
956 | [REGSET_SYSTEM_CALL] = { | ||
957 | .core_note_type = NT_S390_SYSTEM_CALL, | ||
958 | .n = 1, | ||
959 | .size = sizeof(unsigned int), | ||
960 | .align = sizeof(unsigned int), | ||
961 | .get = s390_system_call_get, | ||
962 | .set = s390_system_call_set, | ||
963 | }, | ||
926 | }; | 964 | }; |
927 | 965 | ||
928 | static const struct user_regset_view user_s390_view = { | 966 | static const struct user_regset_view user_s390_view = { |
@@ -1102,6 +1140,14 @@ static const struct user_regset s390_compat_regsets[] = { | |||
1102 | .align = sizeof(long), | 1140 | .align = sizeof(long), |
1103 | .get = s390_compat_last_break_get, | 1141 | .get = s390_compat_last_break_get, |
1104 | }, | 1142 | }, |
1143 | [REGSET_SYSTEM_CALL] = { | ||
1144 | .core_note_type = NT_S390_SYSTEM_CALL, | ||
1145 | .n = 1, | ||
1146 | .size = sizeof(compat_uint_t), | ||
1147 | .align = sizeof(compat_uint_t), | ||
1148 | .get = s390_system_call_get, | ||
1149 | .set = s390_system_call_set, | ||
1150 | }, | ||
1105 | [REGSET_GENERAL_EXTENDED] = { | 1151 | [REGSET_GENERAL_EXTENDED] = { |
1106 | .core_note_type = NT_S390_HIGH_GPRS, | 1152 | .core_note_type = NT_S390_HIGH_GPRS, |
1107 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), | 1153 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), |
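update_per_regs() above no longer lets TIF_SINGLE_STEP replace the debugger-supplied PER control block; the instruction-fetch event is OR-ed into it and the address range is widened to cover everything. A small sketch of that merge, with simplified constants standing in for the kernel's PER_EVENT_* and PSW_ADDR_INSN definitions:

#include <stdio.h>

#define PER_EVENT_IFETCH 0x40000000UL	/* assumed value, for illustration only */
#define ADDR_INSN_MASK   (~0UL)

struct per_regs { unsigned long control, start, end; };

static struct per_regs merge_per(struct per_regs user, int single_step)
{
	struct per_regs eff = user;	/* start from the user-specified request */

	if (single_step) {		/* widen instead of overriding it */
		eff.control |= PER_EVENT_IFETCH;
		eff.start = 0;
		eff.end = ADDR_INSN_MASK;
	}
	return eff;
}

int main(void)
{
	struct per_regs user = { 0x10000000UL, 0x1000, 0x2000 };
	struct per_regs eff = merge_per(user, 1);

	printf("control=%#lx start=%#lx end=%#lx\n", eff.control, eff.start, eff.end);
	return 0;
}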
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 303d961c3bb5..ad67c214be04 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -10,6 +10,12 @@ | |||
10 | #include <asm/asm-offsets.h> | 10 | #include <asm/asm-offsets.h> |
11 | 11 | ||
12 | # | 12 | # |
13 | # store_status: Empty implementation until kdump is supported on 31 bit | ||
14 | # | ||
15 | ENTRY(store_status) | ||
16 | br %r14 | ||
17 | |||
18 | # | ||
13 | # do_reipl_asm | 19 | # do_reipl_asm |
14 | # Parameter: r2 = schid of reipl device | 20 | # Parameter: r2 = schid of reipl device |
15 | # | 21 | # |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index e690975403f4..732a793ec53a 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S | |||
@@ -17,11 +17,11 @@ | |||
17 | # | 17 | # |
18 | ENTRY(store_status) | 18 | ENTRY(store_status) |
19 | /* Save register one and load save area base */ | 19 | /* Save register one and load save area base */ |
20 | stg %r1,__LC_SAVE_AREA_64(%r0) | 20 | stg %r1,__LC_SAVE_AREA+120(%r0) |
21 | lghi %r1,SAVE_AREA_BASE | 21 | lghi %r1,SAVE_AREA_BASE |
22 | /* General purpose registers */ | 22 | /* General purpose registers */ |
23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
24 | lg %r2,__LC_SAVE_AREA_64(%r0) | 24 | lg %r2,__LC_SAVE_AREA+120(%r0) |
25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | 25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) |
26 | /* Control registers */ | 26 | /* Control registers */ |
27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
@@ -62,8 +62,11 @@ ENTRY(store_status) | |||
62 | larl %r2,store_status | 62 | larl %r2,store_status |
63 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) | 63 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) |
64 | br %r14 | 64 | br %r14 |
65 | .align 8 | 65 | |
66 | .section .bss | ||
67 | .align 8 | ||
66 | .Lclkcmp: .quad 0x0000000000000000 | 68 | .Lclkcmp: .quad 0x0000000000000000 |
69 | .previous | ||
67 | 70 | ||
68 | # | 71 | # |
69 | # do_reipl_asm | 72 | # do_reipl_asm |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7b371c37061d..8ac6bfa2786c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -42,6 +42,9 @@ | |||
42 | #include <linux/reboot.h> | 42 | #include <linux/reboot.h> |
43 | #include <linux/topology.h> | 43 | #include <linux/topology.h> |
44 | #include <linux/ftrace.h> | 44 | #include <linux/ftrace.h> |
45 | #include <linux/kexec.h> | ||
46 | #include <linux/crash_dump.h> | ||
47 | #include <linux/memory.h> | ||
45 | 48 | ||
46 | #include <asm/ipl.h> | 49 | #include <asm/ipl.h> |
47 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
@@ -57,12 +60,13 @@ | |||
57 | #include <asm/ebcdic.h> | 60 | #include <asm/ebcdic.h> |
58 | #include <asm/compat.h> | 61 | #include <asm/compat.h> |
59 | #include <asm/kvm_virtio.h> | 62 | #include <asm/kvm_virtio.h> |
63 | #include <asm/diag.h> | ||
60 | 64 | ||
61 | long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | | 65 | long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | |
62 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); | 66 | PSW_MASK_EA | PSW_MASK_BA; |
63 | long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | 67 | long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | |
64 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 68 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | |
65 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | 69 | PSW_MASK_PSTATE | PSW_ASC_HOME; |
66 | 70 | ||
67 | /* | 71 | /* |
68 | * User copy operations. | 72 | * User copy operations. |
@@ -274,22 +278,14 @@ early_param("mem", early_parse_mem); | |||
274 | unsigned int user_mode = HOME_SPACE_MODE; | 278 | unsigned int user_mode = HOME_SPACE_MODE; |
275 | EXPORT_SYMBOL_GPL(user_mode); | 279 | EXPORT_SYMBOL_GPL(user_mode); |
276 | 280 | ||
277 | static int set_amode_and_uaccess(unsigned long user_amode, | 281 | static int set_amode_primary(void) |
278 | unsigned long user32_amode) | ||
279 | { | 282 | { |
280 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | 283 | psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; |
281 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 284 | psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; |
282 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
283 | #ifdef CONFIG_COMPAT | 285 | #ifdef CONFIG_COMPAT |
284 | psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | | 286 | psw32_user_bits = |
285 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 287 | (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; |
286 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
287 | psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | | ||
288 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
289 | PSW32_MASK_PSTATE; | ||
290 | #endif | 288 | #endif |
291 | psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
292 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | ||
293 | 289 | ||
294 | if (MACHINE_HAS_MVCOS) { | 290 | if (MACHINE_HAS_MVCOS) { |
295 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | 291 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); |
@@ -325,7 +321,7 @@ early_param("user_mode", early_parse_user_mode); | |||
325 | static void setup_addressing_mode(void) | 321 | static void setup_addressing_mode(void) |
326 | { | 322 | { |
327 | if (user_mode == PRIMARY_SPACE_MODE) { | 323 | if (user_mode == PRIMARY_SPACE_MODE) { |
328 | if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) | 324 | if (set_amode_primary()) |
329 | pr_info("Address spaces switched, " | 325 | pr_info("Address spaces switched, " |
330 | "mvcos available\n"); | 326 | "mvcos available\n"); |
331 | else | 327 | else |
@@ -344,24 +340,25 @@ setup_lowcore(void) | |||
344 | */ | 340 | */ |
345 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); | 341 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); |
346 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); | 342 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); |
347 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 343 | lc->restart_psw.mask = psw_kernel_bits; |
348 | lc->restart_psw.addr = | 344 | lc->restart_psw.addr = |
349 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 345 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
350 | if (user_mode != HOME_SPACE_MODE) | 346 | lc->external_new_psw.mask = psw_kernel_bits | |
351 | lc->restart_psw.mask |= PSW_ASC_HOME; | 347 | PSW_MASK_DAT | PSW_MASK_MCHECK; |
352 | lc->external_new_psw.mask = psw_kernel_bits; | ||
353 | lc->external_new_psw.addr = | 348 | lc->external_new_psw.addr = |
354 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | 349 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; |
355 | lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; | 350 | lc->svc_new_psw.mask = psw_kernel_bits | |
351 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
356 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 352 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; |
357 | lc->program_new_psw.mask = psw_kernel_bits; | 353 | lc->program_new_psw.mask = psw_kernel_bits | |
354 | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
358 | lc->program_new_psw.addr = | 355 | lc->program_new_psw.addr = |
359 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | 356 | PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; |
360 | lc->mcck_new_psw.mask = | 357 | lc->mcck_new_psw.mask = psw_kernel_bits; |
361 | psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; | ||
362 | lc->mcck_new_psw.addr = | 358 | lc->mcck_new_psw.addr = |
363 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | 359 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; |
364 | lc->io_new_psw.mask = psw_kernel_bits; | 360 | lc->io_new_psw.mask = psw_kernel_bits | |
361 | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
365 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 362 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
366 | lc->clock_comparator = -1ULL; | 363 | lc->clock_comparator = -1ULL; |
367 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 364 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; |
@@ -435,10 +432,14 @@ static void __init setup_resources(void) | |||
435 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 432 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
436 | if (!memory_chunk[i].size) | 433 | if (!memory_chunk[i].size) |
437 | continue; | 434 | continue; |
435 | if (memory_chunk[i].type == CHUNK_OLDMEM || | ||
436 | memory_chunk[i].type == CHUNK_CRASHK) | ||
437 | continue; | ||
438 | res = alloc_bootmem_low(sizeof(*res)); | 438 | res = alloc_bootmem_low(sizeof(*res)); |
439 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | 439 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; |
440 | switch (memory_chunk[i].type) { | 440 | switch (memory_chunk[i].type) { |
441 | case CHUNK_READ_WRITE: | 441 | case CHUNK_READ_WRITE: |
442 | case CHUNK_CRASHK: | ||
442 | res->name = "System RAM"; | 443 | res->name = "System RAM"; |
443 | break; | 444 | break; |
444 | case CHUNK_READ_ONLY: | 445 | case CHUNK_READ_ONLY: |
@@ -479,6 +480,7 @@ static void __init setup_memory_end(void) | |||
479 | unsigned long max_mem; | 480 | unsigned long max_mem; |
480 | int i; | 481 | int i; |
481 | 482 | ||
483 | |||
482 | #ifdef CONFIG_ZFCPDUMP | 484 | #ifdef CONFIG_ZFCPDUMP |
483 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { | 485 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { |
484 | memory_end = ZFCPDUMP_HSA_SIZE; | 486 | memory_end = ZFCPDUMP_HSA_SIZE; |
@@ -545,11 +547,201 @@ static void __init setup_restart_psw(void) | |||
545 | * Setup restart PSW for absolute zero lowcore. This is necessary | 547 |
546 | * if PSW restart is done on an offline CPU that has lowcore zero | 548 | * if PSW restart is done on an offline CPU that has lowcore zero |
547 | */ | 549 | */ |
548 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 550 | psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; |
549 | psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 551 | psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
550 | copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); | 552 | copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); |
551 | } | 553 | } |
552 | 554 | ||
555 | static void __init setup_vmcoreinfo(void) | ||
556 | { | ||
557 | #ifdef CONFIG_KEXEC | ||
558 | unsigned long ptr = paddr_vmcoreinfo_note(); | ||
559 | |||
560 | copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr)); | ||
561 | #endif | ||
562 | } | ||
563 | |||
564 | #ifdef CONFIG_CRASH_DUMP | ||
565 | |||
566 | /* | ||
567 | * Find suitable location for crashkernel memory | ||
568 | */ | ||
569 | static unsigned long __init find_crash_base(unsigned long crash_size, | ||
570 | char **msg) | ||
571 | { | ||
572 | unsigned long crash_base; | ||
573 | struct mem_chunk *chunk; | ||
574 | int i; | ||
575 | |||
576 | if (memory_chunk[0].size < crash_size) { | ||
577 | *msg = "first memory chunk must be at least crashkernel size"; | ||
578 | return 0; | ||
579 | } | ||
580 | if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE)) | ||
581 | return OLDMEM_BASE; | ||
582 | |||
583 | for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { | ||
584 | chunk = &memory_chunk[i]; | ||
585 | if (chunk->size == 0) | ||
586 | continue; | ||
587 | if (chunk->type != CHUNK_READ_WRITE) | ||
588 | continue; | ||
589 | if (chunk->size < crash_size) | ||
590 | continue; | ||
591 | crash_base = (chunk->addr + chunk->size) - crash_size; | ||
592 | if (crash_base < crash_size) | ||
593 | continue; | ||
594 | if (crash_base < ZFCPDUMP_HSA_SIZE_MAX) | ||
595 | continue; | ||
596 | if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) | ||
597 | continue; | ||
598 | return crash_base; | ||
599 | } | ||
600 | *msg = "no suitable area found"; | ||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Check if crash_base and crash_size is valid | ||
606 | */ | ||
607 | static int __init verify_crash_base(unsigned long crash_base, | ||
608 | unsigned long crash_size, | ||
609 | char **msg) | ||
610 | { | ||
611 | struct mem_chunk *chunk; | ||
612 | int i; | ||
613 | |||
614 | /* | ||
615 | * Because we do the swap to zero, we must have at least 'crash_size' | ||
616 | * bytes free space before crash_base | ||
617 | */ | ||
618 | if (crash_size > crash_base) { | ||
619 | *msg = "crashkernel offset must be greater than size"; | ||
620 | return -EINVAL; | ||
621 | } | ||
622 | |||
623 | /* First memory chunk must be at least crash_size */ | ||
624 | if (memory_chunk[0].size < crash_size) { | ||
625 | *msg = "first memory chunk must be at least crashkernel size"; | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | /* Check if we fit into the respective memory chunk */ | ||
629 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
630 | chunk = &memory_chunk[i]; | ||
631 | if (chunk->size == 0) | ||
632 | continue; | ||
633 | if (crash_base < chunk->addr) | ||
634 | continue; | ||
635 | if (crash_base >= chunk->addr + chunk->size) | ||
636 | continue; | ||
637 | /* we have found the memory chunk */ | ||
638 | if (crash_base + crash_size > chunk->addr + chunk->size) { | ||
639 | *msg = "selected memory chunk is too small for " | ||
640 | "crashkernel memory"; | ||
641 | return -EINVAL; | ||
642 | } | ||
643 | return 0; | ||
644 | } | ||
645 | *msg = "invalid memory range specified"; | ||
646 | return -EINVAL; | ||
647 | } | ||
648 | |||
649 | /* | ||
650 | * Reserve kdump memory by creating a memory hole in the mem_chunk array | ||
651 | */ | ||
652 | static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size, | ||
653 | int type) | ||
654 | { | ||
655 | |||
656 | create_mem_hole(memory_chunk, addr, size, type); | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * When kdump is enabled, we have to ensure that no memory from | ||
661 | * the area [0 - crashkernel memory size] and | ||
662 | * [crashk_res.start - crashk_res.end] is set offline. | ||
663 | */ | ||
664 | static int kdump_mem_notifier(struct notifier_block *nb, | ||
665 | unsigned long action, void *data) | ||
666 | { | ||
667 | struct memory_notify *arg = data; | ||
668 | |||
669 | if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res))) | ||
670 | return NOTIFY_BAD; | ||
671 | if (arg->start_pfn > PFN_DOWN(crashk_res.end)) | ||
672 | return NOTIFY_OK; | ||
673 | if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) | ||
674 | return NOTIFY_OK; | ||
675 | return NOTIFY_BAD; | ||
676 | } | ||
677 | |||
678 | static struct notifier_block kdump_mem_nb = { | ||
679 | .notifier_call = kdump_mem_notifier, | ||
680 | }; | ||
681 | |||
682 | #endif | ||
683 | |||
684 | /* | ||
685 | * Make sure that oldmem, where the dump is stored, is protected | ||
686 | */ | ||
687 | static void reserve_oldmem(void) | ||
688 | { | ||
689 | #ifdef CONFIG_CRASH_DUMP | ||
690 | if (!OLDMEM_BASE) | ||
691 | return; | ||
692 | |||
693 | reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM); | ||
694 | reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE, | ||
695 | CHUNK_OLDMEM); | ||
696 | if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size) | ||
697 | saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; | ||
698 | else | ||
699 | saved_max_pfn = PFN_DOWN(real_memory_size) - 1; | ||
700 | #endif | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | * Reserve memory for kdump kernel to be loaded with kexec | ||
705 | */ | ||
706 | static void __init reserve_crashkernel(void) | ||
707 | { | ||
708 | #ifdef CONFIG_CRASH_DUMP | ||
709 | unsigned long long crash_base, crash_size; | ||
710 | char *msg; | ||
711 | int rc; | ||
712 | |||
713 | rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, | ||
714 | &crash_base); | ||
715 | if (rc || crash_size == 0) | ||
716 | return; | ||
717 | crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); | ||
718 | crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); | ||
719 | if (register_memory_notifier(&kdump_mem_nb)) | ||
720 | return; | ||
721 | if (!crash_base) | ||
722 | crash_base = find_crash_base(crash_size, &msg); | ||
723 | if (!crash_base) { | ||
724 | pr_info("crashkernel reservation failed: %s\n", msg); | ||
725 | unregister_memory_notifier(&kdump_mem_nb); | ||
726 | return; | ||
727 | } | ||
728 | if (verify_crash_base(crash_base, crash_size, &msg)) { | ||
729 | pr_info("crashkernel reservation failed: %s\n", msg); | ||
730 | unregister_memory_notifier(&kdump_mem_nb); | ||
731 | return; | ||
732 | } | ||
733 | if (!OLDMEM_BASE && MACHINE_IS_VM) | ||
734 | diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); | ||
735 | crashk_res.start = crash_base; | ||
736 | crashk_res.end = crash_base + crash_size - 1; | ||
737 | insert_resource(&iomem_resource, &crashk_res); | ||
738 | reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK); | ||
739 | pr_info("Reserving %lluMB of memory at %lluMB " | ||
740 | "for crashkernel (System RAM: %luMB)\n", | ||
741 | crash_size >> 20, crash_base >> 20, memory_end >> 20); | ||
742 | #endif | ||
743 | } | ||
744 | |||
553 | static void __init | 745 | static void __init |
554 | setup_memory(void) | 746 | setup_memory(void) |
555 | { | 747 | { |
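
The hunk above adds the crashkernel handling to setup.c: reserve_crashkernel() parses crashkernel=size[@offset] from the command line, aligns both values to KEXEC_CRASH_MEM_ALIGN and, when no offset was given, lets find_crash_base() scan the mem_chunk array from the top down for a read/write chunk that can hold the reservation while staying above the low swap-to-zero area, the zfcpdump HSA and the initrd. A minimal userspace sketch of that top-down search; the chunk layout and the HSA/initrd stand-ins are assumptions for illustration, not the kernel's definitions:

```c
#include <stdio.h>

#define CHUNK_READ_WRITE   0
#define MEMORY_CHUNKS      4
#define HSA_SIZE           (32UL << 20)   /* stand-in for ZFCPDUMP_HSA_SIZE_MAX */
#define INITRD_END         (64UL << 20)   /* stand-in for INITRD_START + INITRD_SIZE */

struct mem_chunk {
	unsigned long addr;
	unsigned long size;
	int type;
};

/* Pick the highest suitable address for a crash kernel reservation. */
static unsigned long find_crash_base(const struct mem_chunk *chunk, int n,
				     unsigned long crash_size)
{
	unsigned long crash_base;
	int i;

	for (i = n - 1; i >= 0; i--) {
		if (!chunk[i].size || chunk[i].type != CHUNK_READ_WRITE)
			continue;
		if (chunk[i].size < crash_size)
			continue;
		crash_base = chunk[i].addr + chunk[i].size - crash_size;
		/* stay above the swap-to-zero area, the HSA and the initrd */
		if (crash_base < crash_size || crash_base < HSA_SIZE ||
		    crash_base < INITRD_END)
			continue;
		return crash_base;
	}
	return 0;	/* no suitable area found */
}

int main(void)
{
	struct mem_chunk chunks[MEMORY_CHUNKS] = {
		{ 0,           256UL << 20, CHUNK_READ_WRITE },
		{ 256UL << 20, 256UL << 20, CHUNK_READ_WRITE },
	};
	unsigned long base = find_crash_base(chunks, MEMORY_CHUNKS, 128UL << 20);

	printf("crash base: %#lx\n", base);	/* 0x18000000 with these numbers */
	return 0;
}
```

With the sample layout the reservation lands at the top of the second chunk, mirroring the "highest usable address" policy of the kernel routine.
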
@@ -580,6 +772,14 @@ setup_memory(void) | |||
580 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { | 772 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { |
581 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; | 773 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; |
582 | 774 | ||
775 | #ifdef CONFIG_CRASH_DUMP | ||
776 | if (OLDMEM_BASE) { | ||
777 | /* Move initrd behind kdump oldmem */ | ||
778 | if (start + INITRD_SIZE > OLDMEM_BASE && | ||
779 | start < OLDMEM_BASE + OLDMEM_SIZE) | ||
780 | start = OLDMEM_BASE + OLDMEM_SIZE; | ||
781 | } | ||
782 | #endif | ||
583 | if (start + INITRD_SIZE > memory_end) { | 783 | if (start + INITRD_SIZE > memory_end) { |
584 | pr_err("initrd extends beyond end of " | 784 | pr_err("initrd extends beyond end of " |
585 | "memory (0x%08lx > 0x%08lx) " | 785 | "memory (0x%08lx > 0x%08lx) " |
@@ -610,7 +810,8 @@ setup_memory(void) | |||
610 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 810 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { |
611 | unsigned long start_chunk, end_chunk, pfn; | 811 | unsigned long start_chunk, end_chunk, pfn; |
612 | 812 | ||
613 | if (memory_chunk[i].type != CHUNK_READ_WRITE) | 813 | if (memory_chunk[i].type != CHUNK_READ_WRITE && |
814 | memory_chunk[i].type != CHUNK_CRASHK) | ||
614 | continue; | 815 | continue; |
615 | start_chunk = PFN_DOWN(memory_chunk[i].addr); | 816 | start_chunk = PFN_DOWN(memory_chunk[i].addr); |
616 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); | 817 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); |
@@ -644,6 +845,15 @@ setup_memory(void) | |||
644 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, | 845 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, |
645 | BOOTMEM_DEFAULT); | 846 | BOOTMEM_DEFAULT); |
646 | 847 | ||
848 | #ifdef CONFIG_CRASH_DUMP | ||
849 | if (crashk_res.start) | ||
850 | reserve_bootmem(crashk_res.start, | ||
851 | crashk_res.end - crashk_res.start + 1, | ||
852 | BOOTMEM_DEFAULT); | ||
853 | if (is_kdump_kernel()) | ||
854 | reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE, | ||
855 | PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT); | ||
856 | #endif | ||
647 | #ifdef CONFIG_BLK_DEV_INITRD | 857 | #ifdef CONFIG_BLK_DEV_INITRD |
648 | if (INITRD_START && INITRD_SIZE) { | 858 | if (INITRD_START && INITRD_SIZE) { |
649 | if (INITRD_START + INITRD_SIZE <= memory_end) { | 859 | if (INITRD_START + INITRD_SIZE <= memory_end) { |
@@ -812,8 +1022,11 @@ setup_arch(char **cmdline_p) | |||
812 | setup_ipl(); | 1022 | setup_ipl(); |
813 | setup_memory_end(); | 1023 | setup_memory_end(); |
814 | setup_addressing_mode(); | 1024 | setup_addressing_mode(); |
1025 | reserve_oldmem(); | ||
1026 | reserve_crashkernel(); | ||
815 | setup_memory(); | 1027 | setup_memory(); |
816 | setup_resources(); | 1028 | setup_resources(); |
1029 | setup_vmcoreinfo(); | ||
817 | setup_restart_psw(); | 1030 | setup_restart_psw(); |
818 | setup_lowcore(); | 1031 | setup_lowcore(); |
819 | 1032 | ||
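
Once a crashkernel region exists, memory hotplug must not be allowed to offline the low area [0, crash size) — it serves as the swap target when the crash kernel is started — nor the reserved region itself; kdump_mem_notifier rejects exactly those requests. A rough standalone model of the range check (the PFN arithmetic and the inclusive resource end follow the kernel's conventions, everything else is illustrative):

```c
#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define NOTIFY_OK	0
#define NOTIFY_BAD	1

struct range { unsigned long start, end; };	/* inclusive end, like crashk_res */

/* Reject offlining of [0, crash_size) or of the crashkernel region itself. */
static int kdump_mem_check(struct range crashk,
			   unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long crash_size = crashk.end - crashk.start + 1;

	if (start_pfn < PFN_DOWN(crash_size))
		return NOTIFY_BAD;			/* low swap area */
	if (start_pfn > PFN_DOWN(crashk.end))
		return NOTIFY_OK;			/* entirely above */
	if (start_pfn + nr_pages - 1 < PFN_DOWN(crashk.start))
		return NOTIFY_OK;			/* entirely below */
	return NOTIFY_BAD;				/* overlaps reservation */
}

int main(void)
{
	struct range crashk = { 0x10000000, 0x17ffffff };	/* 128 MiB at 256 MiB */

	printf("%d\n", kdump_mem_check(crashk, PFN_DOWN(0x20000000UL), 1024)); /* OK  */
	printf("%d\n", kdump_mem_check(crashk, PFN_DOWN(0x10000000UL), 1024)); /* BAD */
	return 0;
}
```
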
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 9a40e1cc5ec3..05a85bc14c98 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/ucontext.h> | 30 | #include <asm/ucontext.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/lowcore.h> | 32 | #include <asm/lowcore.h> |
33 | #include <asm/compat.h> | ||
33 | #include "entry.h" | 34 | #include "entry.h" |
34 | 35 | ||
35 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 36 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
@@ -116,7 +117,8 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
116 | 117 | ||
117 | /* Copy a 'clean' PSW mask to the user to avoid leaking | 118 | /* Copy a 'clean' PSW mask to the user to avoid leaking |
118 | information about whether PER is currently on. */ | 119 | information about whether PER is currently on. */ |
119 | user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask); | 120 | user_sregs.regs.psw.mask = psw_user_bits | |
121 | (regs->psw.mask & PSW_MASK_USER); | ||
120 | user_sregs.regs.psw.addr = regs->psw.addr; | 122 | user_sregs.regs.psw.addr = regs->psw.addr; |
121 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); | 123 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); |
122 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, | 124 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, |
@@ -143,9 +145,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
143 | err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); | 145 | err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); |
144 | if (err) | 146 | if (err) |
145 | return err; | 147 | return err; |
146 | regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, | 148 | /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ |
147 | user_sregs.regs.psw.mask); | 149 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
148 | regs->psw.addr = PSW_ADDR_AMODE | user_sregs.regs.psw.addr; | 150 | (user_sregs.regs.psw.mask & PSW_MASK_USER); |
151 | /* Check for invalid amode */ | ||
152 | if (regs->psw.mask & PSW_MASK_EA) | ||
153 | regs->psw.mask |= PSW_MASK_BA; | ||
154 | regs->psw.addr = user_sregs.regs.psw.addr; | ||
149 | memcpy(®s->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); | 155 | memcpy(®s->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); |
150 | memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, | 156 | memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, |
151 | sizeof(sregs->regs.acrs)); | 157 | sizeof(sregs->regs.acrs)); |
@@ -156,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
156 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; | 162 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; |
157 | 163 | ||
158 | restore_fp_regs(¤t->thread.fp_regs); | 164 | restore_fp_regs(¤t->thread.fp_regs); |
159 | regs->svcnr = 0; /* disable syscall checks */ | 165 | clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ |
160 | return 0; | 166 | return 0; |
161 | } | 167 | } |
162 | 168 | ||
@@ -288,6 +294,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
288 | 294 | ||
289 | /* Set up registers for signal handler */ | 295 | /* Set up registers for signal handler */ |
290 | regs->gprs[15] = (unsigned long) frame; | 296 | regs->gprs[15] = (unsigned long) frame; |
297 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | ||
291 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 298 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
292 | 299 | ||
293 | regs->gprs[2] = map_signal(sig); | 300 | regs->gprs[2] = map_signal(sig); |
@@ -356,6 +363,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
356 | 363 | ||
357 | /* Set up registers for signal handler */ | 364 | /* Set up registers for signal handler */ |
358 | regs->gprs[15] = (unsigned long) frame; | 365 | regs->gprs[15] = (unsigned long) frame; |
366 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | ||
359 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 367 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
360 | 368 | ||
361 | regs->gprs[2] = map_signal(sig); | 369 | regs->gprs[2] = map_signal(sig); |
@@ -401,7 +409,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
401 | */ | 409 | */ |
402 | void do_signal(struct pt_regs *regs) | 410 | void do_signal(struct pt_regs *regs) |
403 | { | 411 | { |
404 | unsigned long retval = 0, continue_addr = 0, restart_addr = 0; | ||
405 | siginfo_t info; | 412 | siginfo_t info; |
406 | int signr; | 413 | int signr; |
407 | struct k_sigaction ka; | 414 | struct k_sigaction ka; |
@@ -421,54 +428,45 @@ void do_signal(struct pt_regs *regs) | |||
421 | else | 428 | else |
422 | oldset = ¤t->blocked; | 429 | oldset = ¤t->blocked; |
423 | 430 | ||
424 | /* Are we from a system call? */ | 431 | /* |
425 | if (regs->svcnr) { | 432 | * Get signal to deliver. When running under ptrace, at this point |
426 | continue_addr = regs->psw.addr; | 433 | * the debugger may change all our registers, including the system |
427 | restart_addr = continue_addr - regs->ilc; | 434 | * call information. |
428 | retval = regs->gprs[2]; | 435 | */ |
429 | 436 | current_thread_info()->system_call = | |
430 | /* Prepare for system call restart. We do this here so that a | 437 | test_thread_flag(TIF_SYSCALL) ? regs->svc_code : 0; |
431 | debugger will see the already changed PSW. */ | ||
432 | switch (retval) { | ||
433 | case -ERESTARTNOHAND: | ||
434 | case -ERESTARTSYS: | ||
435 | case -ERESTARTNOINTR: | ||
436 | regs->gprs[2] = regs->orig_gpr2; | ||
437 | regs->psw.addr = restart_addr; | ||
438 | break; | ||
439 | case -ERESTART_RESTARTBLOCK: | ||
440 | regs->gprs[2] = -EINTR; | ||
441 | } | ||
442 | regs->svcnr = 0; /* Don't deal with this again. */ | ||
443 | } | ||
444 | |||
445 | /* Get signal to deliver. When running under ptrace, at this point | ||
446 | the debugger may change all our registers ... */ | ||
447 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 438 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
448 | 439 | ||
449 | /* Depending on the signal settings we may need to revert the | ||
450 | decision to restart the system call. */ | ||
451 | if (signr > 0 && regs->psw.addr == restart_addr) { | ||
452 | if (retval == -ERESTARTNOHAND | ||
453 | || (retval == -ERESTARTSYS | ||
454 | && !(current->sighand->action[signr-1].sa.sa_flags | ||
455 | & SA_RESTART))) { | ||
456 | regs->gprs[2] = -EINTR; | ||
457 | regs->psw.addr = continue_addr; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | if (signr > 0) { | 440 | if (signr > 0) { |
462 | /* Whee! Actually deliver the signal. */ | 441 | /* Whee! Actually deliver the signal. */ |
463 | int ret; | 442 | if (current_thread_info()->system_call) { |
464 | #ifdef CONFIG_COMPAT | 443 | regs->svc_code = current_thread_info()->system_call; |
465 | if (is_compat_task()) { | 444 | /* Check for system call restarting. */ |
466 | ret = handle_signal32(signr, &ka, &info, oldset, regs); | 445 | switch (regs->gprs[2]) { |
467 | } | 446 | case -ERESTART_RESTARTBLOCK: |
468 | else | 447 | case -ERESTARTNOHAND: |
469 | #endif | 448 | regs->gprs[2] = -EINTR; |
470 | ret = handle_signal(signr, &ka, &info, oldset, regs); | 449 | break; |
471 | if (!ret) { | 450 | case -ERESTARTSYS: |
451 | if (!(ka.sa.sa_flags & SA_RESTART)) { | ||
452 | regs->gprs[2] = -EINTR; | ||
453 | break; | ||
454 | } | ||
455 | /* fallthrough */ | ||
456 | case -ERESTARTNOINTR: | ||
457 | regs->gprs[2] = regs->orig_gpr2; | ||
458 | regs->psw.addr = | ||
459 | __rewind_psw(regs->psw, | ||
460 | regs->svc_code >> 16); | ||
461 | break; | ||
462 | } | ||
463 | /* No longer in a system call */ | ||
464 | clear_thread_flag(TIF_SYSCALL); | ||
465 | } | ||
466 | |||
467 | if ((is_compat_task() ? | ||
468 | handle_signal32(signr, &ka, &info, oldset, regs) : | ||
469 | handle_signal(signr, &ka, &info, oldset, regs)) == 0) { | ||
472 | /* | 470 | /* |
473 | * A signal was successfully delivered; the saved | 471 | * A signal was successfully delivered; the saved |
474 | * sigmask will have been stored in the signal frame, | 472 | * sigmask will have been stored in the signal frame, |
@@ -482,11 +480,32 @@ void do_signal(struct pt_regs *regs) | |||
482 | * Let tracing know that we've done the handler setup. | 480 | * Let tracing know that we've done the handler setup. |
483 | */ | 481 | */ |
484 | tracehook_signal_handler(signr, &info, &ka, regs, | 482 | tracehook_signal_handler(signr, &info, &ka, regs, |
485 | test_thread_flag(TIF_SINGLE_STEP)); | 483 | test_thread_flag(TIF_SINGLE_STEP)); |
486 | } | 484 | } |
487 | return; | 485 | return; |
488 | } | 486 | } |
489 | 487 | ||
488 | /* No handlers present - check for system call restart */ | ||
489 | if (current_thread_info()->system_call) { | ||
490 | regs->svc_code = current_thread_info()->system_call; | ||
491 | switch (regs->gprs[2]) { | ||
492 | case -ERESTART_RESTARTBLOCK: | ||
493 | /* Restart with sys_restart_syscall */ | ||
494 | regs->svc_code = __NR_restart_syscall; | ||
495 | /* fallthrough */ | ||
496 | case -ERESTARTNOHAND: | ||
497 | case -ERESTARTSYS: | ||
498 | case -ERESTARTNOINTR: | ||
499 | /* Restart system call with magic TIF bit. */ | ||
500 | regs->gprs[2] = regs->orig_gpr2; | ||
501 | set_thread_flag(TIF_SYSCALL); | ||
502 | break; | ||
503 | default: | ||
504 | clear_thread_flag(TIF_SYSCALL); | ||
505 | break; | ||
506 | } | ||
507 | } | ||
508 | |||
490 | /* | 509 | /* |
491 | * If there's no signal to deliver, we just put the saved sigmask back. | 510 | * If there's no signal to deliver, we just put the saved sigmask back. |
492 | */ | 511 | */ |
@@ -494,13 +513,6 @@ void do_signal(struct pt_regs *regs) | |||
494 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 513 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
495 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 514 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
496 | } | 515 | } |
497 | |||
498 | /* Restart a different system call. */ | ||
499 | if (retval == -ERESTART_RESTARTBLOCK | ||
500 | && regs->psw.addr == continue_addr) { | ||
501 | regs->gprs[2] = __NR_restart_syscall; | ||
502 | set_thread_flag(TIF_RESTART_SVC); | ||
503 | } | ||
504 | } | 516 | } |
505 | 517 | ||
506 | void do_notify_resume(struct pt_regs *regs) | 518 | void do_notify_resume(struct pt_regs *regs) |
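
The signal.c rework replaces the old svcnr/ilc bookkeeping: do_signal() now learns about an interrupted system call from the TIF_SYSCALL flag plus regs->svc_code and only decides about restarting after get_signal_to_deliver(), i.e. after a debugger has had the chance to rewrite the registers. The interesting part is the decision table; a hedged userspace model of it follows (the -ERESTART* values are the kernel-internal codes, reproduced here only to keep the example runnable):

```c
#include <stdio.h>

/* Kernel-internal restart codes (values assumed from include/linux/errno.h). */
#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516
#define EINTR			4

/*
 * Given the return value of the interrupted system call and whether the
 * handler was installed with SA_RESTART, compute what the handler should
 * see in %r2: either -EINTR or the original argument, the latter meaning
 * the call is restarted by rewinding the PSW over the SVC instruction.
 */
static long restart_decision(long retval, long orig_gpr2, int sa_restart,
			     int *restart)
{
	*restart = 0;
	switch (retval) {
	case -ERESTART_RESTARTBLOCK:
	case -ERESTARTNOHAND:
		return -EINTR;
	case -ERESTARTSYS:
		if (!sa_restart)
			return -EINTR;
		/* fallthrough */
	case -ERESTARTNOINTR:
		*restart = 1;
		return orig_gpr2;
	default:
		return retval;	/* not a restart case */
	}
}

int main(void)
{
	int restart;
	long r2 = restart_decision(-ERESTARTSYS, 42, 1, &restart);

	printf("r2=%ld restart=%d\n", r2, restart);	/* 42, 1 */
	return 0;
}
```

When no handler is pending, the same hunk instead arms TIF_SYSCALL again (or substitutes __NR_restart_syscall for -ERESTART_RESTARTBLOCK), so the restart happens transparently on the way back to user space.
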
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6ab16ac64d29..3ea872890da2 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/timex.h> | 38 | #include <linux/timex.h> |
39 | #include <linux/bootmem.h> | 39 | #include <linux/bootmem.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/crash_dump.h> | ||
41 | #include <asm/asm-offsets.h> | 42 | #include <asm/asm-offsets.h> |
42 | #include <asm/ipl.h> | 43 | #include <asm/ipl.h> |
43 | #include <asm/setup.h> | 44 | #include <asm/setup.h> |
@@ -97,6 +98,29 @@ static inline int cpu_stopped(int cpu) | |||
97 | return raw_cpu_stopped(cpu_logical_map(cpu)); | 98 | return raw_cpu_stopped(cpu_logical_map(cpu)); |
98 | } | 99 | } |
99 | 100 | ||
101 | /* | ||
102 | * Ensure that PSW restart is done on an online CPU | ||
103 | */ | ||
104 | void smp_restart_with_online_cpu(void) | ||
105 | { | ||
106 | int cpu; | ||
107 | |||
108 | for_each_online_cpu(cpu) { | ||
109 | if (stap() == __cpu_logical_map[cpu]) { | ||
110 | /* We are online: Enable DAT again and return */ | ||
111 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | ||
112 | return; | ||
113 | } | ||
114 | } | ||
115 | /* We are not online: Do PSW restart on an online CPU */ | ||
116 | while (sigp(cpu, sigp_restart) == sigp_busy) | ||
117 | cpu_relax(); | ||
118 | /* And stop ourself */ | ||
119 | while (raw_sigp(stap(), sigp_stop) == sigp_busy) | ||
120 | cpu_relax(); | ||
121 | for (;;); | ||
122 | } | ||
123 | |||
100 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | 124 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) |
101 | { | 125 | { |
102 | struct _lowcore *lc, *current_lc; | 126 | struct _lowcore *lc, *current_lc; |
@@ -106,14 +130,16 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | |||
106 | 130 | ||
107 | if (smp_processor_id() == 0) | 131 | if (smp_processor_id() == 0) |
108 | func(data); | 132 | func(data); |
109 | __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); | 133 | __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | |
134 | PSW_MASK_EA | PSW_MASK_BA); | ||
110 | /* Disable lowcore protection */ | 135 | /* Disable lowcore protection */ |
111 | __ctl_clear_bit(0, 28); | 136 | __ctl_clear_bit(0, 28); |
112 | current_lc = lowcore_ptr[smp_processor_id()]; | 137 | current_lc = lowcore_ptr[smp_processor_id()]; |
113 | lc = lowcore_ptr[0]; | 138 | lc = lowcore_ptr[0]; |
114 | if (!lc) | 139 | if (!lc) |
115 | lc = current_lc; | 140 | lc = current_lc; |
116 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 141 | lc->restart_psw.mask = |
142 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
117 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; | 143 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; |
118 | if (!cpu_online(0)) | 144 | if (!cpu_online(0)) |
119 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); | 145 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); |
@@ -135,7 +161,7 @@ void smp_send_stop(void) | |||
135 | int cpu, rc; | 161 | int cpu, rc; |
136 | 162 | ||
137 | /* Disable all interrupts/machine checks */ | 163 | /* Disable all interrupts/machine checks */ |
138 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); | 164 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); |
139 | trace_hardirqs_off(); | 165 | trace_hardirqs_off(); |
140 | 166 | ||
141 | /* stop all processors */ | 167 | /* stop all processors */ |
@@ -161,7 +187,10 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
161 | { | 187 | { |
162 | unsigned long bits; | 188 | unsigned long bits; |
163 | 189 | ||
164 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; | 190 | if (ext_int_code == 0x1202) |
191 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; | ||
192 | else | ||
193 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; | ||
165 | /* | 194 | /* |
166 | * handle bit signal external calls | 195 | * handle bit signal external calls |
167 | */ | 196 | */ |
@@ -183,12 +212,19 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
183 | */ | 212 | */ |
184 | static void smp_ext_bitcall(int cpu, int sig) | 213 | static void smp_ext_bitcall(int cpu, int sig) |
185 | { | 214 | { |
215 | int order; | ||
216 | |||
186 | /* | 217 | /* |
187 | * Set signaling bit in lowcore of target cpu and kick it | 218 | * Set signaling bit in lowcore of target cpu and kick it |
188 | */ | 219 | */ |
189 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | 220 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); |
190 | while (sigp(cpu, sigp_emergency_signal) == sigp_busy) | 221 | while (1) { |
222 | order = smp_vcpu_scheduled(cpu) ? | ||
223 | sigp_external_call : sigp_emergency_signal; | ||
224 | if (sigp(cpu, order) != sigp_busy) | ||
225 | break; | ||
191 | udelay(10); | 226 | udelay(10); |
227 | } | ||
192 | } | 228 | } |
193 | 229 | ||
194 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 230 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
@@ -281,11 +317,13 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
281 | } | 317 | } |
282 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 318 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
283 | 319 | ||
284 | #ifdef CONFIG_ZFCPDUMP | 320 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) |
285 | 321 | ||
286 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | 322 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) |
287 | { | 323 | { |
288 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 324 | if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) |
325 | return; | ||
326 | if (is_kdump_kernel()) | ||
289 | return; | 327 | return; |
290 | if (cpu >= NR_CPUS) { | 328 | if (cpu >= NR_CPUS) { |
291 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " | 329 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " |
@@ -403,6 +441,18 @@ static void __init smp_detect_cpus(void) | |||
403 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 441 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
404 | if (!info) | 442 | if (!info) |
405 | panic("smp_detect_cpus failed to allocate memory\n"); | 443 | panic("smp_detect_cpus failed to allocate memory\n"); |
444 | #ifdef CONFIG_CRASH_DUMP | ||
445 | if (OLDMEM_BASE && !is_kdump_kernel()) { | ||
446 | struct save_area *save_area; | ||
447 | |||
448 | save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); | ||
449 | if (!save_area) | ||
450 | panic("could not allocate memory for save area\n"); | ||
451 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), | ||
452 | 0x200, 0); | ||
453 | zfcpdump_save_areas[0] = save_area; | ||
454 | } | ||
455 | #endif | ||
406 | /* Use sigp detection algorithm if sclp doesn't work. */ | 456 | /* Use sigp detection algorithm if sclp doesn't work. */ |
407 | if (sclp_get_cpu_info(info)) { | 457 | if (sclp_get_cpu_info(info)) { |
408 | smp_use_sigp_detection = 1; | 458 | smp_use_sigp_detection = 1; |
@@ -463,7 +513,8 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
463 | set_cpu_online(smp_processor_id(), true); | 513 | set_cpu_online(smp_processor_id(), true); |
464 | ipi_call_unlock(); | 514 | ipi_call_unlock(); |
465 | __ctl_clear_bit(0, 28); /* Disable lowcore protection */ | 515 | __ctl_clear_bit(0, 28); /* Disable lowcore protection */ |
466 | S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 516 | S390_lowcore.restart_psw.mask = |
517 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
467 | S390_lowcore.restart_psw.addr = | 518 | S390_lowcore.restart_psw.addr = |
468 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 519 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
469 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ | 520 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ |
@@ -511,7 +562,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu) | |||
511 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); | 562 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); |
512 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 563 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
513 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 564 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
514 | lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 565 | lowcore->restart_psw.mask = |
566 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
515 | lowcore->restart_psw.addr = | 567 | lowcore->restart_psw.addr = |
516 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 568 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
517 | if (user_mode != HOME_SPACE_MODE) | 569 | if (user_mode != HOME_SPACE_MODE) |
@@ -712,6 +764,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
712 | /* request the 0x1201 emergency signal external interrupt */ | 764 | /* request the 0x1201 emergency signal external interrupt */ |
713 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 765 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
714 | panic("Couldn't request external interrupt 0x1201"); | 766 | panic("Couldn't request external interrupt 0x1201"); |
767 | /* request the 0x1202 external call external interrupt */ | ||
768 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) | ||
769 | panic("Couldn't request external interrupt 0x1202"); | ||
715 | 770 | ||
716 | /* Reallocate current lowcore, but keep its contents. */ | 771 | /* Reallocate current lowcore, but keep its contents. */ |
717 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 772 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
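
In smp.c, smp_ext_bitcall() now chooses the SIGP order per attempt: if the target virtual CPU is currently scheduled by the hypervisor it uses the external call order (interrupt code 0x1202), otherwise the emergency signal (0x1201), retrying while the target reports busy. A small sketch of that retry loop with stubbed SIGP and scheduling checks — all stubs are illustrative, not the kernel's interfaces:

```c
#include <stdio.h>

enum sigp_order { SIGP_EMERGENCY = 0, SIGP_EXTERNAL_CALL = 1 };
enum sigp_cc    { SIGP_CC_ACCEPTED = 0, SIGP_CC_BUSY = 2 };

/* Stub: in the kernel this asks the hypervisor whether the vCPU runs. */
static int vcpu_scheduled(int cpu) { return cpu & 1; }

/* Stub: pretend the first attempt is busy, the second is accepted. */
static enum sigp_cc sigp(int cpu, enum sigp_order order)
{
	static int attempts;
	(void)cpu; (void)order;
	return attempts++ ? SIGP_CC_ACCEPTED : SIGP_CC_BUSY;
}

static void smp_ext_bitcall(int cpu)
{
	enum sigp_order order;
	int tries = 0;

	for (;;) {
		/* re-evaluate each round: the vCPU may get (de)scheduled */
		order = vcpu_scheduled(cpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY;
		tries++;
		if (sigp(cpu, order) != SIGP_CC_BUSY)
			break;
		/* the kernel udelay(10)s here before retrying */
	}
	printf("cpu %d signalled with order %d after %d tries\n",
	       cpu, order, tries);
}

int main(void)
{
	smp_ext_bitcall(1);
	return 0;
}
```
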
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index cf9e5c6d5527..47df775c844d 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -7,6 +7,8 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/pfn.h> | 9 | #include <linux/pfn.h> |
10 | #include <linux/suspend.h> | ||
11 | #include <linux/mm.h> | ||
10 | #include <asm/system.h> | 12 | #include <asm/system.h> |
11 | 13 | ||
12 | /* | 14 | /* |
@@ -14,6 +16,123 @@ | |||
14 | */ | 16 | */ |
15 | extern const void __nosave_begin, __nosave_end; | 17 | extern const void __nosave_begin, __nosave_end; |
16 | 18 | ||
19 | /* | ||
20 | * The restore of the saved pages in an hibernation image will set | ||
21 | * the change and referenced bits in the storage key for each page. | ||
22 | * Overindication of the referenced bits after an hibernation cycle | ||
23 | * does not cause any harm but the overindication of the change bits | ||
24 | * would cause trouble. | ||
25 | * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each | ||
26 | * page to the most significant byte of the associated page frame | ||
27 | * number in the hibernation image. | ||
28 | */ | ||
29 | |||
30 | /* | ||
31 | * Key storage is allocated as a linked list of pages. | ||
32 | * The size of the keys array is (PAGE_SIZE - sizeof(long)) | ||
33 | */ | ||
34 | struct page_key_data { | ||
35 | struct page_key_data *next; | ||
36 | unsigned char data[]; | ||
37 | }; | ||
38 | |||
39 | #define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *)) | ||
40 | |||
41 | static struct page_key_data *page_key_data; | ||
42 | static struct page_key_data *page_key_rp, *page_key_wp; | ||
43 | static unsigned long page_key_rx, page_key_wx; | ||
44 | |||
45 | /* | ||
46 | * For each page in the hibernation image one additional byte is | ||
47 | * stored in the most significant byte of the page frame number. | ||
48 | * On suspend no additional memory is required but on resume the | ||
49 | * keys need to be memorized until the page data has been restored. | ||
50 | * Only then can the storage keys be set to their old state. | ||
51 | */ | ||
52 | unsigned long page_key_additional_pages(unsigned long pages) | ||
53 | { | ||
54 | return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * Free page_key_data list of arrays. | ||
59 | */ | ||
60 | void page_key_free(void) | ||
61 | { | ||
62 | struct page_key_data *pkd; | ||
63 | |||
64 | while (page_key_data) { | ||
65 | pkd = page_key_data; | ||
66 | page_key_data = pkd->next; | ||
67 | free_page((unsigned long) pkd); | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Allocate page_key_data list of arrays with enough room to store | ||
73 | * one byte for each page in the hibernation image. | ||
74 | */ | ||
75 | int page_key_alloc(unsigned long pages) | ||
76 | { | ||
77 | struct page_key_data *pk; | ||
78 | unsigned long size; | ||
79 | |||
80 | size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); | ||
81 | while (size--) { | ||
82 | pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL); | ||
83 | if (!pk) { | ||
84 | page_key_free(); | ||
85 | return -ENOMEM; | ||
86 | } | ||
87 | pk->next = page_key_data; | ||
88 | page_key_data = pk; | ||
89 | } | ||
90 | page_key_rp = page_key_wp = page_key_data; | ||
91 | page_key_rx = page_key_wx = 0; | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Save the storage key into the upper 8 bits of the page frame number. | ||
97 | */ | ||
98 | void page_key_read(unsigned long *pfn) | ||
99 | { | ||
100 | unsigned long addr; | ||
101 | |||
102 | addr = (unsigned long) page_address(pfn_to_page(*pfn)); | ||
103 | *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Extract the storage key from the upper 8 bits of the page frame number | ||
108 | * and store it in the page_key_data list of arrays. | ||
109 | */ | ||
110 | void page_key_memorize(unsigned long *pfn) | ||
111 | { | ||
112 | page_key_wp->data[page_key_wx] = *(unsigned char *) pfn; | ||
113 | *(unsigned char *) pfn = 0; | ||
114 | if (++page_key_wx < PAGE_KEY_DATA_SIZE) | ||
115 | return; | ||
116 | page_key_wp = page_key_wp->next; | ||
117 | page_key_wx = 0; | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Get the next key from the page_key_data list of arrays and set the | ||
122 | * storage key of the page referred by @address. If @address refers to | ||
123 | * a "safe" page the swsusp_arch_resume code will transfer the storage | ||
124 | * key from the buffer page to the original page. | ||
125 | */ | ||
126 | void page_key_write(void *address) | ||
127 | { | ||
128 | page_set_storage_key((unsigned long) address, | ||
129 | page_key_rp->data[page_key_rx], 0); | ||
130 | if (++page_key_rx >= PAGE_KEY_DATA_SIZE) | ||
131 | return; | ||
132 | page_key_rp = page_key_rp->next; | ||
133 | page_key_rx = 0; | ||
134 | } | ||
135 | |||
17 | int pfn_is_nosave(unsigned long pfn) | 136 | int pfn_is_nosave(unsigned long pfn) |
18 | { | 137 | { |
19 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); | 138 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); |
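
The suspend.c additions buffer one storage-key byte per image page in a linked list of whole pages: page_key_memorize() fills the buffer while the hibernation image is read, and page_key_write() replays the keys once the page contents have been restored. A userspace model of that producer/consumer cursor, with the block size shrunk so the block change is visible; the replay cursor here advances the same way as the write cursor, which is the intended behaviour rather than a literal copy of the kernel routine:

```c
#include <stdio.h>
#include <stdlib.h>

#define KEYS_PER_BLOCK 4	/* kernel: PAGE_SIZE - sizeof(pointer) */

struct key_block {
	struct key_block *next;
	unsigned char data[KEYS_PER_BLOCK];
};

static struct key_block *head, *wp, *rp;
static unsigned long wx, rx;

static int key_buf_alloc(unsigned long pages)
{
	unsigned long blocks = (pages + KEYS_PER_BLOCK - 1) / KEYS_PER_BLOCK;

	while (blocks--) {
		struct key_block *b = calloc(1, sizeof(*b));
		if (!b)
			return -1;
		b->next = head;
		head = b;
	}
	wp = rp = head;
	wx = rx = 0;
	return 0;
}

/* Producer: remember one key byte, moving to the next block when full. */
static void key_memorize(unsigned char key)
{
	wp->data[wx] = key;
	if (++wx < KEYS_PER_BLOCK)
		return;
	wp = wp->next;
	wx = 0;
}

/* Consumer: hand the keys back in the same order they were stored. */
static unsigned char key_replay(void)
{
	unsigned char key = rp->data[rx];

	if (++rx >= KEYS_PER_BLOCK) {
		rp = rp->next;
		rx = 0;
	}
	return key;
}

int main(void)
{
	unsigned char i;

	key_buf_alloc(10);
	for (i = 0; i < 10; i++)
		key_memorize(i);
	for (i = 0; i < 10; i++)
		printf("%u ", key_replay());	/* 0 1 2 ... 9 */
	printf("\n");
	return 0;
}
```

Because both cursors start at the list head and walk the same chain, replay order matches write order even though new blocks are prepended during allocation.
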
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 51bcdb50a230..acb78cdee896 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -136,11 +136,14 @@ ENTRY(swsusp_arch_resume) | |||
136 | 0: | 136 | 0: |
137 | lg %r2,8(%r1) | 137 | lg %r2,8(%r1) |
138 | lg %r4,0(%r1) | 138 | lg %r4,0(%r1) |
139 | iske %r0,%r4 | ||
139 | lghi %r3,PAGE_SIZE | 140 | lghi %r3,PAGE_SIZE |
140 | lghi %r5,PAGE_SIZE | 141 | lghi %r5,PAGE_SIZE |
141 | 1: | 142 | 1: |
142 | mvcle %r2,%r4,0 | 143 | mvcle %r2,%r4,0 |
143 | jo 1b | 144 | jo 1b |
145 | lg %r2,8(%r1) | ||
146 | sske %r0,%r2 | ||
144 | lg %r1,16(%r1) | 147 | lg %r1,16(%r1) |
145 | ltgr %r1,%r1 | 148 | ltgr %r1,%r1 |
146 | jnz 0b | 149 | jnz 0b |
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 5c9e439bf3f6..2a94b774695c 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c | |||
@@ -442,7 +442,7 @@ void s390_adjust_jiffies(void) | |||
442 | */ | 442 | */ |
443 | FP_UNPACK_SP(SA, &fmil); | 443 | FP_UNPACK_SP(SA, &fmil); |
444 | if ((info->capability >> 23) == 0) | 444 | if ((info->capability >> 23) == 0) |
445 | FP_FROM_INT_S(SB, info->capability, 32, int); | 445 | FP_FROM_INT_S(SB, (long) info->capability, 64, long); |
446 | else | 446 | else |
447 | FP_UNPACK_SP(SB, &info->capability); | 447 | FP_UNPACK_SP(SB, &info->capability); |
448 | FP_DIV_S(SR, SA, SB); | 448 | FP_DIV_S(SR, SA, SB); |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index dff933065ab6..ebbfab3c6e5a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <asm/timer.h> | 48 | #include <asm/timer.h> |
49 | #include <asm/etr.h> | 49 | #include <asm/etr.h> |
50 | #include <asm/cio.h> | 50 | #include <asm/cio.h> |
51 | #include "entry.h" | ||
51 | 52 | ||
52 | /* change this if you have some constant time drift */ | 53 | /* change this if you have some constant time drift */ |
53 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 54 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
@@ -109,10 +110,14 @@ static void fixup_clock_comparator(unsigned long long delta) | |||
109 | set_clock_comparator(S390_lowcore.clock_comparator); | 110 | set_clock_comparator(S390_lowcore.clock_comparator); |
110 | } | 111 | } |
111 | 112 | ||
112 | static int s390_next_event(unsigned long delta, | 113 | static int s390_next_ktime(ktime_t expires, |
113 | struct clock_event_device *evt) | 114 | struct clock_event_device *evt) |
114 | { | 115 | { |
115 | S390_lowcore.clock_comparator = get_clock() + delta; | 116 | u64 nsecs; |
117 | |||
118 | nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset())); | ||
119 | do_div(nsecs, 125); | ||
120 | S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9); | ||
116 | set_clock_comparator(S390_lowcore.clock_comparator); | 121 | set_clock_comparator(S390_lowcore.clock_comparator); |
117 | return 0; | 122 | return 0; |
118 | } | 123 | } |
@@ -137,14 +142,15 @@ void init_cpu_timer(void) | |||
137 | cpu = smp_processor_id(); | 142 | cpu = smp_processor_id(); |
138 | cd = &per_cpu(comparators, cpu); | 143 | cd = &per_cpu(comparators, cpu); |
139 | cd->name = "comparator"; | 144 | cd->name = "comparator"; |
140 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | 145 | cd->features = CLOCK_EVT_FEAT_ONESHOT | |
146 | CLOCK_EVT_FEAT_KTIME; | ||
141 | cd->mult = 16777; | 147 | cd->mult = 16777; |
142 | cd->shift = 12; | 148 | cd->shift = 12; |
143 | cd->min_delta_ns = 1; | 149 | cd->min_delta_ns = 1; |
144 | cd->max_delta_ns = LONG_MAX; | 150 | cd->max_delta_ns = LONG_MAX; |
145 | cd->rating = 400; | 151 | cd->rating = 400; |
146 | cd->cpumask = cpumask_of(cpu); | 152 | cd->cpumask = cpumask_of(cpu); |
147 | cd->set_next_event = s390_next_event; | 153 | cd->set_next_ktime = s390_next_ktime; |
148 | cd->set_mode = s390_set_mode; | 154 | cd->set_mode = s390_set_mode; |
149 | 155 | ||
150 | clockevents_register_device(cd); | 156 | clockevents_register_device(cd); |
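
time.c switches the clock event device to CLOCK_EVT_FEAT_KTIME: instead of programming a delta, s390_next_ktime() turns the absolute expiry into a TOD clock value. One TOD unit is 2^-12 microseconds, so a nanosecond corresponds to 512/125 units — hence the divide by 125 and the shift left by 9 before adding the TOD value of the Unix epoch. A tiny check of that arithmetic (the epoch constant is assumed from the s390 headers):

```c
#include <stdio.h>
#include <stdint.h>

/* TOD clock value at the Unix epoch (assumed from the s390 timex header). */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

/*
 * 1 ns = 4096 / 1000 = 512 / 125 TOD units, so an expiry given in
 * nanoseconds since the epoch becomes TOD_UNIX_EPOCH + (ns / 125) << 9.
 */
static uint64_t ns_to_clock_comparator(uint64_t nsecs)
{
	return TOD_UNIX_EPOCH + ((nsecs / 125) << 9);
}

int main(void)
{
	/* 1 second = 1e9 ns should map to 4096 * 1e6 = 0xF4240000 TOD units. */
	uint64_t one_sec = ns_to_clock_comparator(1000000000ULL) - TOD_UNIX_EPOCH;

	printf("%#llx\n", (unsigned long long)one_sec);
	return 0;
}
```
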
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 0cd340b72632..77b8942b9a15 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -299,8 +299,8 @@ out: | |||
299 | } | 299 | } |
300 | __initcall(init_topology_update); | 300 | __initcall(init_topology_update); |
301 | 301 | ||
302 | static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, | 302 | static void __init alloc_masks(struct sysinfo_15_1_x *info, |
303 | int offset) | 303 | struct mask_info *mask, int offset) |
304 | { | 304 | { |
305 | int i, nr_masks; | 305 | int i, nr_masks; |
306 | 306 | ||
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index ffabcd9d3363..a9807dd86276 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -200,7 +200,7 @@ void show_registers(struct pt_regs *regs) | |||
200 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | 200 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), |
201 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | 201 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); |
202 | #ifdef CONFIG_64BIT | 202 | #ifdef CONFIG_64BIT |
203 | printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); | 203 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); |
204 | #endif | 204 | #endif |
205 | printk("\n%s GPRS: " FOURLONG, mode, | 205 | printk("\n%s GPRS: " FOURLONG, mode, |
206 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | 206 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); |
@@ -334,7 +334,8 @@ void __kprobes do_per_trap(struct pt_regs *regs) | |||
334 | info.si_signo = SIGTRAP; | 334 | info.si_signo = SIGTRAP; |
335 | info.si_errno = 0; | 335 | info.si_errno = 0; |
336 | info.si_code = TRAP_HWBKPT; | 336 | info.si_code = TRAP_HWBKPT; |
337 | info.si_addr = (void *) current->thread.per_event.address; | 337 | info.si_addr = |
338 | (void __force __user *) current->thread.per_event.address; | ||
338 | force_sig_info(SIGTRAP, &info, current); | 339 | force_sig_info(SIGTRAP, &info, current); |
339 | } | 340 | } |
340 | 341 | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 2d6228f60cd6..bb48977f5469 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -170,7 +170,8 @@ void __kprobes vtime_stop_cpu(void) | |||
170 | psw_t psw; | 170 | psw_t psw; |
171 | 171 | ||
172 | /* Wait for external, I/O or machine check interrupt. */ | 172 | /* Wait for external, I/O or machine check interrupt. */ |
173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; | 173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | |
174 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
174 | 175 | ||
175 | idle->nohz_delay = 0; | 176 | idle->nohz_delay = 0; |
176 | 177 | ||
@@ -183,7 +184,8 @@ void __kprobes vtime_stop_cpu(void) | |||
183 | * set_cpu_timer(VTIMER_MAX_SLICE); | 184 | * set_cpu_timer(VTIMER_MAX_SLICE); |
184 | * idle->idle_enter = get_clock(); | 185 | * idle->idle_enter = get_clock(); |
185 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 186 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
186 | * PSW_MASK_IO | PSW_MASK_EXT); | 187 | * PSW_MASK_DAT | PSW_MASK_IO | |
188 | * PSW_MASK_EXT | PSW_MASK_MCHECK); | ||
187 | * The difference is that the inline assembly makes sure that | 189 | * The difference is that the inline assembly makes sure that |
188 | * the last three instruction are stpt, stck and lpsw in that | 190 | * the last three instruction are stpt, stck and lpsw in that |
189 | * order. This is done to increase the precision. | 191 | * order. This is done to increase the precision. |
@@ -216,7 +218,8 @@ void __kprobes vtime_stop_cpu(void) | |||
216 | * vq->idle = get_cpu_timer(); | 218 | * vq->idle = get_cpu_timer(); |
217 | * idle->idle_enter = get_clock(); | 219 | * idle->idle_enter = get_clock(); |
218 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 220 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
219 | * PSW_MASK_IO | PSW_MASK_EXT); | 221 | * PSW_MASK_DAT | PSW_MASK_IO | |
222 | * PSW_MASK_EXT | PSW_MASK_MCHECK); | ||
220 | * The difference is that the inline assembly makes sure that | 223 | * The difference is that the inline assembly makes sure that |
221 | * the last three instruction are stpt, stck and lpsw in that | 224 | * the last three instruction are stpt, stck and lpsw in that |
222 | * order. This is done to increase the precision. | 225 | * order. This is done to increase the precision. |
@@ -458,7 +461,7 @@ void add_virt_timer_periodic(void *new) | |||
458 | } | 461 | } |
459 | EXPORT_SYMBOL(add_virt_timer_periodic); | 462 | EXPORT_SYMBOL(add_virt_timer_periodic); |
460 | 463 | ||
461 | int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) | 464 | static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) |
462 | { | 465 | { |
463 | struct vtimer_queue *vq; | 466 | struct vtimer_queue *vq; |
464 | unsigned long flags; | 467 | unsigned long flags; |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 9e4c84187cf5..87cedd61be04 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * diag.c - handling diagnose instructions | 2 | * diag.c - handling diagnose instructions |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008 | 4 | * Copyright IBM Corp. 2008,2011 |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License (version 2 only) | 7 | * it under the terms of the GNU General Public License (version 2 only) |
@@ -15,6 +15,34 @@ | |||
15 | #include <linux/kvm_host.h> | 15 | #include <linux/kvm_host.h> |
16 | #include "kvm-s390.h" | 16 | #include "kvm-s390.h" |
17 | 17 | ||
18 | static int diag_release_pages(struct kvm_vcpu *vcpu) | ||
19 | { | ||
20 | unsigned long start, end; | ||
21 | unsigned long prefix = vcpu->arch.sie_block->prefix; | ||
22 | |||
23 | start = vcpu->arch.guest_gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; | ||
24 | end = vcpu->arch.guest_gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; | ||
25 | |||
26 | if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end | ||
27 | || start < 2 * PAGE_SIZE) | ||
28 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | ||
29 | |||
30 | VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); | ||
31 | vcpu->stat.diagnose_10++; | ||
32 | |||
33 | /* we checked for start > end above */ | ||
34 | if (end < prefix || start >= prefix + 2 * PAGE_SIZE) { | ||
35 | gmap_discard(start, end, vcpu->arch.gmap); | ||
36 | } else { | ||
37 | if (start < prefix) | ||
38 | gmap_discard(start, prefix, vcpu->arch.gmap); | ||
39 | if (end >= prefix) | ||
40 | gmap_discard(prefix + 2 * PAGE_SIZE, | ||
41 | end, vcpu->arch.gmap); | ||
42 | } | ||
43 | return 0; | ||
44 | } | ||
45 | |||
18 | static int __diag_time_slice_end(struct kvm_vcpu *vcpu) | 46 | static int __diag_time_slice_end(struct kvm_vcpu *vcpu) |
19 | { | 47 | { |
20 | VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); | 48 | VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); |
@@ -57,6 +85,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) | |||
57 | int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; | 85 | int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; |
58 | 86 | ||
59 | switch (code) { | 87 | switch (code) { |
88 | case 0x10: | ||
89 | return diag_release_pages(vcpu); | ||
60 | case 0x44: | 90 | case 0x44: |
61 | return __diag_time_slice_end(vcpu); | 91 | return __diag_time_slice_end(vcpu); |
62 | case 0x308: | 92 | case 0x308: |
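
diag_release_pages() lets a guest hand unused pages back to the host via diagnose 0x10, but the two pages at the guest's prefix address must never be discarded, so the requested range is split around [prefix, prefix + 2*PAGE_SIZE). A simplified sketch of that clamping — gmap_discard() is replaced by a print, and the boundary handling is tidied up rather than copied verbatim from the kernel code:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stub for the host-side unmap; the kernel calls gmap_discard() here. */
static void discard(unsigned long start, unsigned long end)
{
	if (start < end)
		printf("discard [%#lx, %#lx)\n", start, end);
}

/*
 * Release [start, end) but leave the two prefix pages alone; callers have
 * already validated alignment and start <= end.
 */
static void release_range(unsigned long start, unsigned long end,
			  unsigned long prefix)
{
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		discard(start, end);		/* no overlap with the prefix */
		return;
	}
	if (start < prefix)
		discard(start, prefix);			/* below the prefix pages */
	if (end > prefix + 2 * PAGE_SIZE)
		discard(prefix + 2 * PAGE_SIZE, end);	/* above them */
}

int main(void)
{
	/* a range covering the prefix pages at 0x10000 gets split in two */
	release_range(0x8000, 0x20000, 0x10000);
	return 0;
}
```
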
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c9aeb4b4d0b8..87c16705b381 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -38,6 +38,11 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, | |||
38 | struct kvm_s390_interrupt_info *inti) | 38 | struct kvm_s390_interrupt_info *inti) |
39 | { | 39 | { |
40 | switch (inti->type) { | 40 | switch (inti->type) { |
41 | case KVM_S390_INT_EXTERNAL_CALL: | ||
42 | if (psw_extint_disabled(vcpu)) | ||
43 | return 0; | ||
44 | if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) | ||
45 | return 1; | ||
41 | case KVM_S390_INT_EMERGENCY: | 46 | case KVM_S390_INT_EMERGENCY: |
42 | if (psw_extint_disabled(vcpu)) | 47 | if (psw_extint_disabled(vcpu)) |
43 | return 0; | 48 | return 0; |
@@ -98,6 +103,7 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | |||
98 | struct kvm_s390_interrupt_info *inti) | 103 | struct kvm_s390_interrupt_info *inti) |
99 | { | 104 | { |
100 | switch (inti->type) { | 105 | switch (inti->type) { |
106 | case KVM_S390_INT_EXTERNAL_CALL: | ||
101 | case KVM_S390_INT_EMERGENCY: | 107 | case KVM_S390_INT_EMERGENCY: |
102 | case KVM_S390_INT_SERVICE: | 108 | case KVM_S390_INT_SERVICE: |
103 | case KVM_S390_INT_VIRTIO: | 109 | case KVM_S390_INT_VIRTIO: |
@@ -143,6 +149,28 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
143 | exception = 1; | 149 | exception = 1; |
144 | break; | 150 | break; |
145 | 151 | ||
152 | case KVM_S390_INT_EXTERNAL_CALL: | ||
153 | VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); | ||
154 | vcpu->stat.deliver_external_call++; | ||
155 | rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202); | ||
156 | if (rc == -EFAULT) | ||
157 | exception = 1; | ||
158 | |||
159 | rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->extcall.code); | ||
160 | if (rc == -EFAULT) | ||
161 | exception = 1; | ||
162 | |||
163 | rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, | ||
164 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
165 | if (rc == -EFAULT) | ||
166 | exception = 1; | ||
167 | |||
168 | rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, | ||
169 | __LC_EXT_NEW_PSW, sizeof(psw_t)); | ||
170 | if (rc == -EFAULT) | ||
171 | exception = 1; | ||
172 | break; | ||
173 | |||
146 | case KVM_S390_INT_SERVICE: | 174 | case KVM_S390_INT_SERVICE: |
147 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", | 175 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", |
148 | inti->ext.ext_params); | 176 | inti->ext.ext_params); |
@@ -522,6 +550,7 @@ int kvm_s390_inject_vm(struct kvm *kvm, | |||
522 | break; | 550 | break; |
523 | case KVM_S390_PROGRAM_INT: | 551 | case KVM_S390_PROGRAM_INT: |
524 | case KVM_S390_SIGP_STOP: | 552 | case KVM_S390_SIGP_STOP: |
553 | case KVM_S390_INT_EXTERNAL_CALL: | ||
525 | case KVM_S390_INT_EMERGENCY: | 554 | case KVM_S390_INT_EMERGENCY: |
526 | default: | 555 | default: |
527 | kfree(inti); | 556 | kfree(inti); |
@@ -581,6 +610,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
581 | break; | 610 | break; |
582 | case KVM_S390_SIGP_STOP: | 611 | case KVM_S390_SIGP_STOP: |
583 | case KVM_S390_RESTART: | 612 | case KVM_S390_RESTART: |
613 | case KVM_S390_INT_EXTERNAL_CALL: | ||
584 | case KVM_S390_INT_EMERGENCY: | 614 | case KVM_S390_INT_EMERGENCY: |
585 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); | 615 | VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); |
586 | inti->type = s390int->type; | 616 | inti->type = s390int->type; |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index dc2b580e27bc..0bd3bea1e4cd 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -46,6 +46,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
46 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, | 46 | { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, |
47 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, | 47 | { "instruction_lctl", VCPU_STAT(instruction_lctl) }, |
48 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, | 48 | { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, |
49 | { "deliver_external_call", VCPU_STAT(deliver_external_call) }, | ||
49 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, | 50 | { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, |
50 | { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) }, | 51 | { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) }, |
51 | { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, | 52 | { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, |
@@ -64,11 +65,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
64 | { "instruction_stfl", VCPU_STAT(instruction_stfl) }, | 65 | { "instruction_stfl", VCPU_STAT(instruction_stfl) }, |
65 | { "instruction_tprot", VCPU_STAT(instruction_tprot) }, | 66 | { "instruction_tprot", VCPU_STAT(instruction_tprot) }, |
66 | { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, | 67 | { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, |
68 | { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, | ||
67 | { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, | 69 | { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, |
68 | { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, | 70 | { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, |
69 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, | 71 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, |
70 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, | 72 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, |
71 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, | 73 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, |
74 | { "diagnose_10", VCPU_STAT(diagnose_10) }, | ||
72 | { "diagnose_44", VCPU_STAT(diagnose_44) }, | 75 | { "diagnose_44", VCPU_STAT(diagnose_44) }, |
73 | { NULL } | 76 | { NULL } |
74 | }; | 77 | }; |
@@ -175,6 +178,8 @@ int kvm_arch_init_vm(struct kvm *kvm) | |||
175 | if (rc) | 178 | if (rc) |
176 | goto out_err; | 179 | goto out_err; |
177 | 180 | ||
181 | rc = -ENOMEM; | ||
182 | |||
178 | kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); | 183 | kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); |
179 | if (!kvm->arch.sca) | 184 | if (!kvm->arch.sca) |
180 | goto out_err; | 185 | goto out_err; |
@@ -312,11 +317,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
312 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | 317 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, |
313 | unsigned int id) | 318 | unsigned int id) |
314 | { | 319 | { |
315 | struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); | 320 | struct kvm_vcpu *vcpu; |
316 | int rc = -ENOMEM; | 321 | int rc = -EINVAL; |
322 | |||
323 | if (id >= KVM_MAX_VCPUS) | ||
324 | goto out; | ||
317 | 325 | ||
326 | rc = -ENOMEM; | ||
327 | |||
328 | vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); | ||
318 | if (!vcpu) | 329 | if (!vcpu) |
319 | goto out_nomem; | 330 | goto out; |
320 | 331 | ||
321 | vcpu->arch.sie_block = (struct kvm_s390_sie_block *) | 332 | vcpu->arch.sie_block = (struct kvm_s390_sie_block *) |
322 | get_zeroed_page(GFP_KERNEL); | 333 | get_zeroed_page(GFP_KERNEL); |
@@ -352,7 +363,7 @@ out_free_sie_block: | |||
352 | free_page((unsigned long)(vcpu->arch.sie_block)); | 363 | free_page((unsigned long)(vcpu->arch.sie_block)); |
353 | out_free_cpu: | 364 | out_free_cpu: |
354 | kfree(vcpu); | 365 | kfree(vcpu); |
355 | out_nomem: | 366 | out: |
356 | return ERR_PTR(rc); | 367 | return ERR_PTR(rc); |
357 | } | 368 | } |
358 | 369 | ||
@@ -386,6 +397,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
386 | { | 397 | { |
387 | memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs)); | 398 | memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs)); |
388 | memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); | 399 | memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); |
400 | restore_access_regs(vcpu->arch.guest_acrs); | ||
389 | return 0; | 401 | return 0; |
390 | } | 402 | } |
391 | 403 | ||
@@ -401,6 +413,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |||
401 | { | 413 | { |
402 | memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); | 414 | memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); |
403 | vcpu->arch.guest_fpregs.fpc = fpu->fpc; | 415 | vcpu->arch.guest_fpregs.fpc = fpu->fpc; |
416 | restore_fp_regs(&vcpu->arch.guest_fpregs); | ||
404 | return 0; | 417 | return 0; |
405 | } | 418 | } |
406 | 419 | ||
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index d6a50c1fb2e6..f815118835f3 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -87,6 +87,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
87 | return -ENOMEM; | 87 | return -ENOMEM; |
88 | 88 | ||
89 | inti->type = KVM_S390_INT_EMERGENCY; | 89 | inti->type = KVM_S390_INT_EMERGENCY; |
90 | inti->emerg.code = vcpu->vcpu_id; | ||
90 | 91 | ||
91 | spin_lock(&fi->lock); | 92 | spin_lock(&fi->lock); |
92 | li = fi->local_int[cpu_addr]; | 93 | li = fi->local_int[cpu_addr]; |
@@ -103,9 +104,47 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
103 | wake_up_interruptible(&li->wq); | 104 | wake_up_interruptible(&li->wq); |
104 | spin_unlock_bh(&li->lock); | 105 | spin_unlock_bh(&li->lock); |
105 | rc = 0; /* order accepted */ | 106 | rc = 0; /* order accepted */ |
107 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); | ||
108 | unlock: | ||
109 | spin_unlock(&fi->lock); | ||
110 | return rc; | ||
111 | } | ||
112 | |||
113 | static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | ||
114 | { | ||
115 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; | ||
116 | struct kvm_s390_local_interrupt *li; | ||
117 | struct kvm_s390_interrupt_info *inti; | ||
118 | int rc; | ||
119 | |||
120 | if (cpu_addr >= KVM_MAX_VCPUS) | ||
121 | return 3; /* not operational */ | ||
122 | |||
123 | inti = kzalloc(sizeof(*inti), GFP_KERNEL); | ||
124 | if (!inti) | ||
125 | return -ENOMEM; | ||
126 | |||
127 | inti->type = KVM_S390_INT_EXTERNAL_CALL; | ||
128 | inti->extcall.code = vcpu->vcpu_id; | ||
129 | |||
130 | spin_lock(&fi->lock); | ||
131 | li = fi->local_int[cpu_addr]; | ||
132 | if (li == NULL) { | ||
133 | rc = 3; /* not operational */ | ||
134 | kfree(inti); | ||
135 | goto unlock; | ||
136 | } | ||
137 | spin_lock_bh(&li->lock); | ||
138 | list_add_tail(&inti->list, &li->list); | ||
139 | atomic_set(&li->active, 1); | ||
140 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | ||
141 | if (waitqueue_active(&li->wq)) | ||
142 | wake_up_interruptible(&li->wq); | ||
143 | spin_unlock_bh(&li->lock); | ||
144 | rc = 0; /* order accepted */ | ||
145 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); | ||
106 | unlock: | 146 | unlock: |
107 | spin_unlock(&fi->lock); | 147 | spin_unlock(&fi->lock); |
108 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); | ||
109 | return rc; | 148 | return rc; |
110 | } | 149 | } |
111 | 150 | ||
@@ -267,6 +306,10 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | |||
267 | rc = __sigp_sense(vcpu, cpu_addr, | 306 | rc = __sigp_sense(vcpu, cpu_addr, |
268 | &vcpu->arch.guest_gprs[r1]); | 307 | &vcpu->arch.guest_gprs[r1]); |
269 | break; | 308 | break; |
309 | case SIGP_EXTERNAL_CALL: | ||
310 | vcpu->stat.instruction_sigp_external_call++; | ||
311 | rc = __sigp_external_call(vcpu, cpu_addr); | ||
312 | break; | ||
270 | case SIGP_EMERGENCY: | 313 | case SIGP_EMERGENCY: |
271 | vcpu->stat.instruction_sigp_emergency++; | 314 | vcpu->stat.instruction_sigp_emergency++; |
272 | rc = __sigp_emergency(vcpu, cpu_addr); | 315 | rc = __sigp_emergency(vcpu, cpu_addr); |
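The new __sigp_external_call() mirrors __sigp_emergency(): it queues a KVM_S390_INT_EXTERNAL_CALL interrupt carrying the sender's CPU address on the target's local interrupt list and wakes the target VCPU. On the guest side the two SIGP orders arrive as different external interruption codes; a hedged sketch of how a guest handler might tell them apart (the handler name and dispatch are illustrative only):

	/* illustrative guest-side dispatch; 0x1201/0x1202 are the architected
	 * external interruption codes for emergency signal and external call */
	#define EXT_IRQ_EMERGENCY_SIG	0x1201
	#define EXT_IRQ_EXTERNAL_CALL	0x1202

	static void ext_interrupt_dispatch(unsigned int ext_int_code)
	{
		switch (ext_int_code & 0xffff) {
		case EXT_IRQ_EXTERNAL_CALL:
			/* inti->extcall.code set above carries the sending
			 * CPU's address */
			break;
		case EXT_IRQ_EMERGENCY_SIG:
			/* inti->emerg.code, also newly filled in above */
			break;
		}
	}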
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index a65229d91c92..db92f044024c 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -32,7 +32,8 @@ static void __udelay_disabled(unsigned long long usecs) | |||
32 | u64 clock_saved; | 32 | u64 clock_saved; |
33 | u64 end; | 33 | u64 end; |
34 | 34 | ||
35 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | 35 | mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_WAIT | |
36 | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
36 | end = get_clock() + (usecs << 12); | 37 | end = get_clock() + (usecs << 12); |
37 | clock_saved = local_tick_disable(); | 38 | clock_saved = local_tick_disable(); |
38 | __ctl_store(cr0_saved, 0, 0); | 39 | __ctl_store(cr0_saved, 0, 0); |
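For context on the `usecs << 12` term kept in __udelay_disabled(): bit 51 of the s390 TOD clock ticks once per microsecond, so one microsecond corresponds to 4096 (2^12) TOD clock units. A minimal sketch of that conversion, with a hypothetical helper name:

	/* illustration only: convert a microsecond count to s390 TOD clock
	 * units; bit 51 ticks every microsecond, so 1 us == 2^12 units */
	static inline unsigned long long usecs_to_tod(unsigned long long usecs)
	{
		return usecs << 12;
	}

	/* e.g. a 150 us delay ends at get_clock() + usecs_to_tod(150) */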
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 74833831417f..342ae35a5ba9 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -342,7 +342,8 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) | |||
342 | if (segment_eq(get_fs(), KERNEL_DS)) | 342 | if (segment_eq(get_fs(), KERNEL_DS)) |
343 | return __futex_atomic_op_pt(op, uaddr, oparg, old); | 343 | return __futex_atomic_op_pt(op, uaddr, oparg, old); |
344 | spin_lock(¤t->mm->page_table_lock); | 344 | spin_lock(¤t->mm->page_table_lock); |
345 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | 345 | uaddr = (u32 __force __user *) |
346 | __dat_user_addr((__force unsigned long) uaddr); | ||
346 | if (!uaddr) { | 347 | if (!uaddr) { |
347 | spin_unlock(¤t->mm->page_table_lock); | 348 | spin_unlock(¤t->mm->page_table_lock); |
348 | return -EFAULT; | 349 | return -EFAULT; |
@@ -378,7 +379,8 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, | |||
378 | if (segment_eq(get_fs(), KERNEL_DS)) | 379 | if (segment_eq(get_fs(), KERNEL_DS)) |
379 | return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); | 380 | return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); |
380 | spin_lock(¤t->mm->page_table_lock); | 381 | spin_lock(¤t->mm->page_table_lock); |
381 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | 382 | uaddr = (u32 __force __user *) |
383 | __dat_user_addr((__force unsigned long) uaddr); | ||
382 | if (!uaddr) { | 384 | if (!uaddr) { |
383 | spin_unlock(¤t->mm->page_table_lock); | 385 | spin_unlock(¤t->mm->page_table_lock); |
384 | return -EFAULT; | 386 | return -EFAULT; |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 9564fc779b27..1766def5bc3f 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -307,7 +307,7 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
307 | 307 | ||
308 | #ifdef CONFIG_PGSTE | 308 | #ifdef CONFIG_PGSTE |
309 | if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { | 309 | if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { |
310 | address = gmap_fault(address, | 310 | address = __gmap_fault(address, |
311 | (struct gmap *) S390_lowcore.gmap); | 311 | (struct gmap *) S390_lowcore.gmap); |
312 | if (address == -EFAULT) { | 312 | if (address == -EFAULT) { |
313 | fault = VM_FAULT_BADMAP; | 313 | fault = VM_FAULT_BADMAP; |
@@ -393,7 +393,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, | |||
393 | int fault; | 393 | int fault; |
394 | 394 | ||
395 | /* Protection exception is suppressing, decrement psw address. */ | 395 | /* Protection exception is suppressing, decrement psw address. */ |
396 | regs->psw.addr -= (pgm_int_code >> 16); | 396 | regs->psw.addr = __rewind_psw(regs->psw, pgm_int_code >> 16); |
397 | /* | 397 | /* |
398 | * Check for low-address protection. This needs to be treated | 398 | * Check for low-address protection. This needs to be treated |
399 | * as a special case because the translation exception code | 399 | * as a special case because the translation exception code |
@@ -454,7 +454,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | |||
454 | struct pt_regs regs; | 454 | struct pt_regs regs; |
455 | int access, fault; | 455 | int access, fault; |
456 | 456 | ||
457 | regs.psw.mask = psw_kernel_bits; | 457 | regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; |
458 | if (!irqs_disabled()) | 458 | if (!irqs_disabled()) |
459 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; | 459 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; |
460 | regs.psw.addr = (unsigned long) __builtin_return_address(0); | 460 | regs.psw.addr = (unsigned long) __builtin_return_address(0); |
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 45b405ca2567..65cb06e2af4e 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -52,7 +52,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
52 | unsigned long end, int write, struct page **pages, int *nr) | 52 | unsigned long end, int write, struct page **pages, int *nr) |
53 | { | 53 | { |
54 | unsigned long mask, result; | 54 | unsigned long mask, result; |
55 | struct page *head, *page; | 55 | struct page *head, *page, *tail; |
56 | int refs; | 56 | int refs; |
57 | 57 | ||
58 | result = write ? 0 : _SEGMENT_ENTRY_RO; | 58 | result = write ? 0 : _SEGMENT_ENTRY_RO; |
@@ -64,6 +64,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
64 | refs = 0; | 64 | refs = 0; |
65 | head = pmd_page(pmd); | 65 | head = pmd_page(pmd); |
66 | page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | 66 | page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
67 | tail = page; | ||
67 | do { | 68 | do { |
68 | VM_BUG_ON(compound_head(page) != head); | 69 | VM_BUG_ON(compound_head(page) != head); |
69 | pages[*nr] = page; | 70 | pages[*nr] = page; |
@@ -81,6 +82,17 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
81 | *nr -= refs; | 82 | *nr -= refs; |
82 | while (refs--) | 83 | while (refs--) |
83 | put_page(head); | 84 | put_page(head); |
85 | return 0; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Any tail page need their mapcount reference taken before we | ||
90 | * return. | ||
91 | */ | ||
92 | while (refs--) { | ||
93 | if (PageTail(tail)) | ||
94 | get_huge_page_tail(tail); | ||
95 | tail++; | ||
84 | } | 96 | } |
85 | 97 | ||
86 | return 1; | 98 | return 1; |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 59b663109d90..d4b9fb4d0042 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
27 | #include <linux/poison.h> | 27 | #include <linux/poison.h> |
28 | #include <linux/initrd.h> | 28 | #include <linux/initrd.h> |
29 | #include <linux/export.h> | ||
29 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
30 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
31 | #include <asm/system.h> | 32 | #include <asm/system.h> |
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 5dbbaa6e594c..1cb8427bedfb 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/gfp.h> | ||
14 | #include <asm/system.h> | 15 | #include <asm/system.h> |
15 | 16 | ||
16 | /* | 17 | /* |
@@ -60,6 +61,9 @@ long probe_kernel_write(void *dst, const void *src, size_t size) | |||
60 | return copied < 0 ? -EFAULT : 0; | 61 | return copied < 0 ? -EFAULT : 0; |
61 | } | 62 | } |
62 | 63 | ||
64 | /* | ||
65 | * Copy memory in real mode (kernel to kernel) | ||
66 | */ | ||
63 | int memcpy_real(void *dest, void *src, size_t count) | 67 | int memcpy_real(void *dest, void *src, size_t count) |
64 | { | 68 | { |
65 | register unsigned long _dest asm("2") = (unsigned long) dest; | 69 | register unsigned long _dest asm("2") = (unsigned long) dest; |
@@ -101,3 +105,55 @@ void copy_to_absolute_zero(void *dest, void *src, size_t count) | |||
101 | __ctl_load(cr0, 0, 0); | 105 | __ctl_load(cr0, 0, 0); |
102 | preempt_enable(); | 106 | preempt_enable(); |
103 | } | 107 | } |
108 | |||
109 | /* | ||
110 | * Copy memory from kernel (real) to user (virtual) | ||
111 | */ | ||
112 | int copy_to_user_real(void __user *dest, void *src, size_t count) | ||
113 | { | ||
114 | int offs = 0, size, rc; | ||
115 | char *buf; | ||
116 | |||
117 | buf = (char *) __get_free_page(GFP_KERNEL); | ||
118 | if (!buf) | ||
119 | return -ENOMEM; | ||
120 | rc = -EFAULT; | ||
121 | while (offs < count) { | ||
122 | size = min(PAGE_SIZE, count - offs); | ||
123 | if (memcpy_real(buf, src + offs, size)) | ||
124 | goto out; | ||
125 | if (copy_to_user(dest + offs, buf, size)) | ||
126 | goto out; | ||
127 | offs += size; | ||
128 | } | ||
129 | rc = 0; | ||
130 | out: | ||
131 | free_page((unsigned long) buf); | ||
132 | return rc; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Copy memory from user (virtual) to kernel (real) | ||
137 | */ | ||
138 | int copy_from_user_real(void *dest, void __user *src, size_t count) | ||
139 | { | ||
140 | int offs = 0, size, rc; | ||
141 | char *buf; | ||
142 | |||
143 | buf = (char *) __get_free_page(GFP_KERNEL); | ||
144 | if (!buf) | ||
145 | return -ENOMEM; | ||
146 | rc = -EFAULT; | ||
147 | while (offs < count) { | ||
148 | size = min(PAGE_SIZE, count - offs); | ||
149 | if (copy_from_user(buf, src + offs, size)) | ||
150 | goto out; | ||
151 | if (memcpy_real(dest + offs, buf, size)) | ||
152 | goto out; | ||
153 | offs += size; | ||
154 | } | ||
155 | rc = 0; | ||
156 | out: | ||
157 | free_page((unsigned long) buf); | ||
158 | return rc; | ||
159 | } | ||
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index c9a9f7f18188..f09c74881b7e 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/personality.h> | 27 | #include <linux/personality.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/mman.h> | ||
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/random.h> | 31 | #include <linux/random.h> |
31 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index d013ed39743b..b36537a5f43e 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/hugetlb.h> | 7 | #include <linux/hugetlb.h> |
8 | #include <asm/cacheflush.h> | ||
8 | #include <asm/pgtable.h> | 9 | #include <asm/pgtable.h> |
9 | 10 | ||
10 | static void change_page_attr(unsigned long addr, int numpages, | 11 | static void change_page_attr(unsigned long addr, int numpages, |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 5d56c2b95b14..301c84d3b542 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2007,2009 | 2 | * Copyright IBM Corp. 2007,2011 |
3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | 3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap) | |||
222 | 222 | ||
223 | /* Free all segment & region tables. */ | 223 | /* Free all segment & region tables. */ |
224 | down_read(&gmap->mm->mmap_sem); | 224 | down_read(&gmap->mm->mmap_sem); |
225 | spin_lock(&gmap->mm->page_table_lock); | ||
225 | list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { | 226 | list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { |
226 | table = (unsigned long *) page_to_phys(page); | 227 | table = (unsigned long *) page_to_phys(page); |
227 | if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) | 228 | if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) |
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap) | |||
230 | gmap_unlink_segment(gmap, table); | 231 | gmap_unlink_segment(gmap, table); |
231 | __free_pages(page, ALLOC_ORDER); | 232 | __free_pages(page, ALLOC_ORDER); |
232 | } | 233 | } |
234 | spin_unlock(&gmap->mm->page_table_lock); | ||
233 | up_read(&gmap->mm->mmap_sem); | 235 | up_read(&gmap->mm->mmap_sem); |
234 | list_del(&gmap->list); | 236 | list_del(&gmap->list); |
235 | kfree(gmap); | 237 | kfree(gmap); |
@@ -256,6 +258,9 @@ void gmap_disable(struct gmap *gmap) | |||
256 | } | 258 | } |
257 | EXPORT_SYMBOL_GPL(gmap_disable); | 259 | EXPORT_SYMBOL_GPL(gmap_disable); |
258 | 260 | ||
261 | /* | ||
262 | * gmap_alloc_table is assumed to be called with mmap_sem held | ||
263 | */ | ||
259 | static int gmap_alloc_table(struct gmap *gmap, | 264 | static int gmap_alloc_table(struct gmap *gmap, |
260 | unsigned long *table, unsigned long init) | 265 | unsigned long *table, unsigned long init) |
261 | { | 266 | { |
@@ -267,14 +272,12 @@ static int gmap_alloc_table(struct gmap *gmap, | |||
267 | return -ENOMEM; | 272 | return -ENOMEM; |
268 | new = (unsigned long *) page_to_phys(page); | 273 | new = (unsigned long *) page_to_phys(page); |
269 | crst_table_init(new, init); | 274 | crst_table_init(new, init); |
270 | down_read(&gmap->mm->mmap_sem); | ||
271 | if (*table & _REGION_ENTRY_INV) { | 275 | if (*table & _REGION_ENTRY_INV) { |
272 | list_add(&page->lru, &gmap->crst_list); | 276 | list_add(&page->lru, &gmap->crst_list); |
273 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | | 277 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | |
274 | (*table & _REGION_ENTRY_TYPE_MASK); | 278 | (*table & _REGION_ENTRY_TYPE_MASK); |
275 | } else | 279 | } else |
276 | __free_pages(page, ALLOC_ORDER); | 280 | __free_pages(page, ALLOC_ORDER); |
277 | up_read(&gmap->mm->mmap_sem); | ||
278 | return 0; | 281 | return 0; |
279 | } | 282 | } |
280 | 283 | ||
@@ -299,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
299 | 302 | ||
300 | flush = 0; | 303 | flush = 0; |
301 | down_read(&gmap->mm->mmap_sem); | 304 | down_read(&gmap->mm->mmap_sem); |
305 | spin_lock(&gmap->mm->page_table_lock); | ||
302 | for (off = 0; off < len; off += PMD_SIZE) { | 306 | for (off = 0; off < len; off += PMD_SIZE) { |
303 | /* Walk the guest addr space page table */ | 307 | /* Walk the guest addr space page table */ |
304 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 308 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
@@ -320,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
320 | *table = _SEGMENT_ENTRY_INV; | 324 | *table = _SEGMENT_ENTRY_INV; |
321 | } | 325 | } |
322 | out: | 326 | out: |
327 | spin_unlock(&gmap->mm->page_table_lock); | ||
323 | up_read(&gmap->mm->mmap_sem); | 328 | up_read(&gmap->mm->mmap_sem); |
324 | if (flush) | 329 | if (flush) |
325 | gmap_flush_tlb(gmap); | 330 | gmap_flush_tlb(gmap); |
@@ -350,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, | |||
350 | 355 | ||
351 | flush = 0; | 356 | flush = 0; |
352 | down_read(&gmap->mm->mmap_sem); | 357 | down_read(&gmap->mm->mmap_sem); |
358 | spin_lock(&gmap->mm->page_table_lock); | ||
353 | for (off = 0; off < len; off += PMD_SIZE) { | 359 | for (off = 0; off < len; off += PMD_SIZE) { |
354 | /* Walk the gmap address space page table */ | 360 | /* Walk the gmap address space page table */ |
355 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 361 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
@@ -373,19 +379,24 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, | |||
373 | flush |= gmap_unlink_segment(gmap, table); | 379 | flush |= gmap_unlink_segment(gmap, table); |
374 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); | 380 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); |
375 | } | 381 | } |
382 | spin_unlock(&gmap->mm->page_table_lock); | ||
376 | up_read(&gmap->mm->mmap_sem); | 383 | up_read(&gmap->mm->mmap_sem); |
377 | if (flush) | 384 | if (flush) |
378 | gmap_flush_tlb(gmap); | 385 | gmap_flush_tlb(gmap); |
379 | return 0; | 386 | return 0; |
380 | 387 | ||
381 | out_unmap: | 388 | out_unmap: |
389 | spin_unlock(&gmap->mm->page_table_lock); | ||
382 | up_read(&gmap->mm->mmap_sem); | 390 | up_read(&gmap->mm->mmap_sem); |
383 | gmap_unmap_segment(gmap, to, len); | 391 | gmap_unmap_segment(gmap, to, len); |
384 | return -ENOMEM; | 392 | return -ENOMEM; |
385 | } | 393 | } |
386 | EXPORT_SYMBOL_GPL(gmap_map_segment); | 394 | EXPORT_SYMBOL_GPL(gmap_map_segment); |
387 | 395 | ||
388 | unsigned long gmap_fault(unsigned long address, struct gmap *gmap) | 396 | /* |
397 | * this function is assumed to be called with mmap_sem held | ||
398 | */ | ||
399 | unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) | ||
389 | { | 400 | { |
390 | unsigned long *table, vmaddr, segment; | 401 | unsigned long *table, vmaddr, segment; |
391 | struct mm_struct *mm; | 402 | struct mm_struct *mm; |
@@ -445,16 +456,75 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap) | |||
445 | page = pmd_page(*pmd); | 456 | page = pmd_page(*pmd); |
446 | mp = (struct gmap_pgtable *) page->index; | 457 | mp = (struct gmap_pgtable *) page->index; |
447 | rmap->entry = table; | 458 | rmap->entry = table; |
459 | spin_lock(&mm->page_table_lock); | ||
448 | list_add(&rmap->list, &mp->mapper); | 460 | list_add(&rmap->list, &mp->mapper); |
461 | spin_unlock(&mm->page_table_lock); | ||
449 | /* Set gmap segment table entry to page table. */ | 462 | /* Set gmap segment table entry to page table. */ |
450 | *table = pmd_val(*pmd) & PAGE_MASK; | 463 | *table = pmd_val(*pmd) & PAGE_MASK; |
451 | return vmaddr | (address & ~PMD_MASK); | 464 | return vmaddr | (address & ~PMD_MASK); |
452 | } | 465 | } |
453 | return -EFAULT; | 466 | return -EFAULT; |
467 | } | ||
454 | 468 | ||
469 | unsigned long gmap_fault(unsigned long address, struct gmap *gmap) | ||
470 | { | ||
471 | unsigned long rc; | ||
472 | |||
473 | down_read(&gmap->mm->mmap_sem); | ||
474 | rc = __gmap_fault(address, gmap); | ||
475 | up_read(&gmap->mm->mmap_sem); | ||
476 | |||
477 | return rc; | ||
455 | } | 478 | } |
456 | EXPORT_SYMBOL_GPL(gmap_fault); | 479 | EXPORT_SYMBOL_GPL(gmap_fault); |
457 | 480 | ||
481 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) | ||
482 | { | ||
483 | |||
484 | unsigned long *table, address, size; | ||
485 | struct vm_area_struct *vma; | ||
486 | struct gmap_pgtable *mp; | ||
487 | struct page *page; | ||
488 | |||
489 | down_read(&gmap->mm->mmap_sem); | ||
490 | address = from; | ||
491 | while (address < to) { | ||
492 | /* Walk the gmap address space page table */ | ||
493 | table = gmap->table + ((address >> 53) & 0x7ff); | ||
494 | if (unlikely(*table & _REGION_ENTRY_INV)) { | ||
495 | address = (address + PMD_SIZE) & PMD_MASK; | ||
496 | continue; | ||
497 | } | ||
498 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
499 | table = table + ((address >> 42) & 0x7ff); | ||
500 | if (unlikely(*table & _REGION_ENTRY_INV)) { | ||
501 | address = (address + PMD_SIZE) & PMD_MASK; | ||
502 | continue; | ||
503 | } | ||
504 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
505 | table = table + ((address >> 31) & 0x7ff); | ||
506 | if (unlikely(*table & _REGION_ENTRY_INV)) { | ||
507 | address = (address + PMD_SIZE) & PMD_MASK; | ||
508 | continue; | ||
509 | } | ||
510 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
511 | table = table + ((address >> 20) & 0x7ff); | ||
512 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) { | ||
513 | address = (address + PMD_SIZE) & PMD_MASK; | ||
514 | continue; | ||
515 | } | ||
516 | page = pfn_to_page(*table >> PAGE_SHIFT); | ||
517 | mp = (struct gmap_pgtable *) page->index; | ||
518 | vma = find_vma(gmap->mm, mp->vmaddr); | ||
519 | size = min(to - address, PMD_SIZE - (address & ~PMD_MASK)); | ||
520 | zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK), | ||
521 | size, NULL); | ||
522 | address = (address + PMD_SIZE) & PMD_MASK; | ||
523 | } | ||
524 | up_read(&gmap->mm->mmap_sem); | ||
525 | } | ||
526 | EXPORT_SYMBOL_GPL(gmap_discard); | ||
527 | |||
458 | void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) | 528 | void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) |
459 | { | 529 | { |
460 | struct gmap_rmap *rmap, *next; | 530 | struct gmap_rmap *rmap, *next; |
@@ -662,8 +732,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) | |||
662 | 732 | ||
663 | void __tlb_remove_table(void *_table) | 733 | void __tlb_remove_table(void *_table) |
664 | { | 734 | { |
665 | void *table = (void *)((unsigned long) _table & PAGE_MASK); | 735 | const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK; |
666 | unsigned type = (unsigned long) _table & ~PAGE_MASK; | 736 | void *table = (void *)((unsigned long) _table & ~mask); |
737 | unsigned type = (unsigned long) _table & mask; | ||
667 | 738 | ||
668 | if (type) | 739 | if (type) |
669 | __page_table_free_rcu(table, type); | 740 | __page_table_free_rcu(table, type); |
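The __tlb_remove_table() change narrows the mask used to split the RCU cookie into pointer and type bits: s390 page tables are 2 KB objects, two to a 4 KB page, so a pointer to the second table in a page carries a sub-page offset that masking with PAGE_MASK would have discarded. A minimal sketch of the decode as it now works (FRAG_MASK is whatever the file defines; this is an illustration of the hunk above, not new code):

	/* illustration: only the low type bits are stripped; the 2 KB offset
	 * of the page table within its page stays part of the pointer */
	const unsigned long type_bits = (FRAG_MASK << 4) | FRAG_MASK;
	unsigned long cookie = (unsigned long) _table;
	void *table = (void *) (cookie & ~type_bits);
	unsigned int type = cookie & type_bits;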
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 781ff5169560..4799383e2df9 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -335,6 +335,9 @@ void __init vmem_map_init(void) | |||
335 | ro_start = ((unsigned long)&_stext) & PAGE_MASK; | 335 | ro_start = ((unsigned long)&_stext) & PAGE_MASK; |
336 | ro_end = PFN_ALIGN((unsigned long)&_eshared); | 336 | ro_end = PFN_ALIGN((unsigned long)&_eshared); |
337 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 337 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { |
338 | if (memory_chunk[i].type == CHUNK_CRASHK || | ||
339 | memory_chunk[i].type == CHUNK_OLDMEM) | ||
340 | continue; | ||
338 | start = memory_chunk[i].addr; | 341 | start = memory_chunk[i].addr; |
339 | end = memory_chunk[i].addr + memory_chunk[i].size; | 342 | end = memory_chunk[i].addr + memory_chunk[i].size; |
340 | if (start >= ro_end || end <= ro_start) | 343 | if (start >= ro_end || end <= ro_start) |
@@ -368,6 +371,9 @@ static int __init vmem_convert_memory_chunk(void) | |||
368 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 371 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
369 | if (!memory_chunk[i].size) | 372 | if (!memory_chunk[i].size) |
370 | continue; | 373 | continue; |
374 | if (memory_chunk[i].type == CHUNK_CRASHK || | ||
375 | memory_chunk[i].type == CHUNK_OLDMEM) | ||
376 | continue; | ||
371 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); | 377 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); |
372 | if (!seg) | 378 | if (!seg) |
373 | panic("Out of memory...\n"); | 379 | panic("Out of memory...\n"); |
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 4552ce40c81a..f43c0e4282af 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -994,7 +994,7 @@ allocate_error: | |||
994 | * | 994 | * |
995 | * Returns 0 on success, !0 on failure. | 995 | * Returns 0 on success, !0 on failure. |
996 | */ | 996 | */ |
997 | int hwsampler_deallocate() | 997 | int hwsampler_deallocate(void) |
998 | { | 998 | { |
999 | int rc; | 999 | int rc; |
1000 | 1000 | ||
@@ -1035,7 +1035,7 @@ unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) | |||
1035 | return cb->sample_overflow; | 1035 | return cb->sample_overflow; |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | int hwsampler_setup() | 1038 | int hwsampler_setup(void) |
1039 | { | 1039 | { |
1040 | int rc; | 1040 | int rc; |
1041 | int cpu; | 1041 | int cpu; |
@@ -1102,7 +1102,7 @@ setup_exit: | |||
1102 | return rc; | 1102 | return rc; |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | int hwsampler_shutdown() | 1105 | int hwsampler_shutdown(void) |
1106 | { | 1106 | { |
1107 | int rc; | 1107 | int rc; |
1108 | 1108 | ||
@@ -1203,7 +1203,7 @@ start_all_exit: | |||
1203 | * | 1203 | * |
1204 | * Returns 0 on success, !0 on failure. | 1204 | * Returns 0 on success, !0 on failure. |
1205 | */ | 1205 | */ |
1206 | int hwsampler_stop_all() | 1206 | int hwsampler_stop_all(void) |
1207 | { | 1207 | { |
1208 | int tmp_rc, rc, cpu; | 1208 | int tmp_rc, rc, cpu; |
1209 | struct hws_cpu_buffer *cb; | 1209 | struct hws_cpu_buffer *cb; |
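The hwsampler.c hunks are prototype hygiene: in C, `int hwsampler_setup()` declares a function with an unspecified parameter list, while `int hwsampler_setup(void)` states that it takes no arguments, letting the compiler reject stray parameters. A small standalone illustration:

	/* standalone C illustration, not kernel code */
	int old_style();	/* unspecified parameters: old_style(42) still compiles */
	int new_style(void);	/* takes no parameters: new_style(42) is rejected */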