author:    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-31 19:14:20 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2011-10-31 19:14:20 -0400
commit:    32087d4eeca14b82660dab288b1d659963b954bd (patch)
tree:      8c131ca9bf08f88d3b02e1937b795a42f8951d79 /arch/s390
parent:    b1c907f3b2675ecb01e340948fc62d6535ff5ac3 (diff)
parent:    07ea815b22b9f70ec8de6ddf8db63a1dd1585caf (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (54 commits)
[S390] Remove error checking from copy_oldmem_page()
[S390] qdio: prevent dsci access without adapter interrupts
[S390] irqstats: split IPI interrupt accounting
[S390] add missing __tlb_flush_global() for !CONFIG_SMP
[S390] sparse: fix sparse symbol shadow warning
[S390] sparse: fix sparse NULL pointer warnings
[S390] sparse: fix sparse warnings with __user pointers
[S390] sparse: fix sparse warnings in math-emu
[S390] sparse: fix sparse warnings about missing prototypes
[S390] sparse: fix sparse ANSI-C warnings
[S390] sparse: fix sparse static warnings
[S390] sparse: fix access past end of array warnings
[S390] dasd: prevent path verification before resume
[S390] qdio: remove multicast polling
[S390] qdio: reset outbound SBAL error states
[S390] qdio: EQBS retry after CCQ 96
[S390] qdio: add timestamp for last queue scan time
[S390] Introduce get_clock_fast()
[S390] kvm: Handle diagnose 0x10 (release pages)
[S390] take mmap_sem when walking guest page table
...
Diffstat (limited to 'arch/s390')
69 files changed, 1786 insertions, 423 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 6b99fc3f9b63..a9fbd43395f7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -569,6 +569,16 @@ config KEXEC
 current kernel, and to start another kernel. It is like a reboot
 but is independent of hardware/microcode support.
 
+config CRASH_DUMP
+bool "kernel crash dumps"
+depends on 64BIT
+help
+Generate crash dump after being started by kexec.
+Crash dump kernels are loaded in the main kernel with kexec-tools
+into a specially reserved region and then later executed after
+a crash by kdump/kexec.
+For more details see Documentation/kdump/kdump.txt
+
 config ZFCPDUMP
 def_bool n
 prompt "zfcpdump support"
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 028f23ea81d1..465eca756feb 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -61,7 +61,7 @@ static unsigned long free_mem_end_ptr;
 
 extern _sclp_print_early(const char *);
 
-int puts(const char *s)
+static int puts(const char *s)
 {
 _sclp_print_early(s);
 return 0;
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 29c82c640a88..6cf8e26b3137 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -68,7 +68,7 @@ CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 623f2fb71774..9381c92cc779 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -11,6 +11,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <asm/fcx.h>
+#include <asm/irq.h>
 
 /* structs from asm/cio.h */
 struct irb;
@@ -127,6 +128,7 @@ enum uc_todo {
 * @restore: callback for restoring after hibernation
 * @uc_handler: callback for unit check handler
 * @driver: embedded device driver structure
+* @int_class: interruption class to use for accounting interrupts
 */
 struct ccw_driver {
 struct ccw_device_id *ids;
@@ -144,6 +146,7 @@ struct ccw_driver {
 int (*restore)(struct ccw_device *);
 enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
 struct device_driver driver;
+enum interruption_class int_class;
 };
 
 extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index cdb9b78f6c08..2e49748b27da 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -12,6 +12,7 @@
 #define PSW32_MASK_IO 0x02000000UL
 #define PSW32_MASK_EXT 0x01000000UL
 #define PSW32_MASK_KEY 0x00F00000UL
+#define PSW32_MASK_BASE 0x00080000UL /* Always one */
 #define PSW32_MASK_MCHECK 0x00040000UL
 #define PSW32_MASK_WAIT 0x00020000UL
 #define PSW32_MASK_PSTATE 0x00010000UL
@@ -19,21 +20,19 @@
 #define PSW32_MASK_CC 0x00003000UL
 #define PSW32_MASK_PM 0x00000f00UL
 
-#define PSW32_ADDR_AMODE31 0x80000000UL
+#define PSW32_MASK_USER 0x00003F00UL
+
+#define PSW32_ADDR_AMODE 0x80000000UL
 #define PSW32_ADDR_INSN 0x7FFFFFFFUL
 
-#define PSW32_BASE_BITS 0x00080000UL
+#define PSW32_DEFAULT_KEY (((u32) PAGE_DEFAULT_ACC) << 20)
 
 #define PSW32_ASC_PRIMARY 0x00000000UL
 #define PSW32_ASC_ACCREG 0x00004000UL
 #define PSW32_ASC_SECONDARY 0x00008000UL
 #define PSW32_ASC_HOME 0x0000C000UL
 
-#define PSW32_MASK_MERGE(CURRENT,NEW) \
-(((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
-((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
-
-extern long psw32_user_bits;
+extern u32 psw32_user_bits;
 
 #define COMPAT_USER_HZ 100
 #define COMPAT_UTS_MACHINE "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 97cc4403fabf..6940abfbe1d9 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -168,5 +168,6 @@ enum diag308_rc {
 
 extern int diag308(unsigned long subcode, void *addr);
 extern void diag308_reset(void);
+extern void store_status(void);
 
 #endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index ba7b01c726a3..ba6d85f88d50 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -8,7 +8,8 @@ enum interruption_class {
 EXTERNAL_INTERRUPT,
 IO_INTERRUPT,
 EXTINT_CLK,
-EXTINT_IPI,
+EXTINT_EXC,
+EXTINT_EMS,
 EXTINT_TMR,
 EXTINT_TLA,
 EXTINT_PFL,
@@ -17,8 +18,8 @@ enum interruption_class {
 EXTINT_SCP,
 EXTINT_IUC,
 EXTINT_CPM,
+IOINT_CIO,
 IOINT_QAI,
-IOINT_QDI,
 IOINT_DAS,
 IOINT_C15,
 IOINT_C70,
@@ -28,6 +29,7 @@ enum interruption_class {
 IOINT_CLW,
 IOINT_CTC,
 IOINT_APB,
+IOINT_CSC,
 NMI_NMI,
 NR_IRQS,
 };
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index bb729b84a21e..cf4e47b0948c 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -30,9 +30,15 @@
 /* Not more than 2GB */
 #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
 
+/* Maximum address we can use for the crash control pages */
+#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
+
 /* Allocate one page for the pdp and the second for the code */
 #define KEXEC_CONTROL_PAGE_SIZE 4096
 
+/* Alignment of crashkernel memory */
+#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE
+
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 1ca5de07ac36..24e18473d926 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -145,6 +145,7 @@ struct kvm_vcpu_stat {
 u32 instruction_sigp_arch;
 u32 instruction_sigp_prefix;
 u32 instruction_sigp_restart;
+u32 diagnose_10;
 u32 diagnose_44;
 };
 
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index e85c911aabf0..9e13c7d56cc1 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -151,10 +151,8 @@ struct _lowcore {
 */
 __u32 ipib; /* 0x0e00 */
 __u32 ipib_checksum; /* 0x0e04 */
-
-/* 64 bit save area */
-__u64 save_area_64; /* 0x0e08 */
-__u8 pad_0x0e10[0x0f00-0x0e10]; /* 0x0e10 */
+__u32 vmcore_info; /* 0x0e08 */
+__u8 pad_0x0e0c[0x0f00-0x0e0c]; /* 0x0e0c */
 
 /* Extended facility list */
 __u64 stfle_fac_list[32]; /* 0x0f00 */
@@ -290,9 +288,7 @@
 */
 __u64 ipib; /* 0x0e00 */
 __u32 ipib_checksum; /* 0x0e08 */
-
-/* 64 bit save area */
-__u64 save_area_64; /* 0x0e0c */
+__u64 vmcore_info; /* 0x0e0c */
 __u8 pad_0x0e14[0x0f00-0x0e14]; /* 0x0e14 */
 
 /* Extended facility list */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index accb372ddc7e..f7ec548c2b9d 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -177,6 +177,7 @@ static inline int page_test_and_clear_young(unsigned long pfn)
 struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
+void arch_set_page_states(int make_stable);
 
 static inline int devmem_is_allowed(unsigned long pfn)
 {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c0cb794bb365..34ede0ea85a9 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -696,7 +696,9 @@ void gmap_disable(struct gmap *gmap);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 unsigned long to, unsigned long length);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
+void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
 
 /*
 * Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index a4b6229e5d4b..5f33d37d032c 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -33,6 +33,8 @@ static inline void get_cpu_id(struct cpuid *ptr)
 
 extern void s390_adjust_jiffies(void);
 extern int get_cpu_capability(unsigned int *);
+extern const struct seq_operations cpuinfo_op;
+extern int sysctl_ieee_emulation_warnings;
 
 /*
 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -118,17 +120,17 @@ struct stack_frame {
 /*
 * Do necessary setup to start up a new thread.
 */
 #define start_thread(regs, new_psw, new_stackp) do { \
-regs->psw.mask = psw_user_bits; \
+regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
 regs->gprs[15] = new_stackp; \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do { \
-regs->psw.mask = psw_user32_bits; \
+regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
 regs->gprs[15] = new_stackp; \
 crst_table_downgrade(current->mm, 1UL << 31); \
 } while (0)
 
 /* Forward declaration, a strange C thing */
@@ -187,7 +189,6 @@ static inline void __load_psw(psw_t psw)
 * Set PSW mask to specified value, while leaving the
 * PSW addr pointing to the next instruction.
 */
-
 static inline void __load_psw_mask (unsigned long mask)
 {
 unsigned long addr;
@@ -212,26 +213,37 @@ static inline void __load_psw_mask (unsigned long mask)
 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
 #endif /* __s390x__ */
 }
 
 /*
-* Function to stop a processor until an interruption occurred
+* Rewind PSW instruction address by specified number of bytes.
 */
-static inline void enabled_wait(void)
+static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 {
-__load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
-PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY);
-}
+#ifndef __s390x__
+if (psw.addr & PSW_ADDR_AMODE)
+/* 31 bit mode */
+return (psw.addr - ilc) | PSW_ADDR_AMODE;
+/* 24 bit mode */
+return (psw.addr - ilc) & ((1UL << 24) - 1);
+#else
+unsigned long mask;
 
+mask = (psw.mask & PSW_MASK_EA) ? -1UL :
+(psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
+(1UL << 24) - 1;
+return (psw.addr - ilc) & mask;
+#endif
+}
+
 /*
 * Function to drop a processor into disabled wait state
 */
-
 static inline void ATTRIB_NORET disabled_wait(unsigned long code)
 {
 unsigned long ctl_buf;
 psw_t dw_psw;
 
-dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
 dw_psw.addr = code;
 /*
 * Store status and then load disabled wait psw,
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 62fd80c9e98c..a65846340d51 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -230,17 +230,21 @@ typedef struct
 #define PSW_MASK_IO 0x02000000UL
 #define PSW_MASK_EXT 0x01000000UL
 #define PSW_MASK_KEY 0x00F00000UL
+#define PSW_MASK_BASE 0x00080000UL /* always one */
 #define PSW_MASK_MCHECK 0x00040000UL
 #define PSW_MASK_WAIT 0x00020000UL
 #define PSW_MASK_PSTATE 0x00010000UL
 #define PSW_MASK_ASC 0x0000C000UL
 #define PSW_MASK_CC 0x00003000UL
 #define PSW_MASK_PM 0x00000F00UL
+#define PSW_MASK_EA 0x00000000UL
+#define PSW_MASK_BA 0x00000000UL
+
+#define PSW_MASK_USER 0x00003F00UL
 
 #define PSW_ADDR_AMODE 0x80000000UL
 #define PSW_ADDR_INSN 0x7FFFFFFFUL
 
-#define PSW_BASE_BITS 0x00080000UL
 #define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
 
 #define PSW_ASC_PRIMARY 0x00000000UL
@@ -254,6 +258,7 @@ typedef struct
 #define PSW_MASK_DAT 0x0400000000000000UL
 #define PSW_MASK_IO 0x0200000000000000UL
 #define PSW_MASK_EXT 0x0100000000000000UL
+#define PSW_MASK_BASE 0x0000000000000000UL
 #define PSW_MASK_KEY 0x00F0000000000000UL
 #define PSW_MASK_MCHECK 0x0004000000000000UL
 #define PSW_MASK_WAIT 0x0002000000000000UL
@@ -261,12 +266,14 @@ typedef struct
 #define PSW_MASK_ASC 0x0000C00000000000UL
 #define PSW_MASK_CC 0x0000300000000000UL
 #define PSW_MASK_PM 0x00000F0000000000UL
+#define PSW_MASK_EA 0x0000000100000000UL
+#define PSW_MASK_BA 0x0000000080000000UL
+
+#define PSW_MASK_USER 0x00003F0180000000UL
 
 #define PSW_ADDR_AMODE 0x0000000000000000UL
 #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
 
-#define PSW_BASE_BITS 0x0000000180000000UL
-#define PSW_BASE32_BITS 0x0000000080000000UL
 #define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
 
 #define PSW_ASC_PRIMARY 0x0000000000000000UL
@@ -279,18 +286,7 @@ typedef struct
 #ifdef __KERNEL__
 extern long psw_kernel_bits;
 extern long psw_user_bits;
-#ifdef CONFIG_64BIT
-extern long psw_user32_bits;
 #endif
-#endif
-
-/* This macro merges a NEW PSW mask specified by the user into
-the currently active PSW mask CURRENT, modifying only those
-bits in CURRENT that the user may be allowed to change: this
-is the condition code and the program mask bits. */
-#define PSW_MASK_MERGE(CURRENT,NEW) \
-(((CURRENT) & ~(PSW_MASK_CC|PSW_MASK_PM)) | \
-((NEW) & (PSW_MASK_CC|PSW_MASK_PM)))
 
 /*
 * The s390_regs structure is used to define the elf_gregset_t.
@@ -328,8 +324,7 @@ struct pt_regs
 psw_t psw;
 unsigned long gprs[NUM_GPRS];
 unsigned long orig_gpr2;
-unsigned short ilc;
-unsigned short svcnr;
+unsigned int svc_code;
 };
 
 /*
@@ -487,6 +482,8 @@ typedef struct
 #define PTRACE_POKETEXT_AREA 0x5004
 #define PTRACE_POKEDATA_AREA 0x5005
 #define PTRACE_GET_LAST_BREAK 0x5006
+#define PTRACE_PEEK_SYSTEM_CALL 0x5007
+#define PTRACE_POKE_SYSTEM_CALL 0x5008
 
 /*
 * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index f584f4a52581..3d6ad4ad2a3f 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -17,5 +17,5 @@ struct reset_call {
 
 extern void register_reset_call(struct reset_call *reset);
 extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void);
+extern void s390_reset_system(void (*func)(void *), void *data);
 #endif /* _ASM_S390_RESET_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index d5e2ef10537d..5a099714df04 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -26,15 +26,21 @@
 #define IPL_DEVICE (*(unsigned long *) (0x10404))
 #define INITRD_START (*(unsigned long *) (0x1040C))
 #define INITRD_SIZE (*(unsigned long *) (0x10414))
+#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
+#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
 #else /* __s390x__ */
 #define IPL_DEVICE (*(unsigned long *) (0x10400))
 #define INITRD_START (*(unsigned long *) (0x10408))
 #define INITRD_SIZE (*(unsigned long *) (0x10410))
+#define OLDMEM_BASE (*(unsigned long *) (0x10418))
+#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
 #endif /* __s390x__ */
 #define COMMAND_LINE ((char *) (0x10480))
 
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
+#define CHUNK_OLDMEM 4
+#define CHUNK_CRASHK 5
 
 struct mem_chunk {
 unsigned long addr;
@@ -48,6 +54,8 @@ extern int memory_end_set;
 extern unsigned long memory_end;
 
 void detect_memory_layout(struct mem_chunk chunk[]);
+void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
+unsigned long size, int type);
 
 #define PRIMARY_SPACE_MODE 0
 #define ACCESS_REGISTER_MODE 1
@@ -106,6 +114,7 @@ extern unsigned int user_mode;
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE (32UL<<20)
+#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20)
 
 /*
 * Console mode. Override with conmode=
@@ -134,10 +143,14 @@ extern char kernel_nss_name[];
 #define IPL_DEVICE 0x10404
 #define INITRD_START 0x1040C
 #define INITRD_SIZE 0x10414
+#define OLDMEM_BASE 0x1041C
+#define OLDMEM_SIZE 0x10424
 #else /* __s390x__ */
 #define IPL_DEVICE 0x10400
 #define INITRD_START 0x10408
 #define INITRD_SIZE 0x10410
+#define OLDMEM_BASE 0x10418
+#define OLDMEM_SIZE 0x10420
 #endif /* __s390x__ */
 #define COMMAND_LINE 0x10480
 
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 0addc6466d95..ca3f8814e361 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -72,6 +72,6 @@ extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
 
 #define UDIV_NEEDS_NORMALIZATION 0
 
-#define abort() return 0
+#define abort() BUG()
 
 #define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 045e009fc164..ab47a69fdf07 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -33,6 +33,7 @@ extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
 extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
 int from, int to);
+extern void smp_restart_with_online_cpu(void);
 extern void smp_restart_cpu(void);
 
 /*
@@ -64,6 +65,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 func(data);
 }
 
+static inline void smp_restart_with_online_cpu(void)
+{
+}
+
 #define smp_vcpu_scheduled (1)
 
 #endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 56612fc8186e..fd94dfec8d08 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -13,6 +13,8 @@
 
 #include <linux/smp.h>
 
+extern int spin_retry;
+
 static inline int
 _raw_compare_and_swap(volatile unsigned int *lock,
 unsigned int old, unsigned int new)
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 5c0246b955d8..b239ff53b189 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -13,6 +13,7 @@
 #define _ASM_SYSCALL_H 1
 
 #include <linux/sched.h>
+#include <linux/err.h>
 #include <asm/ptrace.h>
 
 /*
@@ -25,7 +26,8 @@ extern const unsigned int sys_call_table[];
 static inline long syscall_get_nr(struct task_struct *task,
 struct pt_regs *regs)
 {
-return regs->svcnr ? regs->svcnr : -1;
+return test_tsk_thread_flag(task, TIF_SYSCALL) ?
+(regs->svc_code & 0xffff) : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
@@ -37,7 +39,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
 struct pt_regs *regs)
 {
-return (regs->gprs[2] >= -4096UL) ? -regs->gprs[2] : 0;
+return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0;
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 6582f69f2389..ef573c1d71a7 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,6 +20,8 @@
 
 struct task_struct;
 
+extern int sysctl_userprocess_debug;
+
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_per_regs(struct task_struct *task);
 
@@ -114,6 +116,8 @@ extern void pfault_fini(void);
 extern void cmma_init(void);
 extern int memcpy_real(void *, void *, size_t);
 extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern int copy_to_user_real(void __user *dest, void *src, size_t count);
+extern int copy_from_user_real(void *dest, void __user *src, size_t count);
 
 #define finish_arch_switch(prev) do { \
 set_fs(current->thread.mm_segment); \
@@ -210,8 +214,10 @@ __set_psw_mask(unsigned long mask)
 __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
 }
 
-#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
-#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
+#define local_mcck_enable() \
+__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+#define local_mcck_disable() \
+__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
 
 #ifdef CONFIG_SMP
 
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 1a5dbb6f1495..a23183423b14 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -48,6 +48,7 @@ struct thread_info {
 unsigned int cpu; /* current CPU */
 int preempt_count; /* 0 => preemptable, <0 => BUG */
 struct restart_block restart_block;
+unsigned int system_call;
 __u64 user_timer;
 __u64 system_timer;
 unsigned long last_break; /* last breaking-event-address. */
@@ -84,10 +85,10 @@ static inline struct thread_info *current_thread_info(void)
 /*
 * thread information flags bit numbers
 */
+#define TIF_SYSCALL 0 /* inside a system call */
 #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
 #define TIF_SIGPENDING 2 /* signal pending */
 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_RESTART_SVC 4 /* restart svc with new svc number */
 #define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING 7 /* machine check handling is pending */
 #define TIF_SYSCALL_TRACE 8 /* syscall trace active */
@@ -103,11 +104,11 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SINGLE_STEP 20 /* This task is single stepped */
 #define TIF_FREEZE 21 /* thread is freezing for suspend */
 
+#define _TIF_SYSCALL (1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC)
 #define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -117,7 +118,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIE (1<<TIF_SIE)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT (1<<TIF_31BIT)
-#define _TIF_SINGLE_STEP (1<<TIF_FREEZE)
+#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
 #define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 88829a40af6f..d610bef9c5e9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -86,6 +86,17 @@ static inline void get_clock_ext(char *clk)
 asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
+static inline unsigned long long get_clock_fast(void)
+{
+unsigned long long clk;
+
+if (test_facility(25))
+asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
+else
+clk = get_clock();
+return clk;
+}
+
 static inline unsigned long long get_clock_xt(void)
 {
 unsigned char clk[16];
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 304445382382..1d8648cf2fea 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -59,6 +59,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 }
 #else
 #define __tlb_flush_full(mm) __tlb_flush_local()
+#define __tlb_flush_global() __tlb_flush_local()
 #endif
 
 /*
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index df3732249baa..dd4f07640919 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2b45591e1582..751318765e2e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -45,8 +45,7 @@ int main(void)
 DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
 DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
-DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
-DEFINE(__PT_SVCNR, offsetof(struct pt_regs, svcnr));
+DEFINE(__PT_SVC_CODE, offsetof(struct pt_regs, svc_code));
 DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 BLANK();
 DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -141,7 +140,6 @@ int main(void)
 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
-DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
 #ifdef CONFIG_32BIT
 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 255435663bf8..f8828d38fa6e 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -86,6 +86,8 @@ s390_base_pgm_handler_fn:
 ENTRY(diag308_reset)
 larl %r4,.Lctlregs # Save control registers
 stctg %c0,%c15,0(%r4)
+larl %r4,.Lfpctl # Floating point control register
+stfpc 0(%r4)
 larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
 lghi %r3,0
 lg %r4,0(%r4) # Save PSW
@@ -99,6 +101,8 @@ ENTRY(diag308_reset)
 sam64 # Switch to 64 bit addressing mode
 larl %r4,.Lctlregs # Restore control registers
 lctlg %c0,%c15,0(%r4)
+larl %r4,.Lfpctl # Restore floating point ctl register
+lfpc 0(%r4)
 br %r14
 .align 16
 .Lrestart_psw:
@@ -110,6 +114,8 @@ ENTRY(diag308_reset)
 .rept 16
 .quad 0
 .endr
+.Lfpctl:
+.long 0
 .previous
 
 #else /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 53acaa86dd94..84a982898448 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -60,12 +60,9 @@
 
 #include "compat_linux.h"
 
-long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
-PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
-long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
-PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
-PSW32_MASK_PSTATE);
+u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
+PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
+PSW32_MASK_PSTATE | PSW32_ASC_HOME;
 
 /* For this source file, we want overflow handling. */
 
@@ -365,12 +362,7 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 if (set) {
 if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
 return -EFAULT;
-switch (_NSIG_WORDS) {
-case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
-case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
-case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
-case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
-}
+s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
 }
 set_fs (KERNEL_DS);
 ret = sys_rt_sigprocmask(how,
@@ -380,12 +372,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 set_fs (old_fs);
 if (ret) return ret;
 if (oset) {
-switch (_NSIG_WORDS) {
-case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
-case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
-case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
-}
+s32.sig[1] = (s.sig[0] >> 32);
+s32.sig[0] = s.sig[0];
 if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
 return -EFAULT;
 }
@@ -404,12 +392,8 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
 ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
 set_fs (old_fs);
 if (!ret) {
-switch (_NSIG_WORDS) {
-case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
-case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
-case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
-}
+s32.sig[1] = (s.sig[0] >> 32);
+s32.sig[0] = s.sig[0];
 if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
 return -EFAULT;
 }
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index a9a285b8c4ad..4f68c81d3ffa 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -141,7 +141,8 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
141 | break; | 141 | break; |
142 | case __SI_FAULT >> 16: | 142 | case __SI_FAULT >> 16: |
143 | err |= __get_user(tmp, &from->si_addr); | 143 | err |= __get_user(tmp, &from->si_addr); |
144 | to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN); | 144 | to->si_addr = (void __force __user *) |
145 | (u64) (tmp & PSW32_ADDR_INSN); | ||
145 | break; | 146 | break; |
146 | case __SI_POLL >> 16: | 147 | case __SI_POLL >> 16: |
147 | err |= __get_user(to->si_band, &from->si_band); | 148 | err |= __get_user(to->si_band, &from->si_band); |
@@ -213,16 +214,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | |||
213 | ret = get_user(sa_handler, &act->sa_handler); | 214 | ret = get_user(sa_handler, &act->sa_handler); |
214 | ret |= __copy_from_user(&set32, &act->sa_mask, | 215 | ret |= __copy_from_user(&set32, &act->sa_mask, |
215 | sizeof(compat_sigset_t)); | 216 | sizeof(compat_sigset_t)); |
216 | switch (_NSIG_WORDS) { | 217 | new_ka.sa.sa_mask.sig[0] = |
217 | case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | 218 | set32.sig[0] | (((long)set32.sig[1]) << 32); |
218 | | (((long)set32.sig[7]) << 32); | ||
219 | case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | ||
220 | | (((long)set32.sig[5]) << 32); | ||
221 | case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | ||
222 | | (((long)set32.sig[3]) << 32); | ||
223 | case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | ||
224 | | (((long)set32.sig[1]) << 32); | ||
225 | } | ||
226 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 219 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); |
227 | 220 | ||
228 | if (ret) | 221 | if (ret) |
@@ -233,20 +226,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | |||
233 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 226 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
234 | 227 | ||
235 | if (!ret && oact) { | 228 | if (!ret && oact) { |
236 | switch (_NSIG_WORDS) { | 229 | set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); |
237 | case 4: | 230 | set32.sig[0] = old_ka.sa.sa_mask.sig[0]; |
238 | set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); | ||
239 | set32.sig[6] = old_ka.sa.sa_mask.sig[3]; | ||
240 | case 3: | ||
241 | set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); | ||
242 | set32.sig[4] = old_ka.sa.sa_mask.sig[2]; | ||
243 | case 2: | ||
244 | set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); | ||
245 | set32.sig[2] = old_ka.sa.sa_mask.sig[1]; | ||
246 | case 1: | ||
247 | set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); | ||
248 | set32.sig[0] = old_ka.sa.sa_mask.sig[0]; | ||
249 | } | ||
250 | ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); | 231 | ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); |
251 | ret |= __copy_to_user(&oact->sa_mask, &set32, | 232 | ret |= __copy_to_user(&oact->sa_mask, &set32, |
252 | sizeof(compat_sigset_t)); | 233 | sizeof(compat_sigset_t)); |
@@ -300,9 +281,10 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) | |||
300 | _s390_regs_common32 regs32; | 281 | _s390_regs_common32 regs32; |
301 | int err, i; | 282 | int err, i; |
302 | 283 | ||
303 | regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits, | 284 | regs32.psw.mask = psw32_user_bits | |
304 | (__u32)(regs->psw.mask >> 32)); | 285 | ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER); |
305 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; | 286 | regs32.psw.addr = (__u32) regs->psw.addr | |
287 | (__u32)(regs->psw.mask & PSW_MASK_BA); | ||
306 | for (i = 0; i < NUM_GPRS; i++) | 288 | for (i = 0; i < NUM_GPRS; i++) |
307 | regs32.gprs[i] = (__u32) regs->gprs[i]; | 289 | regs32.gprs[i] = (__u32) regs->gprs[i]; |
308 | save_access_regs(current->thread.acrs); | 290 | save_access_regs(current->thread.acrs); |
@@ -327,8 +309,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | |||
327 | err = __copy_from_user(®s32, &sregs->regs, sizeof(regs32)); | 309 | err = __copy_from_user(®s32, &sregs->regs, sizeof(regs32)); |
328 | if (err) | 310 | if (err) |
329 | return err; | 311 | return err; |
330 | regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, | 312 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
331 | (__u64)regs32.psw.mask << 32); | 313 | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | |
314 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); | ||
332 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); | 315 | regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); |
333 | for (i = 0; i < NUM_GPRS; i++) | 316 | for (i = 0; i < NUM_GPRS; i++) |
334 | regs->gprs[i] = (__u64) regs32.gprs[i]; | 317 | regs->gprs[i] = (__u64) regs32.gprs[i]; |
@@ -342,7 +325,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) | |||
342 | return err; | 325 | return err; |
343 | 326 | ||
344 | restore_fp_regs(¤t->thread.fp_regs); | 327 | restore_fp_regs(¤t->thread.fp_regs); |
345 | regs->svcnr = 0; /* disable syscall checks */ | 328 | clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ |
346 | return 0; | 329 | return 0; |
347 | } | 330 | } |
348 | 331 | ||
@@ -496,11 +479,11 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
496 | /* Set up to return from userspace. If provided, use a stub | 479 | /* Set up to return from userspace. If provided, use a stub |
497 | already in userspace. */ | 480 | already in userspace. */ |
498 | if (ka->sa.sa_flags & SA_RESTORER) { | 481 | if (ka->sa.sa_flags & SA_RESTORER) { |
499 | regs->gprs[14] = (__u64) ka->sa.sa_restorer; | 482 | regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE; |
500 | } else { | 483 | } else { |
501 | regs->gprs[14] = (__u64) frame->retcode; | 484 | regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE; |
502 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, | 485 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, |
503 | (u16 __user *)(frame->retcode))) | 486 | (u16 __force __user *)(frame->retcode))) |
504 | goto give_sigsegv; | 487 | goto give_sigsegv; |
505 | } | 488 | } |
506 | 489 | ||
@@ -509,11 +492,12 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
509 | goto give_sigsegv; | 492 | goto give_sigsegv; |
510 | 493 | ||
511 | /* Set up registers for signal handler */ | 494 | /* Set up registers for signal handler */ |
512 | regs->gprs[15] = (__u64) frame; | 495 | regs->gprs[15] = (__force __u64) frame; |
513 | regs->psw.addr = (__u64) ka->sa.sa_handler; | 496 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ |
497 | regs->psw.addr = (__force __u64) ka->sa.sa_handler; | ||
514 | 498 | ||
515 | regs->gprs[2] = map_signal(sig); | 499 | regs->gprs[2] = map_signal(sig); |
516 | regs->gprs[3] = (__u64) &frame->sc; | 500 | regs->gprs[3] = (__force __u64) &frame->sc; |
517 | 501 | ||
518 | /* We forgot to include these in the sigcontext. | 502 | /* We forgot to include these in the sigcontext. |
519 | To avoid breaking binary compatibility, they are passed as args. */ | 503 | To avoid breaking binary compatibility, they are passed as args. */ |
@@ -521,7 +505,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
521 | regs->gprs[5] = current->thread.prot_addr; | 505 | regs->gprs[5] = current->thread.prot_addr; |
522 | 506 | ||
523 | /* Place signal number on stack to allow backtrace from handler. */ | 507 | /* Place signal number on stack to allow backtrace from handler. */ |
524 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) | 508 | if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo)) |
525 | goto give_sigsegv; | 509 | goto give_sigsegv; |
526 | return 0; | 510 | return 0; |
527 | 511 | ||
@@ -564,20 +548,21 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
564 | } else { | 548 | } else { |
565 | regs->gprs[14] = (__u64) frame->retcode; | 549 | regs->gprs[14] = (__u64) frame->retcode; |
566 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | 550 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, |
567 | (u16 __user *)(frame->retcode)); | 551 | (u16 __force __user *)(frame->retcode)); |
568 | } | 552 | } |
569 | 553 | ||
570 | /* Set up backchain. */ | 554 | /* Set up backchain. */ |
571 | if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) | 555 | if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) |
572 | goto give_sigsegv; | 556 | goto give_sigsegv; |
573 | 557 | ||
574 | /* Set up registers for signal handler */ | 558 | /* Set up registers for signal handler */ |
575 | regs->gprs[15] = (__u64) frame; | 559 | regs->gprs[15] = (__force __u64) frame; |
560 | regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ | ||
576 | regs->psw.addr = (__u64) ka->sa.sa_handler; | 561 | regs->psw.addr = (__u64) ka->sa.sa_handler; |
577 | 562 | ||
578 | regs->gprs[2] = map_signal(sig); | 563 | regs->gprs[2] = map_signal(sig); |
579 | regs->gprs[3] = (__u64) &frame->info; | 564 | regs->gprs[3] = (__force __u64) &frame->info; |
580 | regs->gprs[4] = (__u64) &frame->uc; | 565 | regs->gprs[4] = (__force __u64) &frame->uc; |
581 | return 0; | 566 | return 0; |
582 | 567 | ||
583 | give_sigsegv: | 568 | give_sigsegv: |
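The setup_frame32()/setup_rt_frame32() hunks above force 31-bit addressing mode for compat signal delivery: PSW_MASK_BA is set in the PSW mask and the return address placed in gpr 14 is tagged with PSW32_ADDR_AMODE. A minimal standalone sketch of the address tagging; the macro value and the trampoline address below are assumptions, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

/* Assumed value of the 31-bit addressing-mode bit for compat PSW addresses;
 * the authoritative definition lives in the kernel headers. */
#define PSW32_ADDR_AMODE 0x80000000U

int main(void)
{
        uint32_t restorer = 0x7ffff10aU;        /* hypothetical trampoline address */
        uint64_t r14 = (uint64_t)(restorer | PSW32_ADDR_AMODE);

        /* As in setup_frame32(): gpr 14 carries the amode-31 tagged return address. */
        printf("r14 = %#llx\n", (unsigned long long) r14);
        return 0;
}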
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 7526db6bf501..5006a1d9f5d0 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -1623,8 +1623,7 @@ ENTRY(sys_syncfs_wrapper) | |||
1623 | lgfr %r2,%r2 # int | 1623 | lgfr %r2,%r2 # int |
1624 | jg sys_syncfs | 1624 | jg sys_syncfs |
1625 | 1625 | ||
1626 | .globl sys_setns_wrapper | 1626 | ENTRY(sys_setns_wrapper) |
1627 | sys_setns_wrapper: | ||
1628 | lgfr %r2,%r2 # int | 1627 | lgfr %r2,%r2 # int |
1629 | lgfr %r3,%r3 # int | 1628 | lgfr %r3,%r3 # int |
1630 | jg sys_setns | 1629 | jg sys_setns |
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c new file mode 100644 index 000000000000..39f8fd4438fc --- /dev/null +++ b/arch/s390/kernel/crash_dump.c | |||
@@ -0,0 +1,426 @@ | |||
1 | /* | ||
2 | * S390 kdump implementation | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/crash_dump.h> | ||
9 | #include <asm/lowcore.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/gfp.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/crash_dump.h> | ||
15 | #include <linux/bootmem.h> | ||
16 | #include <linux/elf.h> | ||
17 | #include <asm/ipl.h> | ||
18 | |||
19 | #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) | ||
20 | #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) | ||
21 | #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) | ||
22 | |||
23 | /* | ||
24 | * Copy one page from "oldmem" | ||
25 | * | ||
26 | * For the kdump reserved memory this functions performs a swap operation: | ||
27 | * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE]. | ||
28 | * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] | ||
29 | */ | ||
30 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | ||
31 | size_t csize, unsigned long offset, int userbuf) | ||
32 | { | ||
33 | unsigned long src; | ||
34 | |||
35 | if (!csize) | ||
36 | return 0; | ||
37 | |||
38 | src = (pfn << PAGE_SHIFT) + offset; | ||
39 | if (src < OLDMEM_SIZE) | ||
40 | src += OLDMEM_BASE; | ||
41 | else if (src > OLDMEM_BASE && | ||
42 | src < OLDMEM_BASE + OLDMEM_SIZE) | ||
43 | src -= OLDMEM_BASE; | ||
44 | if (userbuf) | ||
45 | copy_to_user_real((void __force __user *) buf, (void *) src, | ||
46 | csize); | ||
47 | else | ||
48 | memcpy_real(buf, (void *) src, csize); | ||
49 | return csize; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Copy memory from old kernel | ||
54 | */ | ||
55 | static int copy_from_oldmem(void *dest, void *src, size_t count) | ||
56 | { | ||
57 | unsigned long copied = 0; | ||
58 | int rc; | ||
59 | |||
60 | if ((unsigned long) src < OLDMEM_SIZE) { | ||
61 | copied = min(count, OLDMEM_SIZE - (unsigned long) src); | ||
62 | rc = memcpy_real(dest, src + OLDMEM_BASE, copied); | ||
63 | if (rc) | ||
64 | return rc; | ||
65 | } | ||
66 | return memcpy_real(dest + copied, src + copied, count - copied); | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Alloc memory and panic in case of ENOMEM | ||
71 | */ | ||
72 | static void *kzalloc_panic(int len) | ||
73 | { | ||
74 | void *rc; | ||
75 | |||
76 | rc = kzalloc(len, GFP_KERNEL); | ||
77 | if (!rc) | ||
78 | panic("s390 kdump kzalloc (%d) failed", len); | ||
79 | return rc; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Get memory layout and create hole for oldmem | ||
84 | */ | ||
85 | static struct mem_chunk *get_memory_layout(void) | ||
86 | { | ||
87 | struct mem_chunk *chunk_array; | ||
88 | |||
89 | chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk)); | ||
90 | detect_memory_layout(chunk_array); | ||
91 | create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK); | ||
92 | return chunk_array; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Initialize ELF note | ||
97 | */ | ||
98 | static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len, | ||
99 | const char *name) | ||
100 | { | ||
101 | Elf64_Nhdr *note; | ||
102 | u64 len; | ||
103 | |||
104 | note = (Elf64_Nhdr *)buf; | ||
105 | note->n_namesz = strlen(name) + 1; | ||
106 | note->n_descsz = d_len; | ||
107 | note->n_type = type; | ||
108 | len = sizeof(Elf64_Nhdr); | ||
109 | |||
110 | memcpy(buf + len, name, note->n_namesz); | ||
111 | len = roundup(len + note->n_namesz, 4); | ||
112 | |||
113 | memcpy(buf + len, desc, note->n_descsz); | ||
114 | len = roundup(len + note->n_descsz, 4); | ||
115 | |||
116 | return PTR_ADD(buf, len); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Initialize prstatus note | ||
121 | */ | ||
122 | static void *nt_prstatus(void *ptr, struct save_area *sa) | ||
123 | { | ||
124 | struct elf_prstatus nt_prstatus; | ||
125 | static int cpu_nr = 1; | ||
126 | |||
127 | memset(&nt_prstatus, 0, sizeof(nt_prstatus)); | ||
128 | memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs)); | ||
129 | memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw)); | ||
130 | memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs)); | ||
131 | nt_prstatus.pr_pid = cpu_nr; | ||
132 | cpu_nr++; | ||
133 | |||
134 | return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus), | ||
135 | "CORE"); | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Initialize fpregset (floating point) note | ||
140 | */ | ||
141 | static void *nt_fpregset(void *ptr, struct save_area *sa) | ||
142 | { | ||
143 | elf_fpregset_t nt_fpregset; | ||
144 | |||
145 | memset(&nt_fpregset, 0, sizeof(nt_fpregset)); | ||
146 | memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg)); | ||
147 | memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs)); | ||
148 | |||
149 | return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset), | ||
150 | "CORE"); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Initialize timer note | ||
155 | */ | ||
156 | static void *nt_s390_timer(void *ptr, struct save_area *sa) | ||
157 | { | ||
158 | return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer), | ||
159 | KEXEC_CORE_NOTE_NAME); | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Initialize TOD clock comparator note | ||
164 | */ | ||
165 | static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa) | ||
166 | { | ||
167 | return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp, | ||
168 | sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Initialize TOD programmable register note | ||
173 | */ | ||
174 | static void *nt_s390_tod_preg(void *ptr, struct save_area *sa) | ||
175 | { | ||
176 | return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg, | ||
177 | sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Initialize control register note | ||
182 | */ | ||
183 | static void *nt_s390_ctrs(void *ptr, struct save_area *sa) | ||
184 | { | ||
185 | return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs, | ||
186 | sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Initialize prefix register note | ||
191 | */ | ||
192 | static void *nt_s390_prefix(void *ptr, struct save_area *sa) | ||
193 | { | ||
194 | return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg, | ||
195 | sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME); | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * Fill ELF notes for one CPU with save area registers | ||
200 | */ | ||
201 | void *fill_cpu_elf_notes(void *ptr, struct save_area *sa) | ||
202 | { | ||
203 | ptr = nt_prstatus(ptr, sa); | ||
204 | ptr = nt_fpregset(ptr, sa); | ||
205 | ptr = nt_s390_timer(ptr, sa); | ||
206 | ptr = nt_s390_tod_cmp(ptr, sa); | ||
207 | ptr = nt_s390_tod_preg(ptr, sa); | ||
208 | ptr = nt_s390_ctrs(ptr, sa); | ||
209 | ptr = nt_s390_prefix(ptr, sa); | ||
210 | return ptr; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Initialize prpsinfo note (new kernel) | ||
215 | */ | ||
216 | static void *nt_prpsinfo(void *ptr) | ||
217 | { | ||
218 | struct elf_prpsinfo prpsinfo; | ||
219 | |||
220 | memset(&prpsinfo, 0, sizeof(prpsinfo)); | ||
221 | prpsinfo.pr_sname = 'R'; | ||
222 | strcpy(prpsinfo.pr_fname, "vmlinux"); | ||
223 | return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo), | ||
224 | KEXEC_CORE_NOTE_NAME); | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * Initialize vmcoreinfo note (new kernel) | ||
229 | */ | ||
230 | static void *nt_vmcoreinfo(void *ptr) | ||
231 | { | ||
232 | char nt_name[11], *vmcoreinfo; | ||
233 | Elf64_Nhdr note; | ||
234 | void *addr; | ||
235 | |||
236 | if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) | ||
237 | return ptr; | ||
238 | memset(nt_name, 0, sizeof(nt_name)); | ||
239 | if (copy_from_oldmem(¬e, addr, sizeof(note))) | ||
240 | return ptr; | ||
241 | if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1)) | ||
242 | return ptr; | ||
243 | if (strcmp(nt_name, "VMCOREINFO") != 0) | ||
244 | return ptr; | ||
245 | vmcoreinfo = kzalloc_panic(note.n_descsz + 1); | ||
246 | if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz)) | ||
247 | return ptr; | ||
248 | vmcoreinfo[note.n_descsz + 1] = 0; | ||
249 | return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO"); | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Initialize ELF header (new kernel) | ||
254 | */ | ||
255 | static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) | ||
256 | { | ||
257 | memset(ehdr, 0, sizeof(*ehdr)); | ||
258 | memcpy(ehdr->e_ident, ELFMAG, SELFMAG); | ||
259 | ehdr->e_ident[EI_CLASS] = ELFCLASS64; | ||
260 | ehdr->e_ident[EI_DATA] = ELFDATA2MSB; | ||
261 | ehdr->e_ident[EI_VERSION] = EV_CURRENT; | ||
262 | memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD); | ||
263 | ehdr->e_type = ET_CORE; | ||
264 | ehdr->e_machine = EM_S390; | ||
265 | ehdr->e_version = EV_CURRENT; | ||
266 | ehdr->e_phoff = sizeof(Elf64_Ehdr); | ||
267 | ehdr->e_ehsize = sizeof(Elf64_Ehdr); | ||
268 | ehdr->e_phentsize = sizeof(Elf64_Phdr); | ||
269 | ehdr->e_phnum = mem_chunk_cnt + 1; | ||
270 | return ehdr + 1; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Return CPU count for ELF header (new kernel) | ||
275 | */ | ||
276 | static int get_cpu_cnt(void) | ||
277 | { | ||
278 | int i, cpus = 0; | ||
279 | |||
280 | for (i = 0; zfcpdump_save_areas[i]; i++) { | ||
281 | if (zfcpdump_save_areas[i]->pref_reg == 0) | ||
282 | continue; | ||
283 | cpus++; | ||
284 | } | ||
285 | return cpus; | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * Return memory chunk count for ELF header (new kernel) | ||
290 | */ | ||
291 | static int get_mem_chunk_cnt(void) | ||
292 | { | ||
293 | struct mem_chunk *chunk_array, *mem_chunk; | ||
294 | int i, cnt = 0; | ||
295 | |||
296 | chunk_array = get_memory_layout(); | ||
297 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
298 | mem_chunk = &chunk_array[i]; | ||
299 | if (chunk_array[i].type != CHUNK_READ_WRITE && | ||
300 | chunk_array[i].type != CHUNK_READ_ONLY) | ||
301 | continue; | ||
302 | if (mem_chunk->size == 0) | ||
303 | continue; | ||
304 | cnt++; | ||
305 | } | ||
306 | kfree(chunk_array); | ||
307 | return cnt; | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Relocate pointer in order to allow vmcore code access the data | ||
312 | */ | ||
313 | static inline unsigned long relocate(unsigned long addr) | ||
314 | { | ||
315 | return OLDMEM_BASE + addr; | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Initialize ELF loads (new kernel) | ||
320 | */ | ||
321 | static int loads_init(Elf64_Phdr *phdr, u64 loads_offset) | ||
322 | { | ||
323 | struct mem_chunk *chunk_array, *mem_chunk; | ||
324 | int i; | ||
325 | |||
326 | chunk_array = get_memory_layout(); | ||
327 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
328 | mem_chunk = &chunk_array[i]; | ||
329 | if (mem_chunk->size == 0) | ||
330 | break; | ||
331 | if (chunk_array[i].type != CHUNK_READ_WRITE && | ||
332 | chunk_array[i].type != CHUNK_READ_ONLY) | ||
333 | continue; | ||
334 | else | ||
335 | phdr->p_filesz = mem_chunk->size; | ||
336 | phdr->p_type = PT_LOAD; | ||
337 | phdr->p_offset = mem_chunk->addr; | ||
338 | phdr->p_vaddr = mem_chunk->addr; | ||
339 | phdr->p_paddr = mem_chunk->addr; | ||
340 | phdr->p_memsz = mem_chunk->size; | ||
341 | phdr->p_flags = PF_R | PF_W | PF_X; | ||
342 | phdr->p_align = PAGE_SIZE; | ||
343 | phdr++; | ||
344 | } | ||
345 | kfree(chunk_array); | ||
346 | return i; | ||
347 | } | ||
348 | |||
349 | /* | ||
350 | * Initialize notes (new kernel) | ||
351 | */ | ||
352 | static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) | ||
353 | { | ||
354 | struct save_area *sa; | ||
355 | void *ptr_start = ptr; | ||
356 | int i; | ||
357 | |||
358 | ptr = nt_prpsinfo(ptr); | ||
359 | |||
360 | for (i = 0; zfcpdump_save_areas[i]; i++) { | ||
361 | sa = zfcpdump_save_areas[i]; | ||
362 | if (sa->pref_reg == 0) | ||
363 | continue; | ||
364 | ptr = fill_cpu_elf_notes(ptr, sa); | ||
365 | } | ||
366 | ptr = nt_vmcoreinfo(ptr); | ||
367 | memset(phdr, 0, sizeof(*phdr)); | ||
368 | phdr->p_type = PT_NOTE; | ||
369 | phdr->p_offset = relocate(notes_offset); | ||
370 | phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start); | ||
371 | phdr->p_memsz = phdr->p_filesz; | ||
372 | return ptr; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Create ELF core header (new kernel) | ||
377 | */ | ||
378 | static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz) | ||
379 | { | ||
380 | Elf64_Phdr *phdr_notes, *phdr_loads; | ||
381 | int mem_chunk_cnt; | ||
382 | void *ptr, *hdr; | ||
383 | u32 alloc_size; | ||
384 | u64 hdr_off; | ||
385 | |||
386 | mem_chunk_cnt = get_mem_chunk_cnt(); | ||
387 | |||
388 | alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + | ||
389 | mem_chunk_cnt * sizeof(Elf64_Phdr); | ||
390 | hdr = kzalloc_panic(alloc_size); | ||
391 | /* Init elf header */ | ||
392 | ptr = ehdr_init(hdr, mem_chunk_cnt); | ||
393 | /* Init program headers */ | ||
394 | phdr_notes = ptr; | ||
395 | ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr)); | ||
396 | phdr_loads = ptr; | ||
397 | ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt); | ||
398 | /* Init notes */ | ||
399 | hdr_off = PTR_DIFF(ptr, hdr); | ||
400 | ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off); | ||
401 | /* Init loads */ | ||
402 | hdr_off = PTR_DIFF(ptr, hdr); | ||
403 | loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off); | ||
404 | *elfcorebuf_sz = hdr_off; | ||
405 | *elfcorebuf = (void *) relocate((unsigned long) hdr); | ||
406 | BUG_ON(*elfcorebuf_sz > alloc_size); | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * Create kdump ELF core header in new kernel, if it has not been passed via | ||
411 | * the "elfcorehdr" kernel parameter | ||
412 | */ | ||
413 | static int setup_kdump_elfcorehdr(void) | ||
414 | { | ||
415 | size_t elfcorebuf_sz; | ||
416 | char *elfcorebuf; | ||
417 | |||
418 | if (!OLDMEM_BASE || is_kdump_kernel()) | ||
419 | return -EINVAL; | ||
420 | s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz); | ||
421 | elfcorehdr_addr = (unsigned long long) elfcorebuf; | ||
422 | elfcorehdr_size = elfcorebuf_sz; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | subsys_initcall(setup_kdump_elfcorehdr); | ||
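The comment above copy_oldmem_page() describes a swap mapping: the crashed kernel's first OLDMEM_SIZE bytes and the reserved region starting at OLDMEM_BASE trade places. A standalone sketch of just that address translation; the OLDMEM_BASE/OLDMEM_SIZE values below are placeholders, the kdump kernel gets the real ones from its boot parameters:

#include <stdio.h>

/* Placeholder values for illustration only. */
#define OLDMEM_BASE 0x10000000UL
#define OLDMEM_SIZE 0x04000000UL

/* Mirror of the source-address swap in copy_oldmem_page():
 * [0, OLDMEM_SIZE) and [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE) trade places. */
static unsigned long oldmem_translate(unsigned long src)
{
        if (src < OLDMEM_SIZE)
                return src + OLDMEM_BASE;
        if (src > OLDMEM_BASE && src < OLDMEM_BASE + OLDMEM_SIZE)
                return src - OLDMEM_BASE;
        return src;
}

int main(void)
{
        printf("%#lx -> %#lx\n", 0x1000UL, oldmem_translate(0x1000UL));
        printf("%#lx -> %#lx\n", OLDMEM_BASE + 0x1000UL,
               oldmem_translate(OLDMEM_BASE + 0x1000UL));
        return 0;
}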
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index f297456dba7a..37394b3413e2 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -252,7 +252,7 @@ static noinline __init void setup_lowcore_early(void) | |||
252 | { | 252 | { |
253 | psw_t psw; | 253 | psw_t psw; |
254 | 254 | ||
255 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 255 | psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; |
256 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; | 256 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; |
257 | S390_lowcore.external_new_psw = psw; | 257 | S390_lowcore.external_new_psw = psw; |
258 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 258 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 02ec8fe7d03f..b13157057e02 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -43,16 +43,15 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52 | |||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 | 43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 |
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 | 44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 |
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | 45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 |
46 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | 46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE |
47 | SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR | ||
48 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | 47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE |
49 | 48 | ||
50 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 49 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
51 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) | 50 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
52 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 51 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
53 | _TIF_MCCK_PENDING) | 52 | _TIF_MCCK_PENDING) |
54 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 53 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
55 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) | 54 | _TIF_SYSCALL_TRACEPOINT) |
56 | 55 | ||
57 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 56 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
58 | STACK_SIZE = 1 << STACK_SHIFT | 57 | STACK_SIZE = 1 << STACK_SHIFT |
@@ -228,9 +227,10 @@ ENTRY(system_call) | |||
228 | sysc_saveall: | 227 | sysc_saveall: |
229 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 228 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
230 | CREATE_STACK_FRAME __LC_SAVE_AREA | 229 | CREATE_STACK_FRAME __LC_SAVE_AREA |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
232 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
233 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 230 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
231 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
232 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
233 | oi __TI_flags+3(%r12),_TIF_SYSCALL | ||
234 | sysc_vtime: | 234 | sysc_vtime: |
235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
236 | sysc_stime: | 236 | sysc_stime: |
@@ -239,17 +239,17 @@ sysc_update: | |||
239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
240 | sysc_do_svc: | 240 | sysc_do_svc: |
241 | xr %r7,%r7 | 241 | xr %r7,%r7 |
242 | icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0 | 242 | icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0 |
243 | bnz BASED(sysc_nr_ok) # svc number > 0 | 243 | bnz BASED(sysc_nr_ok) # svc number > 0 |
244 | # svc 0: system call number in %r1 | 244 | # svc 0: system call number in %r1 |
245 | cl %r1,BASED(.Lnr_syscalls) | 245 | cl %r1,BASED(.Lnr_syscalls) |
246 | bnl BASED(sysc_nr_ok) | 246 | bnl BASED(sysc_nr_ok) |
247 | sth %r1,SP_SVCNR(%r15) | 247 | sth %r1,SP_SVC_CODE+2(%r15) |
248 | lr %r7,%r1 # copy svc number to %r7 | 248 | lr %r7,%r1 # copy svc number to %r7 |
249 | sysc_nr_ok: | 249 | sysc_nr_ok: |
250 | sll %r7,2 # svc number *4 | 250 | sll %r7,2 # svc number *4 |
251 | l %r10,BASED(.Lsysc_table) | 251 | l %r10,BASED(.Lsysc_table) |
252 | tm __TI_flags+2(%r12),_TIF_SYSCALL | 252 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | 253 | mvc SP_ARGS(4,%r15),SP_R7(%r15) |
254 | l %r8,0(%r7,%r10) # get system call addr. | 254 | l %r8,0(%r7,%r10) # get system call addr. |
255 | bnz BASED(sysc_tracesys) | 255 | bnz BASED(sysc_tracesys) |
@@ -259,23 +259,19 @@ sysc_nr_ok: | |||
259 | sysc_return: | 259 | sysc_return: |
260 | LOCKDEP_SYS_EXIT | 260 | LOCKDEP_SYS_EXIT |
261 | sysc_tif: | 261 | sysc_tif: |
262 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
263 | bno BASED(sysc_restore) | ||
262 | tm __TI_flags+3(%r12),_TIF_WORK_SVC | 264 | tm __TI_flags+3(%r12),_TIF_WORK_SVC |
263 | bnz BASED(sysc_work) # there is work to do (signals etc.) | 265 | bnz BASED(sysc_work) # there is work to do (signals etc.) |
266 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL | ||
264 | sysc_restore: | 267 | sysc_restore: |
265 | RESTORE_ALL __LC_RETURN_PSW,1 | 268 | RESTORE_ALL __LC_RETURN_PSW,1 |
266 | sysc_done: | 269 | sysc_done: |
267 | 270 | ||
268 | # | 271 | # |
269 | # There is work to do, but first we need to check if we return to userspace. | ||
270 | # | ||
271 | sysc_work: | ||
272 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
273 | bno BASED(sysc_restore) | ||
274 | |||
275 | # | ||
276 | # One of the work bits is on. Find out which one. | 272 | # One of the work bits is on. Find out which one. |
277 | # | 273 | # |
278 | sysc_work_tif: | 274 | sysc_work: |
279 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 275 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING |
280 | bo BASED(sysc_mcck_pending) | 276 | bo BASED(sysc_mcck_pending) |
281 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 277 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
@@ -284,8 +280,6 @@ sysc_work_tif: | |||
284 | bo BASED(sysc_sigpending) | 280 | bo BASED(sysc_sigpending) |
285 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 281 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
286 | bo BASED(sysc_notify_resume) | 282 | bo BASED(sysc_notify_resume) |
287 | tm __TI_flags+3(%r12),_TIF_RESTART_SVC | ||
288 | bo BASED(sysc_restart) | ||
289 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 283 | tm __TI_flags+3(%r12),_TIF_PER_TRAP |
290 | bo BASED(sysc_singlestep) | 284 | bo BASED(sysc_singlestep) |
291 | b BASED(sysc_return) # beware of critical section cleanup | 285 | b BASED(sysc_return) # beware of critical section cleanup |
@@ -314,11 +308,14 @@ sysc_sigpending: | |||
314 | la %r2,SP_PTREGS(%r15) # load pt_regs | 308 | la %r2,SP_PTREGS(%r15) # load pt_regs |
315 | l %r1,BASED(.Ldo_signal) | 309 | l %r1,BASED(.Ldo_signal) |
316 | basr %r14,%r1 # call do_signal | 310 | basr %r14,%r1 # call do_signal |
317 | tm __TI_flags+3(%r12),_TIF_RESTART_SVC | 311 | tm __TI_flags+3(%r12),_TIF_SYSCALL |
318 | bo BASED(sysc_restart) | 312 | bno BASED(sysc_return) |
319 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 313 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
320 | bo BASED(sysc_singlestep) | 314 | xr %r7,%r7 # svc 0 returns -ENOSYS |
321 | b BASED(sysc_return) | 315 | clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2) |
316 | bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0 | ||
317 | icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number | ||
318 | b BASED(sysc_nr_ok) # restart svc | ||
322 | 319 | ||
323 | # | 320 | # |
324 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 321 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
@@ -329,24 +326,11 @@ sysc_notify_resume: | |||
329 | la %r14,BASED(sysc_return) | 326 | la %r14,BASED(sysc_return) |
330 | br %r1 # call do_notify_resume | 327 | br %r1 # call do_notify_resume |
331 | 328 | ||
332 | |||
333 | # | ||
334 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
335 | # | ||
336 | sysc_restart: | ||
337 | ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
338 | l %r7,SP_R2(%r15) # load new svc number | ||
339 | mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
340 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
341 | sth %r7,SP_SVCNR(%r15) | ||
342 | b BASED(sysc_nr_ok) # restart svc | ||
343 | |||
344 | # | 329 | # |
345 | # _TIF_PER_TRAP is set, call do_per_trap | 330 | # _TIF_PER_TRAP is set, call do_per_trap |
346 | # | 331 | # |
347 | sysc_singlestep: | 332 | sysc_singlestep: |
348 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 333 | ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
349 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
350 | la %r2,SP_PTREGS(%r15) # address of register-save area | 334 | la %r2,SP_PTREGS(%r15) # address of register-save area |
351 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 335 | l %r1,BASED(.Lhandle_per) # load adr. of per handler |
352 | la %r14,BASED(sysc_return) # load adr. of system return | 336 | la %r14,BASED(sysc_return) # load adr. of system return |
@@ -361,7 +345,7 @@ sysc_tracesys: | |||
361 | la %r2,SP_PTREGS(%r15) # load pt_regs | 345 | la %r2,SP_PTREGS(%r15) # load pt_regs |
362 | la %r3,0 | 346 | la %r3,0 |
363 | xr %r0,%r0 | 347 | xr %r0,%r0 |
364 | icm %r0,3,SP_SVCNR(%r15) | 348 | icm %r0,3,SP_SVC_CODE(%r15) |
365 | st %r0,SP_R2(%r15) | 349 | st %r0,SP_R2(%r15) |
366 | basr %r14,%r1 | 350 | basr %r14,%r1 |
367 | cl %r2,BASED(.Lnr_syscalls) | 351 | cl %r2,BASED(.Lnr_syscalls) |
@@ -376,7 +360,7 @@ sysc_tracego: | |||
376 | basr %r14,%r8 # call sys_xxx | 360 | basr %r14,%r8 # call sys_xxx |
377 | st %r2,SP_R2(%r15) # store return value | 361 | st %r2,SP_R2(%r15) # store return value |
378 | sysc_tracenogo: | 362 | sysc_tracenogo: |
379 | tm __TI_flags+2(%r12),_TIF_SYSCALL | 363 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 |
380 | bz BASED(sysc_return) | 364 | bz BASED(sysc_return) |
381 | l %r1,BASED(.Ltrace_exit) | 365 | l %r1,BASED(.Ltrace_exit) |
382 | la %r2,SP_PTREGS(%r15) # load pt_regs | 366 | la %r2,SP_PTREGS(%r15) # load pt_regs |
@@ -454,7 +438,6 @@ ENTRY(pgm_check_handler) | |||
454 | bnz BASED(pgm_per) # got per exception -> special case | 438 | bnz BASED(pgm_per) # got per exception -> special case |
455 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 439 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
456 | CREATE_STACK_FRAME __LC_SAVE_AREA | 440 | CREATE_STACK_FRAME __LC_SAVE_AREA |
457 | xc SP_ILC(4,%r15),SP_ILC(%r15) | ||
458 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW | 441 | mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW |
459 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 442 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
460 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 443 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
@@ -530,9 +513,10 @@ pgm_exit2: | |||
530 | pgm_svcper: | 513 | pgm_svcper: |
531 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 514 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
532 | CREATE_STACK_FRAME __LC_SAVE_AREA | 515 | CREATE_STACK_FRAME __LC_SAVE_AREA |
533 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
534 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
535 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 516 | l %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
517 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
518 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
519 | oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | ||
536 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 520 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
537 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 521 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
538 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 522 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
@@ -540,7 +524,6 @@ pgm_svcper: | |||
540 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | 524 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE |
541 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS | 525 | mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS |
542 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | 526 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID |
543 | oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | ||
544 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 527 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
545 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | 528 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
546 | b BASED(sysc_do_svc) | 529 | b BASED(sysc_do_svc) |
@@ -550,7 +533,6 @@ pgm_svcper: | |||
550 | # | 533 | # |
551 | kernel_per: | 534 | kernel_per: |
552 | REENABLE_IRQS | 535 | REENABLE_IRQS |
553 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) | ||
554 | la %r2,SP_PTREGS(%r15) # address of register-save area | 536 | la %r2,SP_PTREGS(%r15) # address of register-save area |
555 | l %r1,BASED(.Lhandle_per) # load adr. of per handler | 537 | l %r1,BASED(.Lhandle_per) # load adr. of per handler |
556 | basr %r14,%r1 # branch to do_single_step | 538 | basr %r14,%r1 # branch to do_single_step |
@@ -853,13 +835,13 @@ restart_go: | |||
853 | # PSW restart interrupt handler | 835 | # PSW restart interrupt handler |
854 | # | 836 | # |
855 | ENTRY(psw_restart_int_handler) | 837 | ENTRY(psw_restart_int_handler) |
856 | st %r15,__LC_SAVE_AREA_64(%r0) # save r15 | 838 | st %r15,__LC_SAVE_AREA+48(%r0) # save r15 |
857 | basr %r15,0 | 839 | basr %r15,0 |
858 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack | 840 | 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack |
859 | l %r15,0(%r15) | 841 | l %r15,0(%r15) |
860 | ahi %r15,-SP_SIZE # make room for pt_regs | 842 | ahi %r15,-SP_SIZE # make room for pt_regs |
861 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 843 | stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack |
862 | mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack | 844 | mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack |
863 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw | 845 | mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw |
864 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 846 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 |
865 | basr %r14,0 | 847 | basr %r14,0 |
@@ -965,9 +947,11 @@ cleanup_system_call: | |||
965 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 947 | s %r15,BASED(.Lc_spsize) # make room for registers & psw |
966 | st %r15,12(%r12) | 948 | st %r15,12(%r12) |
967 | CREATE_STACK_FRAME __LC_SAVE_AREA | 949 | CREATE_STACK_FRAME __LC_SAVE_AREA |
968 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
969 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
970 | mvc 0(4,%r12),__LC_THREAD_INFO | 950 | mvc 0(4,%r12),__LC_THREAD_INFO |
951 | l %r12,__LC_THREAD_INFO | ||
952 | mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW | ||
953 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
954 | oi __TI_flags+3(%r12),_TIF_SYSCALL | ||
971 | cleanup_vtime: | 955 | cleanup_vtime: |
972 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | 956 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) |
973 | bhe BASED(cleanup_stime) | 957 | bhe BASED(cleanup_stime) |
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 66729eb7bbc5..ef8fb1d6e8d7 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h | |||
@@ -5,24 +5,33 @@ | |||
5 | #include <linux/signal.h> | 5 | #include <linux/signal.h> |
6 | #include <asm/ptrace.h> | 6 | #include <asm/ptrace.h> |
7 | 7 | ||
8 | |||
9 | extern void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); | ||
10 | extern void *restart_stack; | ||
11 | |||
12 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); | ||
13 | asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); | ||
14 | |||
8 | void do_protection_exception(struct pt_regs *, long, unsigned long); | 15 | void do_protection_exception(struct pt_regs *, long, unsigned long); |
9 | void do_dat_exception(struct pt_regs *, long, unsigned long); | 16 | void do_dat_exception(struct pt_regs *, long, unsigned long); |
10 | void do_asce_exception(struct pt_regs *, long, unsigned long); | 17 | void do_asce_exception(struct pt_regs *, long, unsigned long); |
11 | 18 | ||
12 | extern int sysctl_userprocess_debug; | ||
13 | |||
14 | void do_per_trap(struct pt_regs *regs); | 19 | void do_per_trap(struct pt_regs *regs); |
15 | void syscall_trace(struct pt_regs *regs, int entryexit); | 20 | void syscall_trace(struct pt_regs *regs, int entryexit); |
16 | void kernel_stack_overflow(struct pt_regs * regs); | 21 | void kernel_stack_overflow(struct pt_regs * regs); |
17 | void do_signal(struct pt_regs *regs); | 22 | void do_signal(struct pt_regs *regs); |
18 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, | 23 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, |
19 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); | 24 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); |
25 | void do_notify_resume(struct pt_regs *regs); | ||
20 | 26 | ||
21 | void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); | 27 | void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); |
28 | void do_restart(void); | ||
22 | int __cpuinit start_secondary(void *cpuvoid); | 29 | int __cpuinit start_secondary(void *cpuvoid); |
23 | void __init startup_init(void); | 30 | void __init startup_init(void); |
24 | void die(const char * str, struct pt_regs * regs, long err); | 31 | void die(const char * str, struct pt_regs * regs, long err); |
25 | 32 | ||
33 | void __init time_init(void); | ||
34 | |||
26 | struct s390_mmap_arg_struct; | 35 | struct s390_mmap_arg_struct; |
27 | struct fadvise64_64_args; | 36 | struct fadvise64_64_args; |
28 | struct old_sigaction; | 37 | struct old_sigaction; |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 713da0760538..83a93747e2fd 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -43,19 +43,18 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104 | |||
43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 | 43 | SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 |
44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 | 44 | SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 |
45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 | 45 | SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 |
46 | SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC | 46 | SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE |
47 | SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR | ||
48 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE | 47 | SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE |
49 | 48 | ||
50 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 49 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
51 | STACK_SIZE = 1 << STACK_SHIFT | 50 | STACK_SIZE = 1 << STACK_SHIFT |
52 | 51 | ||
53 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 52 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
54 | _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) | 53 | _TIF_MCCK_PENDING | _TIF_PER_TRAP ) |
55 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 54 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
56 | _TIF_MCCK_PENDING) | 55 | _TIF_MCCK_PENDING) |
57 | _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | 56 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
58 | _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) | 57 | _TIF_SYSCALL_TRACEPOINT) |
59 | _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) | 58 | _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) |
60 | 59 | ||
61 | #define BASED(name) name-system_call(%r13) | 60 | #define BASED(name) name-system_call(%r13) |
@@ -249,9 +248,10 @@ ENTRY(system_call) | |||
249 | sysc_saveall: | 248 | sysc_saveall: |
250 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 249 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
251 | CREATE_STACK_FRAME __LC_SAVE_AREA | 250 | CREATE_STACK_FRAME __LC_SAVE_AREA |
252 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
253 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
254 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 251 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
252 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
253 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
254 | oi __TI_flags+7(%r12),_TIF_SYSCALL | ||
255 | sysc_vtime: | 255 | sysc_vtime: |
256 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 256 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
257 | sysc_stime: | 257 | sysc_stime: |
@@ -260,14 +260,14 @@ sysc_update: | |||
260 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 260 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
261 | LAST_BREAK | 261 | LAST_BREAK |
262 | sysc_do_svc: | 262 | sysc_do_svc: |
263 | llgh %r7,SP_SVCNR(%r15) | 263 | llgh %r7,SP_SVC_CODE+2(%r15) |
264 | slag %r7,%r7,2 # shift and test for svc 0 | 264 | slag %r7,%r7,2 # shift and test for svc 0 |
265 | jnz sysc_nr_ok | 265 | jnz sysc_nr_ok |
266 | # svc 0: system call number in %r1 | 266 | # svc 0: system call number in %r1 |
267 | llgfr %r1,%r1 # clear high word in r1 | 267 | llgfr %r1,%r1 # clear high word in r1 |
268 | cghi %r1,NR_syscalls | 268 | cghi %r1,NR_syscalls |
269 | jnl sysc_nr_ok | 269 | jnl sysc_nr_ok |
270 | sth %r1,SP_SVCNR(%r15) | 270 | sth %r1,SP_SVC_CODE+2(%r15) |
271 | slag %r7,%r1,2 # shift and test for svc 0 | 271 | slag %r7,%r1,2 # shift and test for svc 0 |
272 | sysc_nr_ok: | 272 | sysc_nr_ok: |
273 | larl %r10,sys_call_table | 273 | larl %r10,sys_call_table |
@@ -277,7 +277,7 @@ sysc_nr_ok: | |||
277 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls | 277 | larl %r10,sys_call_table_emu # use 31 bit emulation system calls |
278 | sysc_noemu: | 278 | sysc_noemu: |
279 | #endif | 279 | #endif |
280 | tm __TI_flags+6(%r12),_TIF_SYSCALL | 280 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
281 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | 281 | mvc SP_ARGS(8,%r15),SP_R7(%r15) |
282 | lgf %r8,0(%r7,%r10) # load address of system call routine | 282 | lgf %r8,0(%r7,%r10) # load address of system call routine |
283 | jnz sysc_tracesys | 283 | jnz sysc_tracesys |
@@ -287,23 +287,19 @@ sysc_noemu: | |||
287 | sysc_return: | 287 | sysc_return: |
288 | LOCKDEP_SYS_EXIT | 288 | LOCKDEP_SYS_EXIT |
289 | sysc_tif: | 289 | sysc_tif: |
290 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
291 | jno sysc_restore | ||
290 | tm __TI_flags+7(%r12),_TIF_WORK_SVC | 292 | tm __TI_flags+7(%r12),_TIF_WORK_SVC |
291 | jnz sysc_work # there is work to do (signals etc.) | 293 | jnz sysc_work # there is work to do (signals etc.) |
294 | ni __TI_flags+7(%r12),255-_TIF_SYSCALL | ||
292 | sysc_restore: | 295 | sysc_restore: |
293 | RESTORE_ALL __LC_RETURN_PSW,1 | 296 | RESTORE_ALL __LC_RETURN_PSW,1 |
294 | sysc_done: | 297 | sysc_done: |
295 | 298 | ||
296 | # | 299 | # |
297 | # There is work to do, but first we need to check if we return to userspace. | ||
298 | # | ||
299 | sysc_work: | ||
300 | tm SP_PSW+1(%r15),0x01 # returning to user ? | ||
301 | jno sysc_restore | ||
302 | |||
303 | # | ||
304 | # One of the work bits is on. Find out which one. | 300 | # One of the work bits is on. Find out which one. |
305 | # | 301 | # |
306 | sysc_work_tif: | 302 | sysc_work: |
307 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | 303 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING |
308 | jo sysc_mcck_pending | 304 | jo sysc_mcck_pending |
309 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 305 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
@@ -312,8 +308,6 @@ sysc_work_tif: | |||
312 | jo sysc_sigpending | 308 | jo sysc_sigpending |
313 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 309 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
314 | jo sysc_notify_resume | 310 | jo sysc_notify_resume |
315 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC | ||
316 | jo sysc_restart | ||
317 | tm __TI_flags+7(%r12),_TIF_PER_TRAP | 311 | tm __TI_flags+7(%r12),_TIF_PER_TRAP |
318 | jo sysc_singlestep | 312 | jo sysc_singlestep |
319 | j sysc_return # beware of critical section cleanup | 313 | j sysc_return # beware of critical section cleanup |
@@ -339,11 +333,15 @@ sysc_sigpending: | |||
339 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 333 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP |
340 | la %r2,SP_PTREGS(%r15) # load pt_regs | 334 | la %r2,SP_PTREGS(%r15) # load pt_regs |
341 | brasl %r14,do_signal # call do_signal | 335 | brasl %r14,do_signal # call do_signal |
342 | tm __TI_flags+7(%r12),_TIF_RESTART_SVC | 336 | tm __TI_flags+7(%r12),_TIF_SYSCALL |
343 | jo sysc_restart | 337 | jno sysc_return |
344 | tm __TI_flags+7(%r12),_TIF_PER_TRAP | 338 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
345 | jo sysc_singlestep | 339 | lghi %r7,0 # svc 0 returns -ENOSYS |
346 | j sysc_return | 340 | lh %r1,SP_SVC_CODE+2(%r15) # load new svc number |
341 | cghi %r1,NR_syscalls | ||
342 | jnl sysc_nr_ok # invalid svc number -> do svc 0 | ||
343 | slag %r7,%r1,2 | ||
344 | j sysc_nr_ok # restart svc | ||
347 | 345 | ||
348 | # | 346 | # |
349 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 347 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
@@ -354,23 +352,10 @@ sysc_notify_resume: | |||
354 | jg do_notify_resume # call do_notify_resume | 352 | jg do_notify_resume # call do_notify_resume |
355 | 353 | ||
356 | # | 354 | # |
357 | # _TIF_RESTART_SVC is set, set up registers and restart svc | ||
358 | # | ||
359 | sysc_restart: | ||
360 | ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC | ||
361 | lg %r7,SP_R2(%r15) # load new svc number | ||
362 | mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument | ||
363 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
364 | sth %r7,SP_SVCNR(%r15) | ||
365 | slag %r7,%r7,2 | ||
366 | j sysc_nr_ok # restart svc | ||
367 | |||
368 | # | ||
369 | # _TIF_PER_TRAP is set, call do_per_trap | 355 | # _TIF_PER_TRAP is set, call do_per_trap |
370 | # | 356 | # |
371 | sysc_singlestep: | 357 | sysc_singlestep: |
372 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP | 358 | ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) |
373 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
374 | la %r2,SP_PTREGS(%r15) # address of register-save area | 359 | la %r2,SP_PTREGS(%r15) # address of register-save area |
375 | larl %r14,sysc_return # load adr. of system return | 360 | larl %r14,sysc_return # load adr. of system return |
376 | jg do_per_trap | 361 | jg do_per_trap |
@@ -382,7 +367,7 @@ sysc_singlestep: | |||
382 | sysc_tracesys: | 367 | sysc_tracesys: |
383 | la %r2,SP_PTREGS(%r15) # load pt_regs | 368 | la %r2,SP_PTREGS(%r15) # load pt_regs |
384 | la %r3,0 | 369 | la %r3,0 |
385 | llgh %r0,SP_SVCNR(%r15) | 370 | llgh %r0,SP_SVC_CODE+2(%r15) |
386 | stg %r0,SP_R2(%r15) | 371 | stg %r0,SP_R2(%r15) |
387 | brasl %r14,do_syscall_trace_enter | 372 | brasl %r14,do_syscall_trace_enter |
388 | lghi %r0,NR_syscalls | 373 | lghi %r0,NR_syscalls |
@@ -397,7 +382,7 @@ sysc_tracego: | |||
397 | basr %r14,%r8 # call sys_xxx | 382 | basr %r14,%r8 # call sys_xxx |
398 | stg %r2,SP_R2(%r15) # store return value | 383 | stg %r2,SP_R2(%r15) # store return value |
399 | sysc_tracenogo: | 384 | sysc_tracenogo: |
400 | tm __TI_flags+6(%r12),_TIF_SYSCALL | 385 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 |
401 | jz sysc_return | 386 | jz sysc_return |
402 | la %r2,SP_PTREGS(%r15) # load pt_regs | 387 | la %r2,SP_PTREGS(%r15) # load pt_regs |
403 | larl %r14,sysc_return # return point is sysc_return | 388 | larl %r14,sysc_return # return point is sysc_return |
@@ -470,7 +455,6 @@ ENTRY(pgm_check_handler) | |||
470 | jnz pgm_per # got per exception -> special case | 455 | jnz pgm_per # got per exception -> special case |
471 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 456 | SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
472 | CREATE_STACK_FRAME __LC_SAVE_AREA | 457 | CREATE_STACK_FRAME __LC_SAVE_AREA |
473 | xc SP_ILC(4,%r15),SP_ILC(%r15) | ||
474 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW | 458 | mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW |
475 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 459 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
476 | HANDLE_SIE_INTERCEPT | 460 | HANDLE_SIE_INTERCEPT |
@@ -550,9 +534,10 @@ pgm_exit2: | |||
550 | pgm_svcper: | 534 | pgm_svcper: |
551 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 535 | SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
552 | CREATE_STACK_FRAME __LC_SAVE_AREA | 536 | CREATE_STACK_FRAME __LC_SAVE_AREA |
553 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
554 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
555 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 537 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
538 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
539 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
540 | oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) | ||
556 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 541 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
557 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 542 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
558 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 543 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
@@ -561,7 +546,6 @@ pgm_svcper: | |||
561 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE | 546 | mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE |
562 | mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS | 547 | mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS |
563 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID | 548 | mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID |
564 | oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP | ||
565 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 549 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
566 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | 550 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
567 | j sysc_do_svc | 551 | j sysc_do_svc |
@@ -571,7 +555,6 @@ pgm_svcper: | |||
571 | # | 555 | # |
572 | kernel_per: | 556 | kernel_per: |
573 | REENABLE_IRQS | 557 | REENABLE_IRQS |
574 | xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number | ||
575 | la %r2,SP_PTREGS(%r15) # address of register-save area | 558 | la %r2,SP_PTREGS(%r15) # address of register-save area |
576 | brasl %r14,do_per_trap | 559 | brasl %r14,do_per_trap |
577 | j pgm_exit | 560 | j pgm_exit |
@@ -869,12 +852,12 @@ restart_go: | |||
869 | # PSW restart interrupt handler | 852 | # PSW restart interrupt handler |
870 | # | 853 | # |
871 | ENTRY(psw_restart_int_handler) | 854 | ENTRY(psw_restart_int_handler) |
872 | stg %r15,__LC_SAVE_AREA_64(%r0) # save r15 | 855 | stg %r15,__LC_SAVE_AREA+120(%r0) # save r15 |
873 | larl %r15,restart_stack # load restart stack | 856 | larl %r15,restart_stack # load restart stack |
874 | lg %r15,0(%r15) | 857 | lg %r15,0(%r15) |
875 | aghi %r15,-SP_SIZE # make room for pt_regs | 858 | aghi %r15,-SP_SIZE # make room for pt_regs |
876 | stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack | 859 | stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack |
877 | mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack | 860 | mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack |
878 | mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw | 861 | mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw |
879 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 | 862 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 |
880 | brasl %r14,do_restart | 863 | brasl %r14,do_restart |
@@ -972,9 +955,11 @@ cleanup_system_call: | |||
972 | stg %r15,32(%r12) | 955 | stg %r15,32(%r12) |
973 | stg %r11,0(%r12) | 956 | stg %r11,0(%r12) |
974 | CREATE_STACK_FRAME __LC_SAVE_AREA | 957 | CREATE_STACK_FRAME __LC_SAVE_AREA |
975 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
976 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | ||
977 | mvc 8(8,%r12),__LC_THREAD_INFO | 958 | mvc 8(8,%r12),__LC_THREAD_INFO |
959 | lg %r12,__LC_THREAD_INFO | ||
960 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | ||
961 | mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC | ||
962 | oi __TI_flags+7(%r12),_TIF_SYSCALL | ||
978 | cleanup_vtime: | 963 | cleanup_vtime: |
979 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 964 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) |
980 | jhe cleanup_stime | 965 | jhe cleanup_stime |
@@ -1096,6 +1081,7 @@ sie_exit: | |||
1096 | lghi %r2,0 | 1081 | lghi %r2,0 |
1097 | br %r14 | 1082 | br %r14 |
1098 | sie_fault: | 1083 | sie_fault: |
1084 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
1099 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct | 1085 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct |
1100 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) | 1086 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) |
1101 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | 1087 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area |
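Both entry.S and entry64.S drop the dedicated TIF_RESTART_SVC bit: after do_signal() the sysc_sigpending path simply checks TIF_SYSCALL, reloads the svc number that was saved in SP_SVC_CODE at system-call entry, and rejoins sysc_nr_ok, degrading an out-of-range number to svc 0 (-ENOSYS). A toy C model of that decision; the syscall-table size below is illustrative and the authoritative logic is the assembly above:

#include <stdio.h>

#define NR_SYSCALLS 349         /* illustrative table size, not the real one */

/* Toy model of the new sysc_sigpending tail: if TIF_SYSCALL is still set
 * after do_signal(), the saved svc number is reloaded; out-of-range numbers
 * fall back to svc 0, which returns -ENOSYS. */
static int restart_svc_nr(int tif_syscall, unsigned int saved_svc_code)
{
        if (!tif_syscall)
                return -1;                      /* no restart, continue to sysc_return */
        if (saved_svc_code >= NR_SYSCALLS)
                return 0;                       /* invalid number -> svc 0 (-ENOSYS)   */
        return (int) saved_svc_code;            /* re-dispatch this system call        */
}

int main(void)
{
        printf("%d %d %d\n", restart_svc_nr(0, 4), restart_svc_nr(1, 4),
               restart_svc_nr(1, 5000));
        return 0;
}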
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 2d781bab37bb..900068d2bf92 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -449,10 +449,28 @@ ENTRY(start) | |||
449 | # | 449 | # |
450 | .org 0x10000 | 450 | .org 0x10000 |
451 | ENTRY(startup) | 451 | ENTRY(startup) |
452 | j .Lep_startup_normal | ||
453 | .org 0x10008 | ||
454 | # | ||
455 | # This is a list of s390 kernel entry points. At address 0x1000f the number of | ||
456 | # valid entry points is stored. | ||
457 | # | ||
458 | # IMPORTANT: Do not change this table, it is s390 kernel ABI! | ||
459 | # | ||
460 | .ascii "S390EP" | ||
461 | .byte 0x00,0x01 | ||
462 | # | ||
463 | # kdump startup-code at 0x10010, running in 64 bit absolute addressing mode | ||
464 | # | ||
465 | .org 0x10010 | ||
466 | ENTRY(startup_kdump) | ||
467 | j .Lep_startup_kdump | ||
468 | .Lep_startup_normal: | ||
452 | basr %r13,0 # get base | 469 | basr %r13,0 # get base |
453 | .LPG0: | 470 | .LPG0: |
454 | xc 0x200(256),0x200 # partially clear lowcore | 471 | xc 0x200(256),0x200 # partially clear lowcore |
455 | xc 0x300(256),0x300 | 472 | xc 0x300(256),0x300 |
473 | xc 0xe00(256),0xe00 | ||
456 | stck __LC_LAST_UPDATE_CLOCK | 474 | stck __LC_LAST_UPDATE_CLOCK |
457 | spt 5f-.LPG0(%r13) | 475 | spt 5f-.LPG0(%r13) |
458 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) | 476 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) |
@@ -534,6 +552,8 @@ ENTRY(startup) | |||
534 | .align 8 | 552 | .align 8 |
535 | 5: .long 0x7fffffff,0xffffffff | 553 | 5: .long 0x7fffffff,0xffffffff |
536 | 554 | ||
555 | #include "head_kdump.S" | ||
556 | |||
537 | # | 557 | # |
538 | # params at 10400 (setup.h) | 558 | # params at 10400 (setup.h) |
539 | # | 559 | # |
@@ -541,6 +561,8 @@ ENTRY(startup) | |||
541 | .long 0,0 # IPL_DEVICE | 561 | .long 0,0 # IPL_DEVICE |
542 | .long 0,0 # INITRD_START | 562 | .long 0,0 # INITRD_START |
543 | .long 0,0 # INITRD_SIZE | 563 | .long 0,0 # INITRD_SIZE |
564 | .long 0,0 # OLDMEM_BASE | ||
565 | .long 0,0 # OLDMEM_SIZE | ||
544 | 566 | ||
545 | .org COMMAND_LINE | 567 | .org COMMAND_LINE |
546 | .byte "root=/dev/ram0 ro" | 568 | .byte "root=/dev/ram0 ro" |
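head.S now publishes an entry-point table at 0x10008, marked with the ASCII string "S390EP" and, at 0x1000f, the number of valid entry points. A hypothetical user-space check for that marker in a loaded image buffer; the buffer and offsets mirror the hunk above, nothing here is kernel API:

#include <stdio.h>
#include <string.h>

/* Toy check of the entry point table introduced at 0x10008 in head.S. */
static int has_s390ep(const unsigned char *image, size_t len)
{
        if (len < 0x10010)
                return 0;
        return memcmp(image + 0x10008, "S390EP", 6) == 0;
}

int main(void)
{
        static unsigned char image[0x10010];

        memcpy(image + 0x10008, "S390EP", 6);
        image[0x1000f] = 0x01;          /* number of valid entry points */
        printf("S390EP marker: %s, entries: %u\n",
               has_s390ep(image, sizeof(image)) ? "yes" : "no",
               (unsigned int) image[0x1000f]);
        return 0;
}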
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index f21954b44dc1..d3f1ab7d90ad 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -92,7 +92,7 @@ ENTRY(_stext) | |||
92 | .LPG3: | 92 | .LPG3: |
93 | # check control registers | 93 | # check control registers |
94 | stctl %c0,%c15,0(%r15) | 94 | stctl %c0,%c15,0(%r15) |
95 | oi 2(%r15),0x40 # enable sigp emergency signal | 95 | oi 2(%r15),0x60 # enable sigp emergency & external call |
96 | oi 0(%r15),0x10 # switch on low address protection | 96 | oi 0(%r15),0x10 # switch on low address protection |
97 | lctl %c0,%c15,0(%r15) | 97 | lctl %c0,%c15,0(%r15) |
98 | 98 | ||
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index ae5d492b069e..99348c0eaa41 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -90,7 +90,7 @@ ENTRY(_stext) | |||
90 | .LPG3: | 90 | .LPG3: |
91 | # check control registers | 91 | # check control registers |
92 | stctg %c0,%c15,0(%r15) | 92 | stctg %c0,%c15,0(%r15) |
93 | oi 6(%r15),0x40 # enable sigp emergency signal | 93 | oi 6(%r15),0x60 # enable sigp emergency & external call |
94 | oi 4(%r15),0x10 # switch on low address proctection | 94 | oi 4(%r15),0x10 # switch on low address proctection |
95 | lctlg %c0,%c15,0(%r15) | 95 | lctlg %c0,%c15,0(%r15) |
96 | 96 | ||
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S new file mode 100644 index 000000000000..e1ac3893e972 --- /dev/null +++ b/arch/s390/kernel/head_kdump.S | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * S390 kdump lowlevel functions (new kernel) | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #define DATAMOVER_ADDR 0x4000 | ||
9 | #define COPY_PAGE_ADDR 0x6000 | ||
10 | |||
11 | #ifdef CONFIG_CRASH_DUMP | ||
12 | |||
13 | # | ||
14 | # kdump entry (new kernel - not yet relocated) | ||
15 | # | ||
16 | # Note: This code has to be position independent | ||
17 | # | ||
18 | |||
19 | .align 2 | ||
20 | .Lep_startup_kdump: | ||
21 | lhi %r1,2 # mode 2 = esame (dump) | ||
22 | sigp %r1,%r0,0x12 # Switch to esame mode | ||
23 | sam64 # Switch to 64 bit addressing | ||
24 | basr %r13,0 | ||
25 | .Lbase: | ||
26 | larl %r2,.Lbase_addr # Check, if we have been | ||
27 | lg %r2,0(%r2) # already relocated: | ||
28 | clgr %r2,%r13 # | ||
29 | jne .Lrelocate # No : Start data mover | ||
30 | lghi %r2,0 # Yes: Start kdump kernel | ||
31 | brasl %r14,startup_kdump_relocated | ||
32 | |||
33 | .Lrelocate: | ||
34 | larl %r4,startup | ||
35 | lg %r2,0x418(%r4) # Get kdump base | ||
36 | lg %r3,0x420(%r4) # Get kdump size | ||
37 | |||
38 | larl %r10,.Lcopy_start # Source of data mover | ||
39 | lghi %r8,DATAMOVER_ADDR # Target of data mover | ||
40 | mvc 0(256,%r8),0(%r10) # Copy data mover code | ||
41 | |||
42 | agr %r8,%r2 # Copy data mover to | ||
43 | mvc 0(256,%r8),0(%r10) # reserved mem | ||
44 | |||
45 | lghi %r14,DATAMOVER_ADDR # Jump to copied data mover | ||
46 | basr %r14,%r14 | ||
47 | .Lbase_addr: | ||
48 | .quad .Lbase | ||
49 | |||
50 | # | ||
51 | # kdump data mover code (runs at address DATAMOVER_ADDR) | ||
52 | # | ||
53 | # r2: kdump base address | ||
54 | # r3: kdump size | ||
55 | # | ||
56 | .Lcopy_start: | ||
57 | basr %r13,0 # Base | ||
58 | 0: | ||
59 | lgr %r11,%r2 # Save kdump base address | ||
60 | lgr %r12,%r2 | ||
61 | agr %r12,%r3 # Compute kdump end address | ||
62 | |||
63 | lghi %r5,0 | ||
64 | lghi %r10,COPY_PAGE_ADDR # Load copy page address | ||
65 | 1: | ||
66 | mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp | ||
67 | mvc 0(256,%r5),0(%r11) # Copy new kernel to old | ||
68 | mvc 0(256,%r11),0(%r10) # Copy tmp to new | ||
69 | aghi %r11,256 | ||
70 | aghi %r5,256 | ||
71 | clgr %r11,%r12 | ||
72 | jl 1b | ||
73 | |||
74 | lg %r14,.Lstartup_kdump-0b(%r13) | ||
75 | basr %r14,%r14 # Start relocated kernel | ||
76 | .Lstartup_kdump: | ||
77 | .long 0x00000000,0x00000000 + startup_kdump_relocated | ||
78 | .Lcopy_end: | ||
79 | |||
80 | # | ||
81 | # Startup of kdump (relocated new kernel) | ||
82 | # | ||
83 | .align 2 | ||
84 | startup_kdump_relocated: | ||
85 | basr %r13,0 | ||
86 | 0: | ||
87 | mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW | ||
88 | mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW | ||
89 | lhi %r1,1 # Start new kernel | ||
90 | diag %r1,%r1,0x308 # with diag 308 | ||
91 | |||
92 | .Lno_diag308: # No diag 308 | ||
93 | sam31 # Switch to 31 bit addr mode | ||
94 | sr %r1,%r1 # Erase register r1 | ||
95 | sr %r2,%r2 # Erase register r2 | ||
96 | sigp %r1,%r2,0x12 # Switch to 31 bit arch mode | ||
97 | lpsw 0 # Start new kernel... | ||
98 | .align 8 | ||
99 | .Lrestart_psw: | ||
100 | .long 0x00080000,0x80000000 + startup | ||
101 | .Lpgm_psw: | ||
102 | .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308 | ||
103 | #else | ||
104 | .align 2 | ||
105 | .Lep_startup_kdump: | ||
106 | #ifdef CONFIG_64BIT | ||
107 | larl %r13,startup_kdump_crash | ||
108 | lpswe 0(%r13) | ||
109 | .align 8 | ||
110 | startup_kdump_crash: | ||
111 | .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash | ||
112 | #else | ||
113 | basr %r13,0 | ||
114 | 0: lpsw startup_kdump_crash-0b(%r13) | ||
115 | .align 8 | ||
116 | startup_kdump_crash: | ||
117 | .long 0x000a0000,0x00000000 + startup_kdump_crash | ||
118 | #endif /* CONFIG_64BIT */ | ||
119 | #endif /* CONFIG_CRASH_DUMP */ | ||
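The data mover above exchanges the contents of low memory with the crash kernel image in 256-byte blocks, using a scratch buffer at COPY_PAGE_ADDR so that neither side is lost; swapping (rather than a one-way copy) is what keeps the first part of the crashed kernel's memory available to the kdump kernel afterwards. A minimal user-space sketch of the same three-buffer exchange, with ordinary C arrays standing in for low memory and the reserved kdump region (illustrative only, not the kernel code):

    #include <stdio.h>
    #include <string.h>

    #define BLOCK 256

    /* Exchange two regions block by block, mirroring the mvc loop above:
     * low -> tmp, kdump -> low, tmp -> kdump. */
    static void swap_regions(unsigned char *low, unsigned char *kdump, size_t size)
    {
        unsigned char tmp[BLOCK];                   /* plays the role of COPY_PAGE_ADDR */
        size_t off;

        for (off = 0; off < size; off += BLOCK) {
            memcpy(tmp, low + off, BLOCK);          /* old kernel to tmp */
            memcpy(low + off, kdump + off, BLOCK);  /* new kernel to old location */
            memcpy(kdump + off, tmp, BLOCK);        /* tmp to reserved region */
        }
    }

    int main(void)
    {
        static unsigned char low[4096], kdump[4096];

        memset(low, 0xaa, sizeof(low));
        memset(kdump, 0xbb, sizeof(kdump));
        swap_regions(low, kdump, sizeof(low));
        printf("low[0]=0x%02x kdump[0]=0x%02x\n", low[0], kdump[0]);
        return 0;
    }

As the loop suggests, after the swap the kdump image runs from low memory while the displaced low-memory contents sit in the reserved region, ready to be exposed as old memory by the new kernel.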
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 48c710206366..affa8e68124a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/gfp.h> | 18 | #include <linux/gfp.h> |
19 | #include <linux/crash_dump.h> | ||
19 | #include <asm/ipl.h> | 20 | #include <asm/ipl.h> |
20 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
21 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
@@ -26,6 +27,7 @@ | |||
26 | #include <asm/sclp.h> | 27 | #include <asm/sclp.h> |
27 | #include <asm/sigp.h> | 28 | #include <asm/sigp.h> |
28 | #include <asm/checksum.h> | 29 | #include <asm/checksum.h> |
30 | #include "entry.h" | ||
29 | 31 | ||
30 | #define IPL_PARM_BLOCK_VERSION 0 | 32 | #define IPL_PARM_BLOCK_VERSION 0 |
31 | 33 | ||
@@ -275,8 +277,8 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
275 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); | 277 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); |
276 | 278 | ||
277 | /* VM IPL PARM routines */ | 279 | /* VM IPL PARM routines */ |
278 | size_t reipl_get_ascii_vmparm(char *dest, size_t size, | 280 | static size_t reipl_get_ascii_vmparm(char *dest, size_t size, |
279 | const struct ipl_parameter_block *ipb) | 281 | const struct ipl_parameter_block *ipb) |
280 | { | 282 | { |
281 | int i; | 283 | int i; |
282 | size_t len; | 284 | size_t len; |
@@ -338,8 +340,8 @@ static size_t scpdata_length(const char* buf, size_t count) | |||
338 | return count; | 340 | return count; |
339 | } | 341 | } |
340 | 342 | ||
341 | size_t reipl_append_ascii_scpdata(char *dest, size_t size, | 343 | static size_t reipl_append_ascii_scpdata(char *dest, size_t size, |
342 | const struct ipl_parameter_block *ipb) | 344 | const struct ipl_parameter_block *ipb) |
343 | { | 345 | { |
344 | size_t count; | 346 | size_t count; |
345 | size_t i; | 347 | size_t i; |
@@ -1738,7 +1740,11 @@ static struct kobj_attribute on_restart_attr = | |||
1738 | 1740 | ||
1739 | void do_restart(void) | 1741 | void do_restart(void) |
1740 | { | 1742 | { |
1743 | smp_restart_with_online_cpu(); | ||
1741 | smp_send_stop(); | 1744 | smp_send_stop(); |
1745 | #ifdef CONFIG_CRASH_DUMP | ||
1746 | crash_kexec(NULL); | ||
1747 | #endif | ||
1742 | on_restart_trigger.action->fn(&on_restart_trigger); | 1748 | on_restart_trigger.action->fn(&on_restart_trigger); |
1743 | stop_run(&on_restart_trigger); | 1749 | stop_run(&on_restart_trigger); |
1744 | } | 1750 | } |
@@ -2009,7 +2015,7 @@ static void do_reset_calls(void) | |||
2009 | 2015 | ||
2010 | u32 dump_prefix_page; | 2016 | u32 dump_prefix_page; |
2011 | 2017 | ||
2012 | void s390_reset_system(void) | 2018 | void s390_reset_system(void (*func)(void *), void *data) |
2013 | { | 2019 | { |
2014 | struct _lowcore *lc; | 2020 | struct _lowcore *lc; |
2015 | 2021 | ||
@@ -2028,15 +2034,19 @@ void s390_reset_system(void) | |||
2028 | __ctl_clear_bit(0,28); | 2034 | __ctl_clear_bit(0,28); |
2029 | 2035 | ||
2030 | /* Set new machine check handler */ | 2036 | /* Set new machine check handler */ |
2031 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 2037 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; |
2032 | S390_lowcore.mcck_new_psw.addr = | 2038 | S390_lowcore.mcck_new_psw.addr = |
2033 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; | 2039 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; |
2034 | 2040 | ||
2035 | /* Set new program check handler */ | 2041 | /* Set new program check handler */ |
2036 | S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 2042 | S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; |
2037 | S390_lowcore.program_new_psw.addr = | 2043 | S390_lowcore.program_new_psw.addr = |
2038 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 2044 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
2039 | 2045 | ||
2046 | /* Store status at absolute zero */ | ||
2047 | store_status(); | ||
2048 | |||
2040 | do_reset_calls(); | 2049 | do_reset_calls(); |
2050 | if (func) | ||
2051 | func(data); | ||
2041 | } | 2052 | } |
2042 | |||
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1f4050d45f78..b9a7fdd9c814 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -33,7 +33,8 @@ static const struct irq_class intrclass_names[] = { | |||
33 | {.name = "EXT" }, | 33 | {.name = "EXT" }, |
34 | {.name = "I/O" }, | 34 | {.name = "I/O" }, |
35 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, | 35 | {.name = "CLK", .desc = "[EXT] Clock Comparator" }, |
36 | {.name = "IPI", .desc = "[EXT] Signal Processor" }, | 36 | {.name = "EXC", .desc = "[EXT] External Call" }, |
37 | {.name = "EMS", .desc = "[EXT] Emergency Signal" }, | ||
37 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, | 38 | {.name = "TMR", .desc = "[EXT] CPU Timer" }, |
38 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, | 39 | {.name = "TAL", .desc = "[EXT] Timing Alert" }, |
39 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, | 40 | {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, |
@@ -42,8 +43,8 @@ static const struct irq_class intrclass_names[] = { | |||
42 | {.name = "SCP", .desc = "[EXT] Service Call" }, | 43 | {.name = "SCP", .desc = "[EXT] Service Call" }, |
43 | {.name = "IUC", .desc = "[EXT] IUCV" }, | 44 | {.name = "IUC", .desc = "[EXT] IUCV" }, |
44 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, | 45 | {.name = "CPM", .desc = "[EXT] CPU Measurement" }, |
46 | {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" }, | ||
45 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, | 47 | {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, |
46 | {.name = "QDI", .desc = "[I/O] QDIO Interrupt" }, | ||
47 | {.name = "DAS", .desc = "[I/O] DASD" }, | 48 | {.name = "DAS", .desc = "[I/O] DASD" }, |
48 | {.name = "C15", .desc = "[I/O] 3215" }, | 49 | {.name = "C15", .desc = "[I/O] 3215" }, |
49 | {.name = "C70", .desc = "[I/O] 3270" }, | 50 | {.name = "C70", .desc = "[I/O] 3270" }, |
@@ -53,6 +54,7 @@ static const struct irq_class intrclass_names[] = { | |||
53 | {.name = "CLW", .desc = "[I/O] CLAW" }, | 54 | {.name = "CLW", .desc = "[I/O] CLAW" }, |
54 | {.name = "CTC", .desc = "[I/O] CTC" }, | 55 | {.name = "CTC", .desc = "[I/O] CTC" }, |
55 | {.name = "APB", .desc = "[I/O] AP Bus" }, | 56 | {.name = "APB", .desc = "[I/O] AP Bus" }, |
57 | {.name = "CSC", .desc = "[I/O] CHSC Subchannel" }, | ||
56 | {.name = "NMI", .desc = "[NMI] Machine Check" }, | 58 | {.name = "NMI", .desc = "[NMI] Machine Check" }, |
57 | }; | 59 | }; |
58 | 60 | ||
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 1d05d669107c..64b761aef004 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -635,7 +635,7 @@ void __kprobes jprobe_return(void) | |||
635 | asm volatile(".word 0x0002"); | 635 | asm volatile(".word 0x0002"); |
636 | } | 636 | } |
637 | 637 | ||
638 | void __kprobes jprobe_return_end(void) | 638 | static void __used __kprobes jprobe_return_end(void) |
639 | { | 639 | { |
640 | asm volatile("bcr 0,0"); | 640 | asm volatile("bcr 0,0"); |
641 | } | 641 | } |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b09b9c62573e..3cd0f25ab015 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -1,10 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/machine_kexec.c | 2 | * arch/s390/kernel/machine_kexec.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2005,2006 | 4 | * Copyright IBM Corp. 2005,2011 |
5 | * | 5 | * |
6 | * Author(s): Rolf Adelsberger, | 6 | * Author(s): Rolf Adelsberger, |
7 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
8 | * Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
8 | */ | 9 | */ |
9 | 10 | ||
10 | #include <linux/device.h> | 11 | #include <linux/device.h> |
@@ -21,12 +22,162 @@ | |||
21 | #include <asm/smp.h> | 22 | #include <asm/smp.h> |
22 | #include <asm/reset.h> | 23 | #include <asm/reset.h> |
23 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/diag.h> | ||
26 | #include <asm/asm-offsets.h> | ||
24 | 27 | ||
25 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); | 28 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); |
26 | 29 | ||
27 | extern const unsigned char relocate_kernel[]; | 30 | extern const unsigned char relocate_kernel[]; |
28 | extern const unsigned long long relocate_kernel_len; | 31 | extern const unsigned long long relocate_kernel_len; |
29 | 32 | ||
33 | #ifdef CONFIG_CRASH_DUMP | ||
34 | |||
35 | void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); | ||
36 | |||
37 | /* | ||
38 | * Create ELF notes for one CPU | ||
39 | */ | ||
40 | static void add_elf_notes(int cpu) | ||
41 | { | ||
42 | struct save_area *sa = (void *) 4608 + store_prefix(); | ||
43 | void *ptr; | ||
44 | |||
45 | memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa)); | ||
46 | ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); | ||
47 | ptr = fill_cpu_elf_notes(ptr, sa); | ||
48 | memset(ptr, 0, sizeof(struct elf_note)); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Store status of next available physical CPU | ||
53 | */ | ||
54 | static int store_status_next(int start_cpu, int this_cpu) | ||
55 | { | ||
56 | struct save_area *sa = (void *) 4608 + store_prefix(); | ||
57 | int cpu, rc; | ||
58 | |||
59 | for (cpu = start_cpu; cpu < 65536; cpu++) { | ||
60 | if (cpu == this_cpu) | ||
61 | continue; | ||
62 | do { | ||
63 | rc = raw_sigp(cpu, sigp_stop_and_store_status); | ||
64 | } while (rc == sigp_busy); | ||
65 | if (rc != sigp_order_code_accepted) | ||
66 | continue; | ||
67 | if (sa->pref_reg) | ||
68 | return cpu; | ||
69 | } | ||
70 | return -1; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Initialize CPU ELF notes | ||
75 | */ | ||
76 | void setup_regs(void) | ||
77 | { | ||
78 | unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; | ||
79 | int cpu, this_cpu, phys_cpu = 0, first = 1; | ||
80 | |||
81 | this_cpu = stap(); | ||
82 | |||
83 | if (!S390_lowcore.prefixreg_save_area) | ||
84 | first = 0; | ||
85 | for_each_online_cpu(cpu) { | ||
86 | if (first) { | ||
87 | add_elf_notes(cpu); | ||
88 | first = 0; | ||
89 | continue; | ||
90 | } | ||
91 | phys_cpu = store_status_next(phys_cpu, this_cpu); | ||
92 | if (phys_cpu == -1) | ||
93 | break; | ||
94 | add_elf_notes(cpu); | ||
95 | phys_cpu++; | ||
96 | } | ||
97 | /* Copy dump CPU store status info to absolute zero */ | ||
98 | memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); | ||
99 | } | ||
100 | |||
101 | #endif | ||
102 | |||
103 | /* | ||
104 | * Start kdump: We expect here that a store status has been done on our CPU | ||
105 | */ | ||
106 | static void __do_machine_kdump(void *image) | ||
107 | { | ||
108 | #ifdef CONFIG_CRASH_DUMP | ||
109 | int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; | ||
110 | |||
111 | __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); | ||
112 | setup_regs(); | ||
113 | start_kdump(1); | ||
114 | #endif | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Check if kdump checksums are valid: We call purgatory with parameter "0" | ||
119 | */ | ||
120 | static int kdump_csum_valid(struct kimage *image) | ||
121 | { | ||
122 | #ifdef CONFIG_CRASH_DUMP | ||
123 | int (*start_kdump)(int) = (void *)image->start; | ||
124 | int rc; | ||
125 | |||
126 | __arch_local_irq_stnsm(0xfb); /* disable DAT */ | ||
127 | rc = start_kdump(0); | ||
128 | __arch_local_irq_stosm(0x04); /* enable DAT */ | ||
129 | return rc ? 0 : -EINVAL; | ||
130 | #else | ||
131 | return -EINVAL; | ||
132 | #endif | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Map or unmap crashkernel memory | ||
137 | */ | ||
138 | static void crash_map_pages(int enable) | ||
139 | { | ||
140 | unsigned long size = resource_size(&crashk_res); | ||
141 | |||
142 | BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN || | ||
143 | size % KEXEC_CRASH_MEM_ALIGN); | ||
144 | if (enable) | ||
145 | vmem_add_mapping(crashk_res.start, size); | ||
146 | else | ||
147 | vmem_remove_mapping(crashk_res.start, size); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Map crashkernel memory | ||
152 | */ | ||
153 | void crash_map_reserved_pages(void) | ||
154 | { | ||
155 | crash_map_pages(1); | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Unmap crashkernel memory | ||
160 | */ | ||
161 | void crash_unmap_reserved_pages(void) | ||
162 | { | ||
163 | crash_map_pages(0); | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * Give back memory to hypervisor before new kdump is loaded | ||
168 | */ | ||
169 | static int machine_kexec_prepare_kdump(void) | ||
170 | { | ||
171 | #ifdef CONFIG_CRASH_DUMP | ||
172 | if (MACHINE_IS_VM) | ||
173 | diag10_range(PFN_DOWN(crashk_res.start), | ||
174 | PFN_DOWN(crashk_res.end - crashk_res.start + 1)); | ||
175 | return 0; | ||
176 | #else | ||
177 | return -EINVAL; | ||
178 | #endif | ||
179 | } | ||
180 | |||
30 | int machine_kexec_prepare(struct kimage *image) | 181 | int machine_kexec_prepare(struct kimage *image) |
31 | { | 182 | { |
32 | void *reboot_code_buffer; | 183 | void *reboot_code_buffer; |
@@ -35,6 +186,9 @@ int machine_kexec_prepare(struct kimage *image) | |||
35 | if (ipl_flags & IPL_NSS_VALID) | 186 | if (ipl_flags & IPL_NSS_VALID) |
36 | return -ENOSYS; | 187 | return -ENOSYS; |
37 | 188 | ||
189 | if (image->type == KEXEC_TYPE_CRASH) | ||
190 | return machine_kexec_prepare_kdump(); | ||
191 | |||
38 | /* We don't support anything but the default image type for now. */ | 192 | /* We don't support anything but the default image type for now. */ |
39 | if (image->type != KEXEC_TYPE_DEFAULT) | 193 | if (image->type != KEXEC_TYPE_DEFAULT) |
40 | return -EINVAL; | 194 | return -EINVAL; |
@@ -51,27 +205,53 @@ void machine_kexec_cleanup(struct kimage *image) | |||
51 | { | 205 | { |
52 | } | 206 | } |
53 | 207 | ||
208 | void arch_crash_save_vmcoreinfo(void) | ||
209 | { | ||
210 | VMCOREINFO_SYMBOL(lowcore_ptr); | ||
211 | VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); | ||
212 | } | ||
213 | |||
54 | void machine_shutdown(void) | 214 | void machine_shutdown(void) |
55 | { | 215 | { |
56 | } | 216 | } |
57 | 217 | ||
58 | static void __machine_kexec(void *data) | 218 | /* |
219 | * Do normal kexec | ||
220 | */ | ||
221 | static void __do_machine_kexec(void *data) | ||
59 | { | 222 | { |
60 | relocate_kernel_t data_mover; | 223 | relocate_kernel_t data_mover; |
61 | struct kimage *image = data; | 224 | struct kimage *image = data; |
62 | 225 | ||
63 | pfault_fini(); | ||
64 | s390_reset_system(); | ||
65 | |||
66 | data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); | 226 | data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); |
67 | 227 | ||
68 | /* Call the moving routine */ | 228 | /* Call the moving routine */ |
69 | (*data_mover)(&image->head, image->start); | 229 | (*data_mover)(&image->head, image->start); |
70 | for (;;); | ||
71 | } | 230 | } |
72 | 231 | ||
232 | /* | ||
233 | * Reset system and call either kdump or normal kexec | ||
234 | */ | ||
235 | static void __machine_kexec(void *data) | ||
236 | { | ||
237 | struct kimage *image = data; | ||
238 | |||
239 | pfault_fini(); | ||
240 | if (image->type == KEXEC_TYPE_CRASH) | ||
241 | s390_reset_system(__do_machine_kdump, data); | ||
242 | else | ||
243 | s390_reset_system(__do_machine_kexec, data); | ||
244 | disabled_wait((unsigned long) __builtin_return_address(0)); | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Do either kdump or normal kexec. In case of kdump we first ask | ||
249 | * purgatory whether the kdump checksums are valid. | ||
250 | */ | ||
73 | void machine_kexec(struct kimage *image) | 251 | void machine_kexec(struct kimage *image) |
74 | { | 252 | { |
253 | if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image)) | ||
254 | return; | ||
75 | tracer_disable(); | 255 | tracer_disable(); |
76 | smp_send_stop(); | 256 | smp_send_stop(); |
77 | smp_switch_to_ipl_cpu(__machine_kexec, image); | 257 | smp_switch_to_ipl_cpu(__machine_kexec, image); |
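The reworked machine_kexec() path now funnels both cases through s390_reset_system() with a continuation: __do_machine_kdump for crash images (after the purgatory checksum handshake via start_kdump(0)), __do_machine_kexec otherwise. A compilable toy model of that dispatch, using stand-in names rather than the real kernel interfaces, may help to see the control flow:

    #include <stdio.h>

    enum image_type { TYPE_DEFAULT, TYPE_CRASH };

    /* Stand-ins for the continuations passed to s390_reset_system(). */
    static void do_kdump(void *image) { printf("kdump entry for image %p\n", image); }
    static void do_kexec(void *image) { printf("relocate_kernel for image %p\n", image); }

    /* Simplified model: reset, then hand over to the chosen continuation. */
    static void reset_system(void (*func)(void *), void *data)
    {
        /* ...reset machinery, temporary PSWs, store status... */
        if (func)
            func(data);
    }

    static void machine_kexec_model(enum image_type type, void *image, int csum_ok)
    {
        if (type == TYPE_CRASH && !csum_ok)
            return;                         /* kdump_csum_valid() failed */
        reset_system(type == TYPE_CRASH ? do_kdump : do_kexec, image);
    }

    int main(void)
    {
        int img;

        machine_kexec_model(TYPE_CRASH, &img, 1);
        machine_kexec_model(TYPE_DEFAULT, &img, 1);
        return 0;
    }

The new crash_map_reserved_pages()/crash_unmap_reserved_pages() pair is presumably there so the generic kexec code can temporarily map the reserved region while the crash image is copied in; on s390 the mapping itself is just vmem_add_mapping()/vmem_remove_mapping() over crashk_res.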
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c index 0fbe4e32f7ba..19b4568f4cee 100644 --- a/arch/s390/kernel/mem_detect.c +++ b/arch/s390/kernel/mem_detect.c | |||
@@ -62,3 +62,72 @@ void detect_memory_layout(struct mem_chunk chunk[]) | |||
62 | arch_local_irq_restore(flags); | 62 | arch_local_irq_restore(flags); |
63 | } | 63 | } |
64 | EXPORT_SYMBOL(detect_memory_layout); | 64 | EXPORT_SYMBOL(detect_memory_layout); |
65 | |||
66 | /* | ||
67 | * Create memory hole with given address, size, and type | ||
68 | */ | ||
69 | void create_mem_hole(struct mem_chunk chunks[], unsigned long addr, | ||
70 | unsigned long size, int type) | ||
71 | { | ||
72 | unsigned long start, end, new_size; | ||
73 | int i; | ||
74 | |||
75 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
76 | if (chunks[i].size == 0) | ||
77 | continue; | ||
78 | if (addr + size < chunks[i].addr) | ||
79 | continue; | ||
80 | if (addr >= chunks[i].addr + chunks[i].size) | ||
81 | continue; | ||
82 | start = max(addr, chunks[i].addr); | ||
83 | end = min(addr + size, chunks[i].addr + chunks[i].size); | ||
84 | new_size = end - start; | ||
85 | if (new_size == 0) | ||
86 | continue; | ||
87 | if (start == chunks[i].addr && | ||
88 | end == chunks[i].addr + chunks[i].size) { | ||
89 | /* Remove chunk */ | ||
90 | chunks[i].type = type; | ||
91 | } else if (start == chunks[i].addr) { | ||
92 | /* Make chunk smaller at start */ | ||
93 | if (i >= MEMORY_CHUNKS - 1) | ||
94 | panic("Unable to create memory hole"); | ||
95 | memmove(&chunks[i + 1], &chunks[i], | ||
96 | sizeof(struct mem_chunk) * | ||
97 | (MEMORY_CHUNKS - (i + 1))); | ||
98 | chunks[i + 1].addr = chunks[i].addr + new_size; | ||
99 | chunks[i + 1].size = chunks[i].size - new_size; | ||
100 | chunks[i].size = new_size; | ||
101 | chunks[i].type = type; | ||
102 | i += 1; | ||
103 | } else if (end == chunks[i].addr + chunks[i].size) { | ||
104 | /* Make chunk smaller at end */ | ||
105 | if (i >= MEMORY_CHUNKS - 1) | ||
106 | panic("Unable to create memory hole"); | ||
107 | memmove(&chunks[i + 1], &chunks[i], | ||
108 | sizeof(struct mem_chunk) * | ||
109 | (MEMORY_CHUNKS - (i + 1))); | ||
110 | chunks[i + 1].addr = start; | ||
111 | chunks[i + 1].size = new_size; | ||
112 | chunks[i + 1].type = type; | ||
113 | chunks[i].size -= new_size; | ||
114 | i += 1; | ||
115 | } else { | ||
116 | /* Create memory hole */ | ||
117 | if (i >= MEMORY_CHUNKS - 2) | ||
118 | panic("Unable to create memory hole"); | ||
119 | memmove(&chunks[i + 2], &chunks[i], | ||
120 | sizeof(struct mem_chunk) * | ||
121 | (MEMORY_CHUNKS - (i + 2))); | ||
122 | chunks[i + 1].addr = addr; | ||
123 | chunks[i + 1].size = size; | ||
124 | chunks[i + 1].type = type; | ||
125 | chunks[i + 2].addr = addr + size; | ||
126 | chunks[i + 2].size = | ||
127 | chunks[i].addr + chunks[i].size - (addr + size); | ||
128 | chunks[i + 2].type = chunks[i].type; | ||
129 | chunks[i].size = addr - chunks[i].addr; | ||
130 | i += 2; | ||
131 | } | ||
132 | } | ||
133 | } | ||
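create_mem_hole() retypes part of a memory chunk and splits the containing chunk into up to three entries when the hole lands in its middle. A small stand-alone illustration of that middle-split case, with made-up chunk type numbers (only the bookkeeping is modelled, not the kernel routine):

    #include <stdio.h>

    struct chunk { unsigned long addr, size; int type; };

    /* Toy model of the middle-hole case handled above: one chunk becomes
     * [chunk start, hole), [hole, hole end), [hole end, chunk end). */
    static void split_middle(struct chunk out[3], struct chunk c,
                             unsigned long addr, unsigned long size, int type)
    {
        out[0] = c;
        out[0].size = addr - c.addr;
        out[1].addr = addr;
        out[1].size = size;
        out[1].type = type;
        out[2].addr = addr + size;
        out[2].size = c.addr + c.size - (addr + size);
        out[2].type = c.type;
    }

    int main(void)
    {
        struct chunk ram = { 0, 1024UL << 20, 0 };   /* 1 GiB chunk, pretend READ_WRITE */
        struct chunk out[3];
        int i;

        split_middle(out, ram, 768UL << 20, 128UL << 20, 2 /* pretend CHUNK_CRASHK */);
        for (i = 0; i < 3; i++)
            printf("chunk %d: addr=%luM size=%luM type=%d\n",
                   i, out[i].addr >> 20, out[i].size >> 20, out[i].type);
        return 0;
    }

Punching a 128 MiB CHUNK_CRASHK hole at 768 MiB into a single 1 GiB chunk therefore yields [0, 768M) read-write, [768M, 896M) crashkernel and [896M, 1G) read-write, which is the layout the crashkernel reservation in setup.c relies on later.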
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 541a7509faeb..9451b210a1b4 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/elfcore.h> | ||
15 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
16 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
17 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
@@ -117,7 +118,8 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
117 | struct pt_regs regs; | 118 | struct pt_regs regs; |
118 | 119 | ||
119 | memset(®s, 0, sizeof(regs)); | 120 | memset(®s, 0, sizeof(regs)); |
120 | regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; | 121 | regs.psw.mask = psw_kernel_bits | |
122 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
121 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; | 123 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; |
122 | regs.gprs[9] = (unsigned long) fn; | 124 | regs.gprs[9] = (unsigned long) fn; |
123 | regs.gprs[10] = (unsigned long) arg; | 125 | regs.gprs[10] = (unsigned long) arg; |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 311e9d712888..6e0073e43f54 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -74,7 +74,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
74 | 74 | ||
75 | static void *c_start(struct seq_file *m, loff_t *pos) | 75 | static void *c_start(struct seq_file *m, loff_t *pos) |
76 | { | 76 | { |
77 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | 77 | return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; |
78 | } | 78 | } |
79 | 79 | ||
80 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | 80 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index ef86ad243986..450931a45b68 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -42,34 +42,37 @@ enum s390_regset { | |||
42 | REGSET_GENERAL, | 42 | REGSET_GENERAL, |
43 | REGSET_FP, | 43 | REGSET_FP, |
44 | REGSET_LAST_BREAK, | 44 | REGSET_LAST_BREAK, |
45 | REGSET_SYSTEM_CALL, | ||
45 | REGSET_GENERAL_EXTENDED, | 46 | REGSET_GENERAL_EXTENDED, |
46 | }; | 47 | }; |
47 | 48 | ||
48 | void update_per_regs(struct task_struct *task) | 49 | void update_per_regs(struct task_struct *task) |
49 | { | 50 | { |
50 | static const struct per_regs per_single_step = { | ||
51 | .control = PER_EVENT_IFETCH, | ||
52 | .start = 0, | ||
53 | .end = PSW_ADDR_INSN, | ||
54 | }; | ||
55 | struct pt_regs *regs = task_pt_regs(task); | 51 | struct pt_regs *regs = task_pt_regs(task); |
56 | struct thread_struct *thread = &task->thread; | 52 | struct thread_struct *thread = &task->thread; |
57 | const struct per_regs *new; | 53 | struct per_regs old, new; |
58 | struct per_regs old; | 54 | |
59 | 55 | /* Copy user specified PER registers */ | |
60 | /* TIF_SINGLE_STEP overrides the user specified PER registers. */ | 56 | new.control = thread->per_user.control; |
61 | new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ? | 57 | new.start = thread->per_user.start; |
62 | &per_single_step : &thread->per_user; | 58 | new.end = thread->per_user.end; |
59 | |||
60 | /* merge TIF_SINGLE_STEP into user specified PER registers. */ | ||
61 | if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { | ||
62 | new.control |= PER_EVENT_IFETCH; | ||
63 | new.start = 0; | ||
64 | new.end = PSW_ADDR_INSN; | ||
65 | } | ||
63 | 66 | ||
64 | /* Take care of the PER enablement bit in the PSW. */ | 67 | /* Take care of the PER enablement bit in the PSW. */ |
65 | if (!(new->control & PER_EVENT_MASK)) { | 68 | if (!(new.control & PER_EVENT_MASK)) { |
66 | regs->psw.mask &= ~PSW_MASK_PER; | 69 | regs->psw.mask &= ~PSW_MASK_PER; |
67 | return; | 70 | return; |
68 | } | 71 | } |
69 | regs->psw.mask |= PSW_MASK_PER; | 72 | regs->psw.mask |= PSW_MASK_PER; |
70 | __ctl_store(old, 9, 11); | 73 | __ctl_store(old, 9, 11); |
71 | if (memcmp(new, &old, sizeof(struct per_regs)) != 0) | 74 | if (memcmp(&new, &old, sizeof(struct per_regs)) != 0) |
72 | __ctl_load(*new, 9, 11); | 75 | __ctl_load(new, 9, 11); |
73 | } | 76 | } |
74 | 77 | ||
75 | void user_enable_single_step(struct task_struct *task) | 78 | void user_enable_single_step(struct task_struct *task) |
@@ -166,8 +169,8 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
166 | */ | 169 | */ |
167 | tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); | 170 | tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); |
168 | if (addr == (addr_t) &dummy->regs.psw.mask) | 171 | if (addr == (addr_t) &dummy->regs.psw.mask) |
169 | /* Remove per bit from user psw. */ | 172 | /* Return a clean psw mask. */ |
170 | tmp &= ~PSW_MASK_PER; | 173 | tmp = psw_user_bits | (tmp & PSW_MASK_USER); |
171 | 174 | ||
172 | } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { | 175 | } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { |
173 | /* | 176 | /* |
@@ -289,18 +292,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
289 | * psw and gprs are stored on the stack | 292 | * psw and gprs are stored on the stack |
290 | */ | 293 | */ |
291 | if (addr == (addr_t) &dummy->regs.psw.mask && | 294 | if (addr == (addr_t) &dummy->regs.psw.mask && |
292 | #ifdef CONFIG_COMPAT | 295 | ((data & ~PSW_MASK_USER) != psw_user_bits || |
293 | data != PSW_MASK_MERGE(psw_user32_bits, data) && | 296 | ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) |
294 | #endif | ||
295 | data != PSW_MASK_MERGE(psw_user_bits, data)) | ||
296 | /* Invalid psw mask. */ | 297 | /* Invalid psw mask. */ |
297 | return -EINVAL; | 298 | return -EINVAL; |
298 | #ifndef CONFIG_64BIT | ||
299 | if (addr == (addr_t) &dummy->regs.psw.addr) | 299 | if (addr == (addr_t) &dummy->regs.psw.addr) |
300 | /* I'd like to reject addresses without the | 300 | /* |
301 | high order bit but older gdb's rely on it */ | 301 | * The debugger changed the instruction address, |
302 | data |= PSW_ADDR_AMODE; | 302 | * reset system call restart, see signal.c:do_signal |
303 | #endif | 303 | */ |
304 | task_thread_info(child)->system_call = 0; | ||
305 | |||
304 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; | 306 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; |
305 | 307 | ||
306 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { | 308 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { |
@@ -495,21 +497,21 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) | |||
495 | __u32 tmp; | 497 | __u32 tmp; |
496 | 498 | ||
497 | if (addr < (addr_t) &dummy32->regs.acrs) { | 499 | if (addr < (addr_t) &dummy32->regs.acrs) { |
500 | struct pt_regs *regs = task_pt_regs(child); | ||
498 | /* | 501 | /* |
499 | * psw and gprs are stored on the stack | 502 | * psw and gprs are stored on the stack |
500 | */ | 503 | */ |
501 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 504 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
502 | /* Fake a 31 bit psw mask. */ | 505 | /* Fake a 31 bit psw mask. */ |
503 | tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); | 506 | tmp = (__u32)(regs->psw.mask >> 32); |
504 | tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); | 507 | tmp = psw32_user_bits | (tmp & PSW32_MASK_USER); |
505 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 508 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
506 | /* Fake a 31 bit psw address. */ | 509 | /* Fake a 31 bit psw address. */ |
507 | tmp = (__u32) task_pt_regs(child)->psw.addr | | 510 | tmp = (__u32) regs->psw.addr | |
508 | PSW32_ADDR_AMODE31; | 511 | (__u32)(regs->psw.mask & PSW_MASK_BA); |
509 | } else { | 512 | } else { |
510 | /* gpr 0-15 */ | 513 | /* gpr 0-15 */ |
511 | tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw + | 514 | tmp = *(__u32 *)((addr_t) ®s->psw + addr*2 + 4); |
512 | addr*2 + 4); | ||
513 | } | 515 | } |
514 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | 516 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { |
515 | /* | 517 | /* |
@@ -594,24 +596,32 @@ static int __poke_user_compat(struct task_struct *child, | |||
594 | addr_t offset; | 596 | addr_t offset; |
595 | 597 | ||
596 | if (addr < (addr_t) &dummy32->regs.acrs) { | 598 | if (addr < (addr_t) &dummy32->regs.acrs) { |
599 | struct pt_regs *regs = task_pt_regs(child); | ||
597 | /* | 600 | /* |
598 | * psw, gprs, acrs and orig_gpr2 are stored on the stack | 601 | * psw, gprs, acrs and orig_gpr2 are stored on the stack |
599 | */ | 602 | */ |
600 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 603 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
601 | /* Build a 64 bit psw mask from 31 bit mask. */ | 604 | /* Build a 64 bit psw mask from 31 bit mask. */ |
602 | if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) | 605 | if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits) |
603 | /* Invalid psw mask. */ | 606 | /* Invalid psw mask. */ |
604 | return -EINVAL; | 607 | return -EINVAL; |
605 | task_pt_regs(child)->psw.mask = | 608 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
606 | PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); | 609 | (regs->psw.mask & PSW_MASK_BA) | |
610 | (__u64)(tmp & PSW32_MASK_USER) << 32; | ||
607 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 611 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
608 | /* Build a 64 bit psw address from 31 bit address. */ | 612 | /* Build a 64 bit psw address from 31 bit address. */ |
609 | task_pt_regs(child)->psw.addr = | 613 | regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; |
610 | (__u64) tmp & PSW32_ADDR_INSN; | 614 | /* Transfer 31 bit amode bit to psw mask. */ |
615 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | | ||
616 | (__u64)(tmp & PSW32_ADDR_AMODE); | ||
617 | /* | ||
618 | * The debugger changed the instruction address, | ||
619 | * reset system call restart, see signal.c:do_signal | ||
620 | */ | ||
621 | task_thread_info(child)->system_call = 0; | ||
611 | } else { | 622 | } else { |
612 | /* gpr 0-15 */ | 623 | /* gpr 0-15 */ |
613 | *(__u32*)((addr_t) &task_pt_regs(child)->psw | 624 | *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; |
614 | + addr*2 + 4) = tmp; | ||
615 | } | 625 | } |
616 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { | 626 | } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { |
617 | /* | 627 | /* |
@@ -735,7 +745,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
735 | * debugger stored an invalid system call number. Skip | 745 | * debugger stored an invalid system call number. Skip |
736 | * the system call and the system call restart handling. | 746 | * the system call and the system call restart handling. |
737 | */ | 747 | */ |
738 | regs->svcnr = 0; | 748 | clear_thread_flag(TIF_SYSCALL); |
739 | ret = -1; | 749 | ret = -1; |
740 | } | 750 | } |
741 | 751 | ||
@@ -897,6 +907,26 @@ static int s390_last_break_get(struct task_struct *target, | |||
897 | 907 | ||
898 | #endif | 908 | #endif |
899 | 909 | ||
910 | static int s390_system_call_get(struct task_struct *target, | ||
911 | const struct user_regset *regset, | ||
912 | unsigned int pos, unsigned int count, | ||
913 | void *kbuf, void __user *ubuf) | ||
914 | { | ||
915 | unsigned int *data = &task_thread_info(target)->system_call; | ||
916 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
917 | data, 0, sizeof(unsigned int)); | ||
918 | } | ||
919 | |||
920 | static int s390_system_call_set(struct task_struct *target, | ||
921 | const struct user_regset *regset, | ||
922 | unsigned int pos, unsigned int count, | ||
923 | const void *kbuf, const void __user *ubuf) | ||
924 | { | ||
925 | unsigned int *data = &task_thread_info(target)->system_call; | ||
926 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
927 | data, 0, sizeof(unsigned int)); | ||
928 | } | ||
929 | |||
900 | static const struct user_regset s390_regsets[] = { | 930 | static const struct user_regset s390_regsets[] = { |
901 | [REGSET_GENERAL] = { | 931 | [REGSET_GENERAL] = { |
902 | .core_note_type = NT_PRSTATUS, | 932 | .core_note_type = NT_PRSTATUS, |
@@ -923,6 +953,14 @@ static const struct user_regset s390_regsets[] = { | |||
923 | .get = s390_last_break_get, | 953 | .get = s390_last_break_get, |
924 | }, | 954 | }, |
925 | #endif | 955 | #endif |
956 | [REGSET_SYSTEM_CALL] = { | ||
957 | .core_note_type = NT_S390_SYSTEM_CALL, | ||
958 | .n = 1, | ||
959 | .size = sizeof(unsigned int), | ||
960 | .align = sizeof(unsigned int), | ||
961 | .get = s390_system_call_get, | ||
962 | .set = s390_system_call_set, | ||
963 | }, | ||
926 | }; | 964 | }; |
927 | 965 | ||
928 | static const struct user_regset_view user_s390_view = { | 966 | static const struct user_regset_view user_s390_view = { |
@@ -1102,6 +1140,14 @@ static const struct user_regset s390_compat_regsets[] = { | |||
1102 | .align = sizeof(long), | 1140 | .align = sizeof(long), |
1103 | .get = s390_compat_last_break_get, | 1141 | .get = s390_compat_last_break_get, |
1104 | }, | 1142 | }, |
1143 | [REGSET_SYSTEM_CALL] = { | ||
1144 | .core_note_type = NT_S390_SYSTEM_CALL, | ||
1145 | .n = 1, | ||
1146 | .size = sizeof(compat_uint_t), | ||
1147 | .align = sizeof(compat_uint_t), | ||
1148 | .get = s390_system_call_get, | ||
1149 | .set = s390_system_call_set, | ||
1150 | }, | ||
1105 | [REGSET_GENERAL_EXTENDED] = { | 1151 | [REGSET_GENERAL_EXTENDED] = { |
1106 | .core_note_type = NT_S390_HIGH_GPRS, | 1152 | .core_note_type = NT_S390_HIGH_GPRS, |
1107 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), | 1153 | .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), |
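The new NT_S390_SYSTEM_CALL regset exposes the interrupted system call number to debuggers and core dumps, and writing 0 to it tells the kernel to skip system call restarting, matching the task_thread_info(child)->system_call = 0 hunks above. A hedged user-space sketch of reading it with PTRACE_GETREGSET; the numeric fallbacks for PTRACE_GETREGSET (0x4204) and NT_S390_SYSTEM_CALL (0x307) are taken from kernel headers of this era and are assumptions if your headers differ:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>
    #include <sys/wait.h>
    #include <sys/types.h>

    #ifndef PTRACE_GETREGSET
    #define PTRACE_GETREGSET 0x4204
    #endif
    #ifndef NT_S390_SYSTEM_CALL
    #define NT_S390_SYSTEM_CALL 0x307        /* value from the kernel's elf.h */
    #endif

    int main(int argc, char **argv)
    {
        unsigned int sysno = 0;
        struct iovec iov = { .iov_base = &sysno, .iov_len = sizeof(sysno) };
        pid_t pid;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <pid>\n", argv[0]);
            return 1;
        }
        pid = (pid_t) atol(argv[1]);
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
            perror("PTRACE_ATTACH");
            return 1;
        }
        waitpid(pid, NULL, 0);
        if (ptrace(PTRACE_GETREGSET, pid, (void *)(long) NT_S390_SYSTEM_CALL, &iov) == -1)
            perror("PTRACE_GETREGSET");
        else
            printf("pending system call number: %u\n", sysno);
        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
    }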
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 303d961c3bb5..ad67c214be04 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -10,6 +10,12 @@ | |||
10 | #include <asm/asm-offsets.h> | 10 | #include <asm/asm-offsets.h> |
11 | 11 | ||
12 | # | 12 | # |
13 | # store_status: Empty implementation until kdump is supported on 31 bit | ||
14 | # | ||
15 | ENTRY(store_status) | ||
16 | br %r14 | ||
17 | |||
18 | # | ||
13 | # do_reipl_asm | 19 | # do_reipl_asm |
14 | # Parameter: r2 = schid of reipl device | 20 | # Parameter: r2 = schid of reipl device |
15 | # | 21 | # |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index e690975403f4..732a793ec53a 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S | |||
@@ -17,11 +17,11 @@ | |||
17 | # | 17 | # |
18 | ENTRY(store_status) | 18 | ENTRY(store_status) |
19 | /* Save register one and load save area base */ | 19 | /* Save register one and load save area base */ |
20 | stg %r1,__LC_SAVE_AREA_64(%r0) | 20 | stg %r1,__LC_SAVE_AREA+120(%r0) |
21 | lghi %r1,SAVE_AREA_BASE | 21 | lghi %r1,SAVE_AREA_BASE |
22 | /* General purpose registers */ | 22 | /* General purpose registers */ |
23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 23 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
24 | lg %r2,__LC_SAVE_AREA_64(%r0) | 24 | lg %r2,__LC_SAVE_AREA+120(%r0) |
25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | 25 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) |
26 | /* Control registers */ | 26 | /* Control registers */ |
27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | 27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) |
@@ -62,8 +62,11 @@ ENTRY(store_status) | |||
62 | larl %r2,store_status | 62 | larl %r2,store_status |
63 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) | 63 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) |
64 | br %r14 | 64 | br %r14 |
65 | .align 8 | 65 | |
66 | .section .bss | ||
67 | .align 8 | ||
66 | .Lclkcmp: .quad 0x0000000000000000 | 68 | .Lclkcmp: .quad 0x0000000000000000 |
69 | .previous | ||
67 | 70 | ||
68 | # | 71 | # |
69 | # do_reipl_asm | 72 | # do_reipl_asm |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7b371c37061d..8ac6bfa2786c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -42,6 +42,9 @@ | |||
42 | #include <linux/reboot.h> | 42 | #include <linux/reboot.h> |
43 | #include <linux/topology.h> | 43 | #include <linux/topology.h> |
44 | #include <linux/ftrace.h> | 44 | #include <linux/ftrace.h> |
45 | #include <linux/kexec.h> | ||
46 | #include <linux/crash_dump.h> | ||
47 | #include <linux/memory.h> | ||
45 | 48 | ||
46 | #include <asm/ipl.h> | 49 | #include <asm/ipl.h> |
47 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
@@ -57,12 +60,13 @@ | |||
57 | #include <asm/ebcdic.h> | 60 | #include <asm/ebcdic.h> |
58 | #include <asm/compat.h> | 61 | #include <asm/compat.h> |
59 | #include <asm/kvm_virtio.h> | 62 | #include <asm/kvm_virtio.h> |
63 | #include <asm/diag.h> | ||
60 | 64 | ||
61 | long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | | 65 | long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | |
62 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); | 66 | PSW_MASK_EA | PSW_MASK_BA; |
63 | long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | 67 | long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | |
64 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 68 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | |
65 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | 69 | PSW_MASK_PSTATE | PSW_ASC_HOME; |
66 | 70 | ||
67 | /* | 71 | /* |
68 | * User copy operations. | 72 | * User copy operations. |
@@ -274,22 +278,14 @@ early_param("mem", early_parse_mem); | |||
274 | unsigned int user_mode = HOME_SPACE_MODE; | 278 | unsigned int user_mode = HOME_SPACE_MODE; |
275 | EXPORT_SYMBOL_GPL(user_mode); | 279 | EXPORT_SYMBOL_GPL(user_mode); |
276 | 280 | ||
277 | static int set_amode_and_uaccess(unsigned long user_amode, | 281 | static int set_amode_primary(void) |
278 | unsigned long user32_amode) | ||
279 | { | 282 | { |
280 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | 283 | psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; |
281 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 284 | psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; |
282 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
283 | #ifdef CONFIG_COMPAT | 285 | #ifdef CONFIG_COMPAT |
284 | psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | | 286 | psw32_user_bits = |
285 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 287 | (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; |
286 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
287 | psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | | ||
288 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
289 | PSW32_MASK_PSTATE; | ||
290 | #endif | 288 | #endif |
291 | psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
292 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | ||
293 | 289 | ||
294 | if (MACHINE_HAS_MVCOS) { | 290 | if (MACHINE_HAS_MVCOS) { |
295 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | 291 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); |
@@ -325,7 +321,7 @@ early_param("user_mode", early_parse_user_mode); | |||
325 | static void setup_addressing_mode(void) | 321 | static void setup_addressing_mode(void) |
326 | { | 322 | { |
327 | if (user_mode == PRIMARY_SPACE_MODE) { | 323 | if (user_mode == PRIMARY_SPACE_MODE) { |
328 | if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) | 324 | if (set_amode_primary()) |
329 | pr_info("Address spaces switched, " | 325 | pr_info("Address spaces switched, " |
330 | "mvcos available\n"); | 326 | "mvcos available\n"); |
331 | else | 327 | else |
@@ -344,24 +340,25 @@ setup_lowcore(void) | |||
344 | */ | 340 | */ |
345 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); | 341 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); |
346 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); | 342 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); |
347 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 343 | lc->restart_psw.mask = psw_kernel_bits; |
348 | lc->restart_psw.addr = | 344 | lc->restart_psw.addr = |
349 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 345 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
350 | if (user_mode != HOME_SPACE_MODE) | 346 | lc->external_new_psw.mask = psw_kernel_bits | |
351 | lc->restart_psw.mask |= PSW_ASC_HOME; | 347 | PSW_MASK_DAT | PSW_MASK_MCHECK; |
352 | lc->external_new_psw.mask = psw_kernel_bits; | ||
353 | lc->external_new_psw.addr = | 348 | lc->external_new_psw.addr = |
354 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | 349 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; |
355 | lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; | 350 | lc->svc_new_psw.mask = psw_kernel_bits | |
351 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
356 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 352 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; |
357 | lc->program_new_psw.mask = psw_kernel_bits; | 353 | lc->program_new_psw.mask = psw_kernel_bits | |
354 | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
358 | lc->program_new_psw.addr = | 355 | lc->program_new_psw.addr = |
359 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | 356 | PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; |
360 | lc->mcck_new_psw.mask = | 357 | lc->mcck_new_psw.mask = psw_kernel_bits; |
361 | psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; | ||
362 | lc->mcck_new_psw.addr = | 358 | lc->mcck_new_psw.addr = |
363 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | 359 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; |
364 | lc->io_new_psw.mask = psw_kernel_bits; | 360 | lc->io_new_psw.mask = psw_kernel_bits | |
361 | PSW_MASK_DAT | PSW_MASK_MCHECK; | ||
365 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 362 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
366 | lc->clock_comparator = -1ULL; | 363 | lc->clock_comparator = -1ULL; |
367 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 364 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; |
@@ -435,10 +432,14 @@ static void __init setup_resources(void) | |||
435 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 432 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
436 | if (!memory_chunk[i].size) | 433 | if (!memory_chunk[i].size) |
437 | continue; | 434 | continue; |
435 | if (memory_chunk[i].type == CHUNK_OLDMEM || | ||
436 | memory_chunk[i].type == CHUNK_CRASHK) | ||
437 | continue; | ||
438 | res = alloc_bootmem_low(sizeof(*res)); | 438 | res = alloc_bootmem_low(sizeof(*res)); |
439 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | 439 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; |
440 | switch (memory_chunk[i].type) { | 440 | switch (memory_chunk[i].type) { |
441 | case CHUNK_READ_WRITE: | 441 | case CHUNK_READ_WRITE: |
442 | case CHUNK_CRASHK: | ||
442 | res->name = "System RAM"; | 443 | res->name = "System RAM"; |
443 | break; | 444 | break; |
444 | case CHUNK_READ_ONLY: | 445 | case CHUNK_READ_ONLY: |
@@ -479,6 +480,7 @@ static void __init setup_memory_end(void) | |||
479 | unsigned long max_mem; | 480 | unsigned long max_mem; |
480 | int i; | 481 | int i; |
481 | 482 | ||
483 | |||
482 | #ifdef CONFIG_ZFCPDUMP | 484 | #ifdef CONFIG_ZFCPDUMP |
483 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { | 485 | if (ipl_info.type == IPL_TYPE_FCP_DUMP) { |
484 | memory_end = ZFCPDUMP_HSA_SIZE; | 486 | memory_end = ZFCPDUMP_HSA_SIZE; |
@@ -545,11 +547,201 @@ static void __init setup_restart_psw(void) | |||
545 | * Setup restart PSW for absolute zero lowcore. This is necessary | 547 | * Setup restart PSW for absolute zero lowcore. This is necessary |
546 | * if PSW restart is done on an offline CPU that has lowcore zero | 548 | * if PSW restart is done on an offline CPU that has lowcore zero |
547 | */ | 549 | */ |
548 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 550 | psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; |
549 | psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 551 | psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
550 | copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); | 552 | copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); |
551 | } | 553 | } |
552 | 554 | ||
555 | static void __init setup_vmcoreinfo(void) | ||
556 | { | ||
557 | #ifdef CONFIG_KEXEC | ||
558 | unsigned long ptr = paddr_vmcoreinfo_note(); | ||
559 | |||
560 | copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr)); | ||
561 | #endif | ||
562 | } | ||
563 | |||
564 | #ifdef CONFIG_CRASH_DUMP | ||
565 | |||
566 | /* | ||
567 | * Find suitable location for crashkernel memory | ||
568 | */ | ||
569 | static unsigned long __init find_crash_base(unsigned long crash_size, | ||
570 | char **msg) | ||
571 | { | ||
572 | unsigned long crash_base; | ||
573 | struct mem_chunk *chunk; | ||
574 | int i; | ||
575 | |||
576 | if (memory_chunk[0].size < crash_size) { | ||
577 | *msg = "first memory chunk must be at least crashkernel size"; | ||
578 | return 0; | ||
579 | } | ||
580 | if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE)) | ||
581 | return OLDMEM_BASE; | ||
582 | |||
583 | for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { | ||
584 | chunk = &memory_chunk[i]; | ||
585 | if (chunk->size == 0) | ||
586 | continue; | ||
587 | if (chunk->type != CHUNK_READ_WRITE) | ||
588 | continue; | ||
589 | if (chunk->size < crash_size) | ||
590 | continue; | ||
591 | crash_base = (chunk->addr + chunk->size) - crash_size; | ||
592 | if (crash_base < crash_size) | ||
593 | continue; | ||
594 | if (crash_base < ZFCPDUMP_HSA_SIZE_MAX) | ||
595 | continue; | ||
596 | if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) | ||
597 | continue; | ||
598 | return crash_base; | ||
599 | } | ||
600 | *msg = "no suitable area found"; | ||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Check if crash_base and crash_size are valid | ||
606 | */ | ||
607 | static int __init verify_crash_base(unsigned long crash_base, | ||
608 | unsigned long crash_size, | ||
609 | char **msg) | ||
610 | { | ||
611 | struct mem_chunk *chunk; | ||
612 | int i; | ||
613 | |||
614 | /* | ||
615 | * Because we do the swap to zero, we must have at least 'crash_size' | ||
616 | * bytes free space before crash_base | ||
617 | */ | ||
618 | if (crash_size > crash_base) { | ||
619 | *msg = "crashkernel offset must be greater than size"; | ||
620 | return -EINVAL; | ||
621 | } | ||
622 | |||
623 | /* First memory chunk must be at least crash_size */ | ||
624 | if (memory_chunk[0].size < crash_size) { | ||
625 | *msg = "first memory chunk must be at least crashkernel size"; | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | /* Check if we fit into the respective memory chunk */ | ||
629 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
630 | chunk = &memory_chunk[i]; | ||
631 | if (chunk->size == 0) | ||
632 | continue; | ||
633 | if (crash_base < chunk->addr) | ||
634 | continue; | ||
635 | if (crash_base >= chunk->addr + chunk->size) | ||
636 | continue; | ||
637 | /* we have found the memory chunk */ | ||
638 | if (crash_base + crash_size > chunk->addr + chunk->size) { | ||
639 | *msg = "selected memory chunk is too small for " | ||
640 | "crashkernel memory"; | ||
641 | return -EINVAL; | ||
642 | } | ||
643 | return 0; | ||
644 | } | ||
645 | *msg = "invalid memory range specified"; | ||
646 | return -EINVAL; | ||
647 | } | ||
648 | |||
649 | /* | ||
650 | * Reserve kdump memory by creating a memory hole in the mem_chunk array | ||
651 | */ | ||
652 | static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size, | ||
653 | int type) | ||
654 | { | ||
655 | |||
656 | create_mem_hole(memory_chunk, addr, size, type); | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * When kdump is enabled, we have to ensure that no memory from | ||
661 | * the area [0 - crashkernel memory size] and | ||
662 | * [crashk_res.start - crashk_res.end] is set offline. | ||
663 | */ | ||
664 | static int kdump_mem_notifier(struct notifier_block *nb, | ||
665 | unsigned long action, void *data) | ||
666 | { | ||
667 | struct memory_notify *arg = data; | ||
668 | |||
669 | if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res))) | ||
670 | return NOTIFY_BAD; | ||
671 | if (arg->start_pfn > PFN_DOWN(crashk_res.end)) | ||
672 | return NOTIFY_OK; | ||
673 | if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) | ||
674 | return NOTIFY_OK; | ||
675 | return NOTIFY_BAD; | ||
676 | } | ||
677 | |||
678 | static struct notifier_block kdump_mem_nb = { | ||
679 | .notifier_call = kdump_mem_notifier, | ||
680 | }; | ||
681 | |||
682 | #endif | ||
683 | |||
684 | /* | ||
685 | * Make sure that oldmem, where the dump is stored, is protected | ||
686 | */ | ||
687 | static void reserve_oldmem(void) | ||
688 | { | ||
689 | #ifdef CONFIG_CRASH_DUMP | ||
690 | if (!OLDMEM_BASE) | ||
691 | return; | ||
692 | |||
693 | reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM); | ||
694 | reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE, | ||
695 | CHUNK_OLDMEM); | ||
696 | if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size) | ||
697 | saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; | ||
698 | else | ||
699 | saved_max_pfn = PFN_DOWN(real_memory_size) - 1; | ||
700 | #endif | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | * Reserve memory for kdump kernel to be loaded with kexec | ||
705 | */ | ||
706 | static void __init reserve_crashkernel(void) | ||
707 | { | ||
708 | #ifdef CONFIG_CRASH_DUMP | ||
709 | unsigned long long crash_base, crash_size; | ||
710 | char *msg; | ||
711 | int rc; | ||
712 | |||
713 | rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, | ||
714 | &crash_base); | ||
715 | if (rc || crash_size == 0) | ||
716 | return; | ||
717 | crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); | ||
718 | crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); | ||
719 | if (register_memory_notifier(&kdump_mem_nb)) | ||
720 | return; | ||
721 | if (!crash_base) | ||
722 | crash_base = find_crash_base(crash_size, &msg); | ||
723 | if (!crash_base) { | ||
724 | pr_info("crashkernel reservation failed: %s\n", msg); | ||
725 | unregister_memory_notifier(&kdump_mem_nb); | ||
726 | return; | ||
727 | } | ||
728 | if (verify_crash_base(crash_base, crash_size, &msg)) { | ||
729 | pr_info("crashkernel reservation failed: %s\n", msg); | ||
730 | unregister_memory_notifier(&kdump_mem_nb); | ||
731 | return; | ||
732 | } | ||
733 | if (!OLDMEM_BASE && MACHINE_IS_VM) | ||
734 | diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); | ||
735 | crashk_res.start = crash_base; | ||
736 | crashk_res.end = crash_base + crash_size - 1; | ||
737 | insert_resource(&iomem_resource, &crashk_res); | ||
738 | reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK); | ||
739 | pr_info("Reserving %lluMB of memory at %lluMB " | ||
740 | "for crashkernel (System RAM: %luMB)\n", | ||
741 | crash_size >> 20, crash_base >> 20, memory_end >> 20); | ||
742 | #endif | ||
743 | } | ||
744 | |||
553 | static void __init | 745 | static void __init |
554 | setup_memory(void) | 746 | setup_memory(void) |
555 | { | 747 | { |
@@ -580,6 +772,14 @@ setup_memory(void) | |||
580 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { | 772 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { |
581 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; | 773 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; |
582 | 774 | ||
775 | #ifdef CONFIG_CRASH_DUMP | ||
776 | if (OLDMEM_BASE) { | ||
777 | /* Move initrd behind kdump oldmem */ | ||
778 | if (start + INITRD_SIZE > OLDMEM_BASE && | ||
779 | start < OLDMEM_BASE + OLDMEM_SIZE) | ||
780 | start = OLDMEM_BASE + OLDMEM_SIZE; | ||
781 | } | ||
782 | #endif | ||
583 | if (start + INITRD_SIZE > memory_end) { | 783 | if (start + INITRD_SIZE > memory_end) { |
584 | pr_err("initrd extends beyond end of " | 784 | pr_err("initrd extends beyond end of " |
585 | "memory (0x%08lx > 0x%08lx) " | 785 | "memory (0x%08lx > 0x%08lx) " |
@@ -610,7 +810,8 @@ setup_memory(void) | |||
610 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 810 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { |
611 | unsigned long start_chunk, end_chunk, pfn; | 811 | unsigned long start_chunk, end_chunk, pfn; |
612 | 812 | ||
613 | if (memory_chunk[i].type != CHUNK_READ_WRITE) | 813 | if (memory_chunk[i].type != CHUNK_READ_WRITE && |
814 | memory_chunk[i].type != CHUNK_CRASHK) | ||
614 | continue; | 815 | continue; |
615 | start_chunk = PFN_DOWN(memory_chunk[i].addr); | 816 | start_chunk = PFN_DOWN(memory_chunk[i].addr); |
616 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); | 817 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); |
@@ -644,6 +845,15 @@ setup_memory(void) | |||
644 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, | 845 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, |
645 | BOOTMEM_DEFAULT); | 846 | BOOTMEM_DEFAULT); |
646 | 847 | ||
848 | #ifdef CONFIG_CRASH_DUMP | ||
849 | if (crashk_res.start) | ||
850 | reserve_bootmem(crashk_res.start, | ||
851 | crashk_res.end - crashk_res.start + 1, | ||
852 | BOOTMEM_DEFAULT); | ||
853 | if (is_kdump_kernel()) | ||
854 | reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE, | ||
855 | PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT); | ||
856 | #endif | ||
647 | #ifdef CONFIG_BLK_DEV_INITRD | 857 | #ifdef CONFIG_BLK_DEV_INITRD |
648 | if (INITRD_START && INITRD_SIZE) { | 858 | if (INITRD_START && INITRD_SIZE) { |
649 | if (INITRD_START + INITRD_SIZE <= memory_end) { | 859 | if (INITRD_START + INITRD_SIZE <= memory_end) { |
@@ -812,8 +1022,11 @@ setup_arch(char **cmdline_p) | |||
812 | setup_ipl(); | 1022 | setup_ipl(); |
813 | setup_memory_end(); | 1023 | setup_memory_end(); |
814 | setup_addressing_mode(); | 1024 | setup_addressing_mode(); |
1025 | reserve_oldmem(); | ||
1026 | reserve_crashkernel(); | ||
815 | setup_memory(); | 1027 | setup_memory(); |
816 | setup_resources(); | 1028 | setup_resources(); |
1029 | setup_vmcoreinfo(); | ||
817 | setup_restart_psw(); | 1030 | setup_restart_psw(); |
818 | setup_lowcore(); | 1031 | setup_lowcore(); |
819 | 1032 | ||
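reserve_crashkernel() honours the usual crashkernel=size[@offset] command line syntax (for example crashkernel=128M); when no offset is given, find_crash_base() walks the chunk list from the top and places the reservation at the end of the highest read-write chunk that is large enough. A simplified stand-alone model of that search, leaving out the alignment, HSA and initrd checks of the real code:

    #include <stdio.h>

    struct chunk { unsigned long addr, size; int rw; };

    /* Toy model: pick the end of the highest read-write chunk that fits. */
    static unsigned long pick_crash_base(struct chunk *c, int n, unsigned long size)
    {
        int i;

        for (i = n - 1; i >= 0; i--) {
            if (!c[i].rw || c[i].size < size)
                continue;
            return c[i].addr + c[i].size - size;
        }
        return 0;
    }

    int main(void)
    {
        struct chunk mem[] = {
            { 0,           512UL << 20, 1 },
            { 512UL << 20, 512UL << 20, 1 },
        };
        unsigned long base = pick_crash_base(mem, 2, 128UL << 20);

        printf("crashkernel base: %luM\n", base >> 20);   /* 896M here */
        return 0;
    }

verify_crash_base() then additionally insists that at least crash_size bytes lie below crash_base, because the swap to zero described in head_kdump.S exchanges the reserved region with the start of memory.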
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 9a40e1cc5ec3..05a85bc14c98 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/ucontext.h> | 30 | #include <asm/ucontext.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/lowcore.h> | 32 | #include <asm/lowcore.h> |
33 | #include <asm/compat.h> | ||
33 | #include "entry.h" | 34 | #include "entry.h" |
34 | 35 | ||
35 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 36 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
@@ -116,7 +117,8 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
116 | 117 | ||
117 | /* Copy a 'clean' PSW mask to the user to avoid leaking | 118 | /* Copy a 'clean' PSW mask to the user to avoid leaking |
118 | information about whether PER is currently on. */ | 119 | information about whether PER is currently on. */ |
119 | user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask); | 120 | user_sregs.regs.psw.mask = psw_user_bits | |
121 | (regs->psw.mask & PSW_MASK_USER); | ||
120 | user_sregs.regs.psw.addr = regs->psw.addr; | 122 | user_sregs.regs.psw.addr = regs->psw.addr; |
121 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); | 123 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); |
122 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, | 124 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, |
@@ -143,9 +145,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
143 | err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); | 145 | err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); |
144 | if (err) | 146 | if (err) |
145 | return err; | 147 | return err; |
146 | regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, | 148 | /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ |
147 | user_sregs.regs.psw.mask); | 149 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
148 | regs->psw.addr = PSW_ADDR_AMODE | user_sregs.regs.psw.addr; | 150 | (user_sregs.regs.psw.mask & PSW_MASK_USER); |
151 | /* Check for invalid amode */ | ||
152 | if (regs->psw.mask & PSW_MASK_EA) | ||
153 | regs->psw.mask |= PSW_MASK_BA; | ||
154 | regs->psw.addr = user_sregs.regs.psw.addr; | ||
149 | memcpy(®s->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); | 155 | memcpy(®s->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); |
150 | memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, | 156 | memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, |
151 | sizeof(sregs->regs.acrs)); | 157 | sizeof(sregs->regs.acrs)); |
@@ -156,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
156 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; | 162 | current->thread.fp_regs.fpc &= FPC_VALID_MASK; |
157 | 163 | ||
158 | restore_fp_regs(¤t->thread.fp_regs); | 164 | restore_fp_regs(¤t->thread.fp_regs); |
159 | regs->svcnr = 0; /* disable syscall checks */ | 165 | clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ |
160 | return 0; | 166 | return 0; |
161 | } | 167 | } |
162 | 168 | ||
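Both signal hunks above replace the old PSW_MASK_MERGE macro with an explicit split: the kernel-defined template bits always come from psw_user_bits (or the live mask), and only the bits covered by PSW_MASK_USER may be taken from the signal frame, so a handler cannot smuggle privileged PSW bits back in on sigreturn. A stand-alone model of the same masking pattern, using invented constants rather than the real s390 PSW definitions:

#include <stdio.h>

/* Invented demo constants - not the real s390 PSW bit definitions. */
#define USER_BITS	0x0000ffffUL	/* bits user space is allowed to change */
#define FIXED_BITS	0xa5a50000UL	/* kernel-defined template bits */

/* Export a mask to the signal frame: fixed template plus the user-visible bits. */
static unsigned long save_mask(unsigned long live_mask)
{
	return FIXED_BITS | (live_mask & USER_BITS);
}

/* Import a mask from the signal frame: keep kernel bits, take only user bits. */
static unsigned long restore_mask(unsigned long live_mask, unsigned long frame_mask)
{
	return (live_mask & ~USER_BITS) | (frame_mask & USER_BITS);
}

int main(void)
{
	unsigned long live = 0xa5a51234UL;
	unsigned long from_frame = 0xffffffffUL;	/* user tried to set every bit */

	printf("saved:    %#lx\n", save_mask(live));			/* 0xa5a51234 */
	printf("restored: %#lx\n", restore_mask(live, from_frame));	/* 0xa5a5ffff */
	return 0;
}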
@@ -288,6 +294,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
288 | 294 | ||
289 | /* Set up registers for signal handler */ | 295 | /* Set up registers for signal handler */ |
290 | regs->gprs[15] = (unsigned long) frame; | 296 | regs->gprs[15] = (unsigned long) frame; |
297 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | ||
291 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 298 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
292 | 299 | ||
293 | regs->gprs[2] = map_signal(sig); | 300 | regs->gprs[2] = map_signal(sig); |
@@ -356,6 +363,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
356 | 363 | ||
357 | /* Set up registers for signal handler */ | 364 | /* Set up registers for signal handler */ |
358 | regs->gprs[15] = (unsigned long) frame; | 365 | regs->gprs[15] = (unsigned long) frame; |
366 | regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ | ||
359 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 367 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; |
360 | 368 | ||
361 | regs->gprs[2] = map_signal(sig); | 369 | regs->gprs[2] = map_signal(sig); |
@@ -401,7 +409,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
401 | */ | 409 | */ |
402 | void do_signal(struct pt_regs *regs) | 410 | void do_signal(struct pt_regs *regs) |
403 | { | 411 | { |
404 | unsigned long retval = 0, continue_addr = 0, restart_addr = 0; | ||
405 | siginfo_t info; | 412 | siginfo_t info; |
406 | int signr; | 413 | int signr; |
407 | struct k_sigaction ka; | 414 | struct k_sigaction ka; |
@@ -421,54 +428,45 @@ void do_signal(struct pt_regs *regs) | |||
421 | else | 428 | else |
422 | oldset = ¤t->blocked; | 429 | oldset = ¤t->blocked; |
423 | 430 | ||
424 | /* Are we from a system call? */ | 431 | /* |
425 | if (regs->svcnr) { | 432 | * Get signal to deliver. When running under ptrace, at this point |
426 | continue_addr = regs->psw.addr; | 433 | * the debugger may change all our registers, including the system |
427 | restart_addr = continue_addr - regs->ilc; | 434 | * call information. |
428 | retval = regs->gprs[2]; | 435 | */ |
429 | 436 | current_thread_info()->system_call = | |
430 | /* Prepare for system call restart. We do this here so that a | 437 | test_thread_flag(TIF_SYSCALL) ? regs->svc_code : 0; |
431 | debugger will see the already changed PSW. */ | ||
432 | switch (retval) { | ||
433 | case -ERESTARTNOHAND: | ||
434 | case -ERESTARTSYS: | ||
435 | case -ERESTARTNOINTR: | ||
436 | regs->gprs[2] = regs->orig_gpr2; | ||
437 | regs->psw.addr = restart_addr; | ||
438 | break; | ||
439 | case -ERESTART_RESTARTBLOCK: | ||
440 | regs->gprs[2] = -EINTR; | ||
441 | } | ||
442 | regs->svcnr = 0; /* Don't deal with this again. */ | ||
443 | } | ||
444 | |||
445 | /* Get signal to deliver. When running under ptrace, at this point | ||
446 | the debugger may change all our registers ... */ | ||
447 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 438 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
448 | 439 | ||
449 | /* Depending on the signal settings we may need to revert the | ||
450 | decision to restart the system call. */ | ||
451 | if (signr > 0 && regs->psw.addr == restart_addr) { | ||
452 | if (retval == -ERESTARTNOHAND | ||
453 | || (retval == -ERESTARTSYS | ||
454 | && !(current->sighand->action[signr-1].sa.sa_flags | ||
455 | & SA_RESTART))) { | ||
456 | regs->gprs[2] = -EINTR; | ||
457 | regs->psw.addr = continue_addr; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | if (signr > 0) { | 440 | if (signr > 0) { |
462 | /* Whee! Actually deliver the signal. */ | 441 | /* Whee! Actually deliver the signal. */ |
463 | int ret; | 442 | if (current_thread_info()->system_call) { |
464 | #ifdef CONFIG_COMPAT | 443 | regs->svc_code = current_thread_info()->system_call; |
465 | if (is_compat_task()) { | 444 | /* Check for system call restarting. */ |
466 | ret = handle_signal32(signr, &ka, &info, oldset, regs); | 445 | switch (regs->gprs[2]) { |
467 | } | 446 | case -ERESTART_RESTARTBLOCK: |
468 | else | 447 | case -ERESTARTNOHAND: |
469 | #endif | 448 | regs->gprs[2] = -EINTR; |
470 | ret = handle_signal(signr, &ka, &info, oldset, regs); | 449 | break; |
471 | if (!ret) { | 450 | case -ERESTARTSYS: |
451 | if (!(ka.sa.sa_flags & SA_RESTART)) { | ||
452 | regs->gprs[2] = -EINTR; | ||
453 | break; | ||
454 | } | ||
455 | /* fallthrough */ | ||
456 | case -ERESTARTNOINTR: | ||
457 | regs->gprs[2] = regs->orig_gpr2; | ||
458 | regs->psw.addr = | ||
459 | __rewind_psw(regs->psw, | ||
460 | regs->svc_code >> 16); | ||
461 | break; | ||
462 | } | ||
463 | /* No longer in a system call */ | ||
464 | clear_thread_flag(TIF_SYSCALL); | ||
465 | } | ||
466 | |||
467 | if ((is_compat_task() ? | ||
468 | handle_signal32(signr, &ka, &info, oldset, regs) : | ||
469 | handle_signal(signr, &ka, &info, oldset, regs)) == 0) { | ||
472 | /* | 470 | /* |
473 | * A signal was successfully delivered; the saved | 471 | * A signal was successfully delivered; the saved |
474 | * sigmask will have been stored in the signal frame, | 472 | * sigmask will have been stored in the signal frame, |
@@ -482,11 +480,32 @@ void do_signal(struct pt_regs *regs) | |||
482 | * Let tracing know that we've done the handler setup. | 480 | * Let tracing know that we've done the handler setup. |
483 | */ | 481 | */ |
484 | tracehook_signal_handler(signr, &info, &ka, regs, | 482 | tracehook_signal_handler(signr, &info, &ka, regs, |
485 | test_thread_flag(TIF_SINGLE_STEP)); | 483 | test_thread_flag(TIF_SINGLE_STEP)); |
486 | } | 484 | } |
487 | return; | 485 | return; |
488 | } | 486 | } |
489 | 487 | ||
488 | /* No handlers present - check for system call restart */ | ||
489 | if (current_thread_info()->system_call) { | ||
490 | regs->svc_code = current_thread_info()->system_call; | ||
491 | switch (regs->gprs[2]) { | ||
492 | case -ERESTART_RESTARTBLOCK: | ||
493 | /* Restart with sys_restart_syscall */ | ||
494 | regs->svc_code = __NR_restart_syscall; | ||
495 | /* fallthrough */ | ||
496 | case -ERESTARTNOHAND: | ||
497 | case -ERESTARTSYS: | ||
498 | case -ERESTARTNOINTR: | ||
499 | /* Restart system call with magic TIF bit. */ | ||
500 | regs->gprs[2] = regs->orig_gpr2; | ||
501 | set_thread_flag(TIF_SYSCALL); | ||
502 | break; | ||
503 | default: | ||
504 | clear_thread_flag(TIF_SYSCALL); | ||
505 | break; | ||
506 | } | ||
507 | } | ||
508 | |||
490 | /* | 509 | /* |
491 | * If there's no signal to deliver, we just put the saved sigmask back. | 510 | * If there's no signal to deliver, we just put the saved sigmask back. |
492 | */ | 511 | */ |
@@ -494,13 +513,6 @@ void do_signal(struct pt_regs *regs) | |||
494 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 513 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
495 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 514 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
496 | } | 515 | } |
497 | |||
498 | /* Restart a different system call. */ | ||
499 | if (retval == -ERESTART_RESTARTBLOCK | ||
500 | && regs->psw.addr == continue_addr) { | ||
501 | regs->gprs[2] = __NR_restart_syscall; | ||
502 | set_thread_flag(TIF_RESTART_SVC); | ||
503 | } | ||
504 | } | 516 | } |
505 | 517 | ||
506 | void do_notify_resume(struct pt_regs *regs) | 518 | void do_notify_resume(struct pt_regs *regs) |
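The rewritten do_signal defers all system-call restart handling until after get_signal_to_deliver, keyed off the new TIF_SYSCALL flag and regs->svc_code instead of the removed svcnr/ilc bookkeeping. The restart policy itself is unchanged in spirit; the sketch below models it as a pure decision function in user-space C (the function and enum names are invented for the demo, the error numbers match include/linux/errno.h):

#include <stdio.h>

/* Kernel restart error numbers (same values as include/linux/errno.h). */
#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516
#define SA_RESTART		0x10000000

enum action { RETURN_EINTR, RESTART_SAME, RESTART_VIA_RESTART_SYSCALL, NO_CHANGE };

static enum action restart_policy(long retval, int has_handler, int sa_flags)
{
	switch (-retval) {
	case ERESTART_RESTARTBLOCK:
		/* with a handler: -EINTR; without: restart via sys_restart_syscall */
		return has_handler ? RETURN_EINTR : RESTART_VIA_RESTART_SYSCALL;
	case ERESTARTNOHAND:
		return has_handler ? RETURN_EINTR : RESTART_SAME;
	case ERESTARTSYS:
		if (has_handler && !(sa_flags & SA_RESTART))
			return RETURN_EINTR;
		return RESTART_SAME;
	case ERESTARTNOINTR:
		return RESTART_SAME;
	default:
		return NO_CHANGE;	/* not a restart code: leave gpr 2 alone */
	}
}

int main(void)
{
	static const char * const name[] = {
		"return -EINTR", "restart same syscall",
		"restart via restart_syscall", "no change"
	};

	puts(name[restart_policy(-ERESTARTSYS, 1, 0)]);
	puts(name[restart_policy(-ERESTARTSYS, 1, SA_RESTART)]);
	puts(name[restart_policy(-ERESTART_RESTARTBLOCK, 0, 0)]);
	return 0;
}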
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6ab16ac64d29..3ea872890da2 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/timex.h> | 38 | #include <linux/timex.h> |
39 | #include <linux/bootmem.h> | 39 | #include <linux/bootmem.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/crash_dump.h> | ||
41 | #include <asm/asm-offsets.h> | 42 | #include <asm/asm-offsets.h> |
42 | #include <asm/ipl.h> | 43 | #include <asm/ipl.h> |
43 | #include <asm/setup.h> | 44 | #include <asm/setup.h> |
@@ -97,6 +98,29 @@ static inline int cpu_stopped(int cpu) | |||
97 | return raw_cpu_stopped(cpu_logical_map(cpu)); | 98 | return raw_cpu_stopped(cpu_logical_map(cpu)); |
98 | } | 99 | } |
99 | 100 | ||
101 | /* | ||
102 | * Ensure that PSW restart is done on an online CPU | ||
103 | */ | ||
104 | void smp_restart_with_online_cpu(void) | ||
105 | { | ||
106 | int cpu; | ||
107 | |||
108 | for_each_online_cpu(cpu) { | ||
109 | if (stap() == __cpu_logical_map[cpu]) { | ||
110 | /* We are online: Enable DAT again and return */ | ||
111 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); | ||
112 | return; | ||
113 | } | ||
114 | } | ||
115 | /* We are not online: Do PSW restart on an online CPU */ | ||
116 | while (sigp(cpu, sigp_restart) == sigp_busy) | ||
117 | cpu_relax(); | ||
118 | /* And stop ourself */ | ||
119 | while (raw_sigp(stap(), sigp_stop) == sigp_busy) | ||
120 | cpu_relax(); | ||
121 | for (;;); | ||
122 | } | ||
123 | |||
100 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | 124 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) |
101 | { | 125 | { |
102 | struct _lowcore *lc, *current_lc; | 126 | struct _lowcore *lc, *current_lc; |
@@ -106,14 +130,16 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | |||
106 | 130 | ||
107 | if (smp_processor_id() == 0) | 131 | if (smp_processor_id() == 0) |
108 | func(data); | 132 | func(data); |
109 | __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); | 133 | __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | |
134 | PSW_MASK_EA | PSW_MASK_BA); | ||
110 | /* Disable lowcore protection */ | 135 | /* Disable lowcore protection */ |
111 | __ctl_clear_bit(0, 28); | 136 | __ctl_clear_bit(0, 28); |
112 | current_lc = lowcore_ptr[smp_processor_id()]; | 137 | current_lc = lowcore_ptr[smp_processor_id()]; |
113 | lc = lowcore_ptr[0]; | 138 | lc = lowcore_ptr[0]; |
114 | if (!lc) | 139 | if (!lc) |
115 | lc = current_lc; | 140 | lc = current_lc; |
116 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 141 | lc->restart_psw.mask = |
142 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
117 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; | 143 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; |
118 | if (!cpu_online(0)) | 144 | if (!cpu_online(0)) |
119 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); | 145 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); |
@@ -135,7 +161,7 @@ void smp_send_stop(void) | |||
135 | int cpu, rc; | 161 | int cpu, rc; |
136 | 162 | ||
137 | /* Disable all interrupts/machine checks */ | 163 | /* Disable all interrupts/machine checks */ |
138 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); | 164 | __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); |
139 | trace_hardirqs_off(); | 165 | trace_hardirqs_off(); |
140 | 166 | ||
141 | /* stop all processors */ | 167 | /* stop all processors */ |
@@ -161,7 +187,10 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
161 | { | 187 | { |
162 | unsigned long bits; | 188 | unsigned long bits; |
163 | 189 | ||
164 | kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; | 190 | if (ext_int_code == 0x1202) |
191 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; | ||
192 | else | ||
193 | kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; | ||
165 | /* | 194 | /* |
166 | * handle bit signal external calls | 195 | * handle bit signal external calls |
167 | */ | 196 | */ |
@@ -183,12 +212,19 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, | |||
183 | */ | 212 | */ |
184 | static void smp_ext_bitcall(int cpu, int sig) | 213 | static void smp_ext_bitcall(int cpu, int sig) |
185 | { | 214 | { |
215 | int order; | ||
216 | |||
186 | /* | 217 | /* |
187 | * Set signaling bit in lowcore of target cpu and kick it | 218 | * Set signaling bit in lowcore of target cpu and kick it |
188 | */ | 219 | */ |
189 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | 220 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); |
190 | while (sigp(cpu, sigp_emergency_signal) == sigp_busy) | 221 | while (1) { |
222 | order = smp_vcpu_scheduled(cpu) ? | ||
223 | sigp_external_call : sigp_emergency_signal; | ||
224 | if (sigp(cpu, order) != sigp_busy) | ||
225 | break; | ||
191 | udelay(10); | 226 | udelay(10); |
227 | } | ||
192 | } | 228 | } |
193 | 229 | ||
194 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 230 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
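smp_ext_bitcall now re-evaluates on every attempt whether the target CPU is backed by a running virtual CPU and picks the external-call or emergency-signal order accordingly, looping for as long as the SIGP order comes back busy. A toy model of that retry shape only, with the hardware replaced by a stub that reports busy a few times:

#include <stdio.h>

enum rc { RC_OK, RC_BUSY };

static int busy_left = 3;	/* stub: the target reports busy three times */

static enum rc send_order(int order)
{
	(void) order;
	return busy_left-- > 0 ? RC_BUSY : RC_OK;
}

static int pick_order(int target_is_scheduled)
{
	return target_is_scheduled ? 1 /* "external call" */ : 2 /* "emergency signal" */;
}

int main(void)
{
	int tries = 0;

	for (;;) {
		/* Re-check the target's scheduling state on every attempt. */
		int order = pick_order(1);

		tries++;
		if (send_order(order) != RC_BUSY)
			break;
		/* the real loop waits about 10us here before retrying */
	}
	printf("order delivered after %d attempts\n", tries);
	return 0;
}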
@@ -281,11 +317,13 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
281 | } | 317 | } |
282 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 318 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
283 | 319 | ||
284 | #ifdef CONFIG_ZFCPDUMP | 320 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) |
285 | 321 | ||
286 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | 322 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) |
287 | { | 323 | { |
288 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 324 | if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) |
325 | return; | ||
326 | if (is_kdump_kernel()) | ||
289 | return; | 327 | return; |
290 | if (cpu >= NR_CPUS) { | 328 | if (cpu >= NR_CPUS) { |
291 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " | 329 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " |
@@ -403,6 +441,18 @@ static void __init smp_detect_cpus(void) | |||
403 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 441 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
404 | if (!info) | 442 | if (!info) |
405 | panic("smp_detect_cpus failed to allocate memory\n"); | 443 | panic("smp_detect_cpus failed to allocate memory\n"); |
444 | #ifdef CONFIG_CRASH_DUMP | ||
445 | if (OLDMEM_BASE && !is_kdump_kernel()) { | ||
446 | struct save_area *save_area; | ||
447 | |||
448 | save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); | ||
449 | if (!save_area) | ||
450 | panic("could not allocate memory for save area\n"); | ||
451 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), | ||
452 | 0x200, 0); | ||
453 | zfcpdump_save_areas[0] = save_area; | ||
454 | } | ||
455 | #endif | ||
406 | /* Use sigp detection algorithm if sclp doesn't work. */ | 456 | /* Use sigp detection algorithm if sclp doesn't work. */ |
407 | if (sclp_get_cpu_info(info)) { | 457 | if (sclp_get_cpu_info(info)) { |
408 | smp_use_sigp_detection = 1; | 458 | smp_use_sigp_detection = 1; |
@@ -463,7 +513,8 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
463 | set_cpu_online(smp_processor_id(), true); | 513 | set_cpu_online(smp_processor_id(), true); |
464 | ipi_call_unlock(); | 514 | ipi_call_unlock(); |
465 | __ctl_clear_bit(0, 28); /* Disable lowcore protection */ | 515 | __ctl_clear_bit(0, 28); /* Disable lowcore protection */ |
466 | S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 516 | S390_lowcore.restart_psw.mask = |
517 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
467 | S390_lowcore.restart_psw.addr = | 518 | S390_lowcore.restart_psw.addr = |
468 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 519 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
469 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ | 520 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ |
@@ -511,7 +562,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu) | |||
511 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); | 562 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); |
512 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 563 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
513 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 564 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
514 | lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 565 | lowcore->restart_psw.mask = |
566 | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; | ||
515 | lowcore->restart_psw.addr = | 567 | lowcore->restart_psw.addr = |
516 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 568 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
517 | if (user_mode != HOME_SPACE_MODE) | 569 | if (user_mode != HOME_SPACE_MODE) |
@@ -712,6 +764,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
712 | /* request the 0x1201 emergency signal external interrupt */ | 764 | /* request the 0x1201 emergency signal external interrupt */ |
713 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | 765 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) |
714 | panic("Couldn't request external interrupt 0x1201"); | 766 | panic("Couldn't request external interrupt 0x1201"); |
767 | /* request the 0x1202 external call external interrupt */ | ||
768 | if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) | ||
769 | panic("Couldn't request external interrupt 0x1202"); | ||
715 | 770 | ||
716 | /* Reallocate current lowcore, but keep its contents. */ | 771 | /* Reallocate current lowcore, but keep its contents. */ |
717 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); | 772 | lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); |
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index b6f9afed74ec..47df775c844d 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/pfn.h> | 9 | #include <linux/pfn.h> |
10 | #include <linux/suspend.h> | ||
10 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
11 | #include <asm/system.h> | 12 | #include <asm/system.h> |
12 | 13 | ||
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 5c9e439bf3f6..2a94b774695c 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c | |||
@@ -442,7 +442,7 @@ void s390_adjust_jiffies(void) | |||
442 | */ | 442 | */ |
443 | FP_UNPACK_SP(SA, &fmil); | 443 | FP_UNPACK_SP(SA, &fmil); |
444 | if ((info->capability >> 23) == 0) | 444 | if ((info->capability >> 23) == 0) |
445 | FP_FROM_INT_S(SB, info->capability, 32, int); | 445 | FP_FROM_INT_S(SB, (long) info->capability, 64, long); |
446 | else | 446 | else |
447 | FP_UNPACK_SP(SB, &info->capability); | 447 | FP_UNPACK_SP(SB, &info->capability); |
448 | FP_DIV_S(SR, SA, SB); | 448 | FP_DIV_S(SR, SA, SB); |
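The capability fix converts the 32-bit unsigned value through a 64-bit signed path; squeezing it through a 32-bit signed conversion would turn values with the top bit set negative before the floating-point division. A two-line demonstration (assumes an LP64 target, where long is 64 bits):

#include <stdio.h>

int main(void)
{
	unsigned int capability = 0x90000000u;	/* top bit set */

	/* Implementation-defined, but negative on the usual two's-complement targets. */
	printf("via 32-bit signed: %d\n", (int) capability);
	/* Widening to a 64-bit long first keeps the value positive (LP64 assumed). */
	printf("via 64-bit signed: %ld\n", (long) capability);
	return 0;
}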
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 8d65bd0383fc..ebbfab3c6e5a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <asm/timer.h> | 48 | #include <asm/timer.h> |
49 | #include <asm/etr.h> | 49 | #include <asm/etr.h> |
50 | #include <asm/cio.h> | 50 | #include <asm/cio.h> |
51 | #include "entry.h" | ||
51 | 52 | ||
52 | /* change this if you have some constant time drift */ | 53 | /* change this if you have some constant time drift */ |
53 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 54 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 0cd340b72632..77b8942b9a15 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -299,8 +299,8 @@ out: | |||
299 | } | 299 | } |
300 | __initcall(init_topology_update); | 300 | __initcall(init_topology_update); |
301 | 301 | ||
302 | static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, | 302 | static void __init alloc_masks(struct sysinfo_15_1_x *info, |
303 | int offset) | 303 | struct mask_info *mask, int offset) |
304 | { | 304 | { |
305 | int i, nr_masks; | 305 | int i, nr_masks; |
306 | 306 | ||
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index ffabcd9d3363..a9807dd86276 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -200,7 +200,7 @@ void show_registers(struct pt_regs *regs) | |||
200 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | 200 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), |
201 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | 201 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); |
202 | #ifdef CONFIG_64BIT | 202 | #ifdef CONFIG_64BIT |
203 | printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); | 203 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); |
204 | #endif | 204 | #endif |
205 | printk("\n%s GPRS: " FOURLONG, mode, | 205 | printk("\n%s GPRS: " FOURLONG, mode, |
206 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | 206 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); |
@@ -334,7 +334,8 @@ void __kprobes do_per_trap(struct pt_regs *regs) | |||
334 | info.si_signo = SIGTRAP; | 334 | info.si_signo = SIGTRAP; |
335 | info.si_errno = 0; | 335 | info.si_errno = 0; |
336 | info.si_code = TRAP_HWBKPT; | 336 | info.si_code = TRAP_HWBKPT; |
337 | info.si_addr = (void *) current->thread.per_event.address; | 337 | info.si_addr = |
338 | (void __force __user *) current->thread.per_event.address; | ||
338 | force_sig_info(SIGTRAP, &info, current); | 339 | force_sig_info(SIGTRAP, &info, current); |
339 | } | 340 | } |
340 | 341 | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 2d6228f60cd6..bb48977f5469 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -170,7 +170,8 @@ void __kprobes vtime_stop_cpu(void) | |||
170 | psw_t psw; | 170 | psw_t psw; |
171 | 171 | ||
172 | /* Wait for external, I/O or machine check interrupt. */ | 172 | /* Wait for external, I/O or machine check interrupt. */ |
173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; | 173 | psw.mask = psw_kernel_bits | PSW_MASK_WAIT | |
174 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
174 | 175 | ||
175 | idle->nohz_delay = 0; | 176 | idle->nohz_delay = 0; |
176 | 177 | ||
@@ -183,7 +184,8 @@ void __kprobes vtime_stop_cpu(void) | |||
183 | * set_cpu_timer(VTIMER_MAX_SLICE); | 184 | * set_cpu_timer(VTIMER_MAX_SLICE); |
184 | * idle->idle_enter = get_clock(); | 185 | * idle->idle_enter = get_clock(); |
185 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 186 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
186 | * PSW_MASK_IO | PSW_MASK_EXT); | 187 | * PSW_MASK_DAT | PSW_MASK_IO | |
188 | * PSW_MASK_EXT | PSW_MASK_MCHECK); | ||
187 | * The difference is that the inline assembly makes sure that | 189 | * The difference is that the inline assembly makes sure that |
188 | * the last three instruction are stpt, stck and lpsw in that | 190 | * the last three instruction are stpt, stck and lpsw in that |
189 | * order. This is done to increase the precision. | 191 | * order. This is done to increase the precision. |
@@ -216,7 +218,8 @@ void __kprobes vtime_stop_cpu(void) | |||
216 | * vq->idle = get_cpu_timer(); | 218 | * vq->idle = get_cpu_timer(); |
217 | * idle->idle_enter = get_clock(); | 219 | * idle->idle_enter = get_clock(); |
218 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 220 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
219 | * PSW_MASK_IO | PSW_MASK_EXT); | 221 | * PSW_MASK_DAT | PSW_MASK_IO | |
222 | * PSW_MASK_EXT | PSW_MASK_MCHECK); | ||
220 | * The difference is that the inline assembly makes sure that | 223 | * The difference is that the inline assembly makes sure that |
221 | * the last three instruction are stpt, stck and lpsw in that | 224 | * the last three instruction are stpt, stck and lpsw in that |
222 | * order. This is done to increase the precision. | 225 | * order. This is done to increase the precision. |
@@ -458,7 +461,7 @@ void add_virt_timer_periodic(void *new) | |||
458 | } | 461 | } |
459 | EXPORT_SYMBOL(add_virt_timer_periodic); | 462 | EXPORT_SYMBOL(add_virt_timer_periodic); |
460 | 463 | ||
461 | int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) | 464 | static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) |
462 | { | 465 | { |
463 | struct vtimer_queue *vq; | 466 | struct vtimer_queue *vq; |
464 | unsigned long flags; | 467 | unsigned long flags; |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 9e4c84187cf5..87cedd61be04 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * diag.c - handling diagnose instructions | 2 | * diag.c - handling diagnose instructions |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008 | 4 | * Copyright IBM Corp. 2008,2011 |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License (version 2 only) | 7 | * it under the terms of the GNU General Public License (version 2 only) |
@@ -15,6 +15,34 @@ | |||
15 | #include <linux/kvm_host.h> | 15 | #include <linux/kvm_host.h> |
16 | #include "kvm-s390.h" | 16 | #include "kvm-s390.h" |
17 | 17 | ||
18 | static int diag_release_pages(struct kvm_vcpu *vcpu) | ||
19 | { | ||
20 | unsigned long start, end; | ||
21 | unsigned long prefix = vcpu->arch.sie_block->prefix; | ||
22 | |||
23 | start = vcpu->arch.guest_gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; | ||
24 | end = vcpu->arch.guest_gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; | ||
25 | |||
26 | if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end | ||
27 | || start < 2 * PAGE_SIZE) | ||
28 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | ||
29 | |||
30 | VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); | ||
31 | vcpu->stat.diagnose_10++; | ||
32 | |||
33 | /* we checked for start > end above */ | ||
34 | if (end < prefix || start >= prefix + 2 * PAGE_SIZE) { | ||
35 | gmap_discard(start, end, vcpu->arch.gmap); | ||
36 | } else { | ||
37 | if (start < prefix) | ||
38 | gmap_discard(start, prefix, vcpu->arch.gmap); | ||
39 | if (end >= prefix) | ||
40 | gmap_discard(prefix + 2 * PAGE_SIZE, | ||
41 | end, vcpu->arch.gmap); | ||
42 | } | ||
43 | return 0; | ||
44 | } | ||
45 | |||
18 | static int __diag_time_slice_end(struct kvm_vcpu *vcpu) | 46 | static int __diag_time_slice_end(struct kvm_vcpu *vcpu) |
19 | { | 47 | { |
20 | VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); | 48 | VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); |
@@ -57,6 +85,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) | |||
57 | int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; | 85 | int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; |
58 | 86 | ||
59 | switch (code) { | 87 | switch (code) { |
88 | case 0x10: | ||
89 | return diag_release_pages(vcpu); | ||
60 | case 0x44: | 90 | case 0x44: |
61 | return __diag_time_slice_end(vcpu); | 91 | return __diag_time_slice_end(vcpu); |
62 | case 0x308: | 92 | case 0x308: |
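diag_release_pages validates that both addresses are page aligned, ordered, and above the first two pages, then discards the requested guest range while carving out the two prefix pages. A stand-alone model of the carve-out logic (helper names invented for the demo), printing which sub-ranges would be discarded for a made-up prefix:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void discard(unsigned long from, unsigned long to)
{
	if (from < to)
		printf("discard [%#lx, %#lx)\n", from, to);
}

/* Discard [start, end) but never the prefix pages [prefix, prefix + 2 pages). */
static void release(unsigned long start, unsigned long end, unsigned long prefix)
{
	if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
		discard(start, end);			/* no overlap with the prefix */
		return;
	}
	if (start < prefix)
		discard(start, prefix);			/* part below the prefix pages */
	if (end >= prefix)
		discard(prefix + 2 * PAGE_SIZE, end);	/* part above the prefix pages */
}

int main(void)
{
	release(0x100000, 0x200000, 0x180000);	/* straddles the prefix pages */
	release(0x300000, 0x400000, 0x180000);	/* well clear of the prefix */
	return 0;
}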
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 9610ba41b974..0bd3bea1e4cd 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -71,6 +71,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
71 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, | 71 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, |
72 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, | 72 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, |
73 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, | 73 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, |
74 | { "diagnose_10", VCPU_STAT(diagnose_10) }, | ||
74 | { "diagnose_44", VCPU_STAT(diagnose_44) }, | 75 | { "diagnose_44", VCPU_STAT(diagnose_44) }, |
75 | { NULL } | 76 | { NULL } |
76 | }; | 77 | }; |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index a65229d91c92..db92f044024c 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -32,7 +32,8 @@ static void __udelay_disabled(unsigned long long usecs) | |||
32 | u64 clock_saved; | 32 | u64 clock_saved; |
33 | u64 end; | 33 | u64 end; |
34 | 34 | ||
35 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | 35 | mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_WAIT | |
36 | PSW_MASK_EXT | PSW_MASK_MCHECK; | ||
36 | end = get_clock() + (usecs << 12); | 37 | end = get_clock() + (usecs << 12); |
37 | clock_saved = local_tick_disable(); | 38 | clock_saved = local_tick_disable(); |
38 | __ctl_store(cr0_saved, 0, 0); | 39 | __ctl_store(cr0_saved, 0, 0); |
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 74833831417f..342ae35a5ba9 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -342,7 +342,8 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) | |||
342 | if (segment_eq(get_fs(), KERNEL_DS)) | 342 | if (segment_eq(get_fs(), KERNEL_DS)) |
343 | return __futex_atomic_op_pt(op, uaddr, oparg, old); | 343 | return __futex_atomic_op_pt(op, uaddr, oparg, old); |
344 | spin_lock(¤t->mm->page_table_lock); | 344 | spin_lock(¤t->mm->page_table_lock); |
345 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | 345 | uaddr = (u32 __force __user *) |
346 | __dat_user_addr((__force unsigned long) uaddr); | ||
346 | if (!uaddr) { | 347 | if (!uaddr) { |
347 | spin_unlock(¤t->mm->page_table_lock); | 348 | spin_unlock(¤t->mm->page_table_lock); |
348 | return -EFAULT; | 349 | return -EFAULT; |
@@ -378,7 +379,8 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, | |||
378 | if (segment_eq(get_fs(), KERNEL_DS)) | 379 | if (segment_eq(get_fs(), KERNEL_DS)) |
379 | return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); | 380 | return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); |
380 | spin_lock(¤t->mm->page_table_lock); | 381 | spin_lock(¤t->mm->page_table_lock); |
381 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | 382 | uaddr = (u32 __force __user *) |
383 | __dat_user_addr((__force unsigned long) uaddr); | ||
382 | if (!uaddr) { | 384 | if (!uaddr) { |
383 | spin_unlock(¤t->mm->page_table_lock); | 385 | spin_unlock(¤t->mm->page_table_lock); |
384 | return -EFAULT; | 386 | return -EFAULT; |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 9564fc779b27..1766def5bc3f 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -307,7 +307,7 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
307 | 307 | ||
308 | #ifdef CONFIG_PGSTE | 308 | #ifdef CONFIG_PGSTE |
309 | if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { | 309 | if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { |
310 | address = gmap_fault(address, | 310 | address = __gmap_fault(address, |
311 | (struct gmap *) S390_lowcore.gmap); | 311 | (struct gmap *) S390_lowcore.gmap); |
312 | if (address == -EFAULT) { | 312 | if (address == -EFAULT) { |
313 | fault = VM_FAULT_BADMAP; | 313 | fault = VM_FAULT_BADMAP; |
@@ -393,7 +393,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, | |||
393 | int fault; | 393 | int fault; |
394 | 394 | ||
395 | /* Protection exception is suppressing, decrement psw address. */ | 395 | /* Protection exception is suppressing, decrement psw address. */ |
396 | regs->psw.addr -= (pgm_int_code >> 16); | 396 | regs->psw.addr = __rewind_psw(regs->psw, pgm_int_code >> 16); |
397 | /* | 397 | /* |
398 | * Check for low-address protection. This needs to be treated | 398 | * Check for low-address protection. This needs to be treated |
399 | * as a special case because the translation exception code | 399 | * as a special case because the translation exception code |
@@ -454,7 +454,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | |||
454 | struct pt_regs regs; | 454 | struct pt_regs regs; |
455 | int access, fault; | 455 | int access, fault; |
456 | 456 | ||
457 | regs.psw.mask = psw_kernel_bits; | 457 | regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; |
458 | if (!irqs_disabled()) | 458 | if (!irqs_disabled()) |
459 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; | 459 | regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; |
460 | regs.psw.addr = (unsigned long) __builtin_return_address(0); | 460 | regs.psw.addr = (unsigned long) __builtin_return_address(0); |
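do_protection_exception now rewinds the PSW through __rewind_psw rather than by plain subtraction, so the result stays inside the current addressing-mode window instead of underflowing. The helper below is only a user-space model of that idea, not the kernel's __rewind_psw:

#include <stdio.h>

/* Rewind an instruction address by its length, wrapping inside the
 * 24-, 31- or 64-bit addressing window instead of going negative. */
static unsigned long rewind_addr(unsigned long addr, unsigned long ilen,
				 unsigned int amode_bits)
{
	unsigned long mask = amode_bits >= 64 ? ~0UL : (1UL << amode_bits) - 1;

	return (addr - ilen) & mask;
}

int main(void)
{
	/* A 6-byte instruction at address 2 in 31-bit mode: plain subtraction
	 * would underflow; the mask keeps the result inside the window. */
	printf("%#lx\n", rewind_addr(0x2, 6, 31));
	printf("%#lx\n", rewind_addr(0x80001000UL, 6, 64));
	return 0;
}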
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 5dbbaa6e594c..1cb8427bedfb 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/gfp.h> | ||
14 | #include <asm/system.h> | 15 | #include <asm/system.h> |
15 | 16 | ||
16 | /* | 17 | /* |
@@ -60,6 +61,9 @@ long probe_kernel_write(void *dst, const void *src, size_t size) | |||
60 | return copied < 0 ? -EFAULT : 0; | 61 | return copied < 0 ? -EFAULT : 0; |
61 | } | 62 | } |
62 | 63 | ||
64 | /* | ||
65 | * Copy memory in real mode (kernel to kernel) | ||
66 | */ | ||
63 | int memcpy_real(void *dest, void *src, size_t count) | 67 | int memcpy_real(void *dest, void *src, size_t count) |
64 | { | 68 | { |
65 | register unsigned long _dest asm("2") = (unsigned long) dest; | 69 | register unsigned long _dest asm("2") = (unsigned long) dest; |
@@ -101,3 +105,55 @@ void copy_to_absolute_zero(void *dest, void *src, size_t count) | |||
101 | __ctl_load(cr0, 0, 0); | 105 | __ctl_load(cr0, 0, 0); |
102 | preempt_enable(); | 106 | preempt_enable(); |
103 | } | 107 | } |
108 | |||
109 | /* | ||
110 | * Copy memory from kernel (real) to user (virtual) | ||
111 | */ | ||
112 | int copy_to_user_real(void __user *dest, void *src, size_t count) | ||
113 | { | ||
114 | int offs = 0, size, rc; | ||
115 | char *buf; | ||
116 | |||
117 | buf = (char *) __get_free_page(GFP_KERNEL); | ||
118 | if (!buf) | ||
119 | return -ENOMEM; | ||
120 | rc = -EFAULT; | ||
121 | while (offs < count) { | ||
122 | size = min(PAGE_SIZE, count - offs); | ||
123 | if (memcpy_real(buf, src + offs, size)) | ||
124 | goto out; | ||
125 | if (copy_to_user(dest + offs, buf, size)) | ||
126 | goto out; | ||
127 | offs += size; | ||
128 | } | ||
129 | rc = 0; | ||
130 | out: | ||
131 | free_page((unsigned long) buf); | ||
132 | return rc; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Copy memory from user (virtual) to kernel (real) | ||
137 | */ | ||
138 | int copy_from_user_real(void *dest, void __user *src, size_t count) | ||
139 | { | ||
140 | int offs = 0, size, rc; | ||
141 | char *buf; | ||
142 | |||
143 | buf = (char *) __get_free_page(GFP_KERNEL); | ||
144 | if (!buf) | ||
145 | return -ENOMEM; | ||
146 | rc = -EFAULT; | ||
147 | while (offs < count) { | ||
148 | size = min(PAGE_SIZE, count - offs); | ||
149 | if (copy_from_user(buf, src + offs, size)) | ||
150 | goto out; | ||
151 | if (memcpy_real(dest + offs, buf, size)) | ||
152 | goto out; | ||
153 | offs += size; | ||
154 | } | ||
155 | rc = 0; | ||
156 | out: | ||
157 | free_page((unsigned long) buf); | ||
158 | return rc; | ||
159 | } | ||
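copy_to_user_real and copy_from_user_real stage every transfer through a single page-sized bounce buffer, moving min(PAGE_SIZE, remaining) bytes per pass so memcpy_real never has to handle more than one page at a time. A stand-alone sketch of the chunking loop only, with both the "real" and "user" sides replaced by ordinary buffers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Copy "count" bytes from src to dest through a one-page bounce buffer. */
static int copy_chunked(void *dest, const void *src, size_t count)
{
	char *buf = malloc(PAGE_SIZE);
	size_t offs = 0;

	if (!buf)
		return -1;
	while (offs < count) {
		size_t size = count - offs < PAGE_SIZE ? count - offs : PAGE_SIZE;

		memcpy(buf, (const char *) src + offs, size);	/* "read" side */
		memcpy((char *) dest + offs, buf, size);	/* "write" side */
		offs += size;
	}
	free(buf);
	return 0;
}

int main(void)
{
	static char src[10000], dst[10000];

	memset(src, 0x5a, sizeof(src));
	if (copy_chunked(dst, src, sizeof(src)) == 0 &&
	    memcmp(src, dst, sizeof(src)) == 0)
		puts("copied in page-sized chunks");
	return 0;
}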
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index c9a9f7f18188..f09c74881b7e 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/personality.h> | 27 | #include <linux/personality.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/mman.h> | ||
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/random.h> | 31 | #include <linux/random.h> |
31 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index d013ed39743b..b36537a5f43e 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/hugetlb.h> | 7 | #include <linux/hugetlb.h> |
8 | #include <asm/cacheflush.h> | ||
8 | #include <asm/pgtable.h> | 9 | #include <asm/pgtable.h> |
9 | 10 | ||
10 | static void change_page_attr(unsigned long addr, int numpages, | 11 | static void change_page_attr(unsigned long addr, int numpages, |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 5d56c2b95b14..301c84d3b542 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2007,2009 | 2 | * Copyright IBM Corp. 2007,2011 |
3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | 3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
4 | */ | 4 | */ |
5 | 5 | ||
@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap) | |||
222 | 222 | ||
223 | /* Free all segment & region tables. */ | 223 | /* Free all segment & region tables. */ |
224 | down_read(&gmap->mm->mmap_sem); | 224 | down_read(&gmap->mm->mmap_sem); |
225 | spin_lock(&gmap->mm->page_table_lock); | ||
225 | list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { | 226 | list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { |
226 | table = (unsigned long *) page_to_phys(page); | 227 | table = (unsigned long *) page_to_phys(page); |
227 | if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) | 228 | if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) |
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap) | |||
230 | gmap_unlink_segment(gmap, table); | 231 | gmap_unlink_segment(gmap, table); |
231 | __free_pages(page, ALLOC_ORDER); | 232 | __free_pages(page, ALLOC_ORDER); |
232 | } | 233 | } |
234 | spin_unlock(&gmap->mm->page_table_lock); | ||
233 | up_read(&gmap->mm->mmap_sem); | 235 | up_read(&gmap->mm->mmap_sem); |
234 | list_del(&gmap->list); | 236 | list_del(&gmap->list); |
235 | kfree(gmap); | 237 | kfree(gmap); |
@@ -256,6 +258,9 @@ void gmap_disable(struct gmap *gmap) | |||
256 | } | 258 | } |
257 | EXPORT_SYMBOL_GPL(gmap_disable); | 259 | EXPORT_SYMBOL_GPL(gmap_disable); |
258 | 260 | ||
261 | /* | ||
262 | * gmap_alloc_table is assumed to be called with mmap_sem held | ||
263 | */ | ||
259 | static int gmap_alloc_table(struct gmap *gmap, | 264 | static int gmap_alloc_table(struct gmap *gmap, |
260 | unsigned long *table, unsigned long init) | 265 | unsigned long *table, unsigned long init) |
261 | { | 266 | { |
@@ -267,14 +272,12 @@ static int gmap_alloc_table(struct gmap *gmap, | |||
267 | return -ENOMEM; | 272 | return -ENOMEM; |
268 | new = (unsigned long *) page_to_phys(page); | 273 | new = (unsigned long *) page_to_phys(page); |
269 | crst_table_init(new, init); | 274 | crst_table_init(new, init); |
270 | down_read(&gmap->mm->mmap_sem); | ||
271 | if (*table & _REGION_ENTRY_INV) { | 275 | if (*table & _REGION_ENTRY_INV) { |
272 | list_add(&page->lru, &gmap->crst_list); | 276 | list_add(&page->lru, &gmap->crst_list); |
273 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | | 277 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | |
274 | (*table & _REGION_ENTRY_TYPE_MASK); | 278 | (*table & _REGION_ENTRY_TYPE_MASK); |
275 | } else | 279 | } else |
276 | __free_pages(page, ALLOC_ORDER); | 280 | __free_pages(page, ALLOC_ORDER); |
277 | up_read(&gmap->mm->mmap_sem); | ||
278 | return 0; | 281 | return 0; |
279 | } | 282 | } |
280 | 283 | ||
@@ -299,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
299 | 302 | ||
300 | flush = 0; | 303 | flush = 0; |
301 | down_read(&gmap->mm->mmap_sem); | 304 | down_read(&gmap->mm->mmap_sem); |
305 | spin_lock(&gmap->mm->page_table_lock); | ||
302 | for (off = 0; off < len; off += PMD_SIZE) { | 306 | for (off = 0; off < len; off += PMD_SIZE) { |
303 | /* Walk the guest addr space page table */ | 307 | /* Walk the guest addr space page table */ |
304 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 308 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
@@ -320,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
320 | *table = _SEGMENT_ENTRY_INV; | 324 | *table = _SEGMENT_ENTRY_INV; |
321 | } | 325 | } |
322 | out: | 326 | out: |
327 | spin_unlock(&gmap->mm->page_table_lock); | ||
323 | up_read(&gmap->mm->mmap_sem); | 328 | up_read(&gmap->mm->mmap_sem); |
324 | if (flush) | 329 | if (flush) |
325 | gmap_flush_tlb(gmap); | 330 | gmap_flush_tlb(gmap); |
@@ -350,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, | |||
350 | 355 | ||
351 | flush = 0; | 356 | flush = 0; |
352 | down_read(&gmap->mm->mmap_sem); | 357 | down_read(&gmap->mm->mmap_sem); |
358 | spin_lock(&gmap->mm->page_table_lock); | ||
353 | for (off = 0; off < len; off += PMD_SIZE) { | 359 | for (off = 0; off < len; off += PMD_SIZE) { |
354 | /* Walk the gmap address space page table */ | 360 | /* Walk the gmap address space page table */ |
355 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 361 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
@@ -373,19 +379,24 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, | |||
373 | flush |= gmap_unlink_segment(gmap, table); | 379 | flush |= gmap_unlink_segment(gmap, table); |
374 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); | 380 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); |
375 | } | 381 | } |
382 | spin_unlock(&gmap->mm->page_table_lock); | ||
376 | up_read(&gmap->mm->mmap_sem); | 383 | up_read(&gmap->mm->mmap_sem); |
377 | if (flush) | 384 | if (flush) |
378 | gmap_flush_tlb(gmap); | 385 | gmap_flush_tlb(gmap); |
379 | return 0; | 386 | return 0; |
380 | 387 | ||
381 | out_unmap: | 388 | out_unmap: |
389 | spin_unlock(&gmap->mm->page_table_lock); | ||
382 | up_read(&gmap->mm->mmap_sem); | 390 | up_read(&gmap->mm->mmap_sem); |
383 | gmap_unmap_segment(gmap, to, len); | 391 | gmap_unmap_segment(gmap, to, len); |
384 | return -ENOMEM; | 392 | return -ENOMEM; |
385 | } | 393 | } |
386 | EXPORT_SYMBOL_GPL(gmap_map_segment); | 394 | EXPORT_SYMBOL_GPL(gmap_map_segment); |
387 | 395 | ||
388 | unsigned long gmap_fault(unsigned long address, struct gmap *gmap) | 396 | /* |
397 | * this function is assumed to be called with mmap_sem held | ||
398 | */ | ||
399 | unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) | ||
389 | { | 400 | { |
390 | unsigned long *table, vmaddr, segment; | 401 | unsigned long *table, vmaddr, segment; |
391 | struct mm_struct *mm; | 402 | struct mm_struct *mm; |
@@ -445,16 +456,75 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap) | |||
445 | page = pmd_page(*pmd); | 456 | page = pmd_page(*pmd); |
446 | mp = (struct gmap_pgtable *) page->index; | 457 | mp = (struct gmap_pgtable *) page->index; |
447 | rmap->entry = table; | 458 | rmap->entry = table; |
459 | spin_lock(&mm->page_table_lock); | ||
448 | list_add(&rmap->list, &mp->mapper); | 460 | list_add(&rmap->list, &mp->mapper); |
461 | spin_unlock(&mm->page_table_lock); | ||
449 | /* Set gmap segment table entry to page table. */ | 462 | /* Set gmap segment table entry to page table. */ |
450 | *table = pmd_val(*pmd) & PAGE_MASK; | 463 | *table = pmd_val(*pmd) & PAGE_MASK; |
451 | return vmaddr | (address & ~PMD_MASK); | 464 | return vmaddr | (address & ~PMD_MASK); |
452 | } | 465 | } |
453 | return -EFAULT; | 466 | return -EFAULT; |
467 | } | ||
454 | 468 | ||
469 | unsigned long gmap_fault(unsigned long address, struct gmap *gmap) | ||
470 | { | ||
471 | unsigned long rc; | ||
472 | |||
473 | down_read(&gmap->mm->mmap_sem); | ||
474 | rc = __gmap_fault(address, gmap); | ||
475 | up_read(&gmap->mm->mmap_sem); | ||
476 | |||
477 | return rc; | ||
455 | } | 478 | } |
456 | EXPORT_SYMBOL_GPL(gmap_fault); | 479 | EXPORT_SYMBOL_GPL(gmap_fault); |
457 | 480 | ||
481 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) | ||
482 | { | ||
483 | |||
484 | unsigned long *table, address, size; | ||
485 | struct vm_area_struct *vma; | ||
486 | struct gmap_pgtable *mp; | ||
487 | struct page *page; | ||
488 | |||
489 | down_read(&gmap->mm->mmap_sem); | ||
490 | address = from; | ||
491 | while (address < to) { | ||
492 | /* Walk the gmap address space page table */ | ||
493 | table = gmap->table + ((address >> 53) & 0x7ff); | ||
494 | if (unlikely(*table & _REGION_ENTRY_INV)) { | ||
495 | address = (address + PMD_SIZE) & PMD_MASK; | ||
496 | continue; | ||
497 | } | ||
498 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
499 | table = table + ((address >> 42) & 0x7ff); | ||
500 | if (unlikely(*table & _REGION_ENTRY_INV)) { | ||
501 | address = (address + PMD_SIZE) & PMD_MASK; | ||
502 | continue; | ||
503 | } | ||
504 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
505 | table = table + ((address >> 31) & 0x7ff); | ||
506 | if (unlikely(*table & _REGION_ENTRY_INV)) { | ||
507 | address = (address + PMD_SIZE) & PMD_MASK; | ||
508 | continue; | ||
509 | } | ||
510 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
511 | table = table + ((address >> 20) & 0x7ff); | ||
512 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) { | ||
513 | address = (address + PMD_SIZE) & PMD_MASK; | ||
514 | continue; | ||
515 | } | ||
516 | page = pfn_to_page(*table >> PAGE_SHIFT); | ||
517 | mp = (struct gmap_pgtable *) page->index; | ||
518 | vma = find_vma(gmap->mm, mp->vmaddr); | ||
519 | size = min(to - address, PMD_SIZE - (address & ~PMD_MASK)); | ||
520 | zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK), | ||
521 | size, NULL); | ||
522 | address = (address + PMD_SIZE) & PMD_MASK; | ||
523 | } | ||
524 | up_read(&gmap->mm->mmap_sem); | ||
525 | } | ||
526 | EXPORT_SYMBOL_GPL(gmap_discard); | ||
527 | |||
458 | void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) | 528 | void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) |
459 | { | 529 | { |
460 | struct gmap_rmap *rmap, *next; | 530 | struct gmap_rmap *rmap, *next; |
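gmap_discard walks the guest address space through four table levels, taking an 11-bit index at shifts 53, 42, 31 and 20 and skipping a whole segment whenever an entry is invalid. The snippet below only prints those indices for a sample address; it assumes the 1 MB segment size implied by the 20-bit shift:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x0000123456789abcUL;	/* arbitrary guest address */

	printf("region-1 index: %#lx\n", (addr >> 53) & 0x7ff);
	printf("region-2 index: %#lx\n", (addr >> 42) & 0x7ff);
	printf("region-3 index: %#lx\n", (addr >> 31) & 0x7ff);
	printf("segment  index: %#lx\n", (addr >> 20) & 0x7ff);
	printf("segment offset: %#lx\n", addr & ((1UL << 20) - 1));
	return 0;
}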
@@ -662,8 +732,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) | |||
662 | 732 | ||
663 | void __tlb_remove_table(void *_table) | 733 | void __tlb_remove_table(void *_table) |
664 | { | 734 | { |
665 | void *table = (void *)((unsigned long) _table & PAGE_MASK); | 735 | const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK; |
666 | unsigned type = (unsigned long) _table & ~PAGE_MASK; | 736 | void *table = (void *)((unsigned long) _table & ~mask); |
737 | unsigned type = (unsigned long) _table & mask; | ||
667 | 738 | ||
668 | if (type) | 739 | if (type) |
669 | __page_table_free_rcu(table, type); | 740 | __page_table_free_rcu(table, type); |
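The __tlb_remove_table change narrows which low bits of the table pointer carry the type tag: with 2 KB page-table fragments the pointer is no longer page aligned, so masking with PAGE_MASK would have thrown away part of the address. The sketch below shows the general pack/unpack pattern only; FRAG_MASK's actual value is not visible in this hunk, so a made-up 4-bit tag mask is used:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TAG_MASK 0xfUL	/* hypothetical: assumes at least 16-byte aligned pointers */

static void *pack(void *p, unsigned long tag)
{
	return (void *) ((uintptr_t) p | (tag & TAG_MASK));
}

static void unpack(void *packed, void **p, unsigned long *tag)
{
	*p = (void *) ((uintptr_t) packed & ~TAG_MASK);
	*tag = (uintptr_t) packed & TAG_MASK;
}

int main(void)
{
	void *table = aligned_alloc(16, 64);
	unsigned long tag;
	void *p;

	unpack(pack(table, 3), &p, &tag);
	printf("pointer intact: %d, tag: %lu\n", p == table, tag);
	free(table);
	return 0;
}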
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 781ff5169560..4799383e2df9 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -335,6 +335,9 @@ void __init vmem_map_init(void) | |||
335 | ro_start = ((unsigned long)&_stext) & PAGE_MASK; | 335 | ro_start = ((unsigned long)&_stext) & PAGE_MASK; |
336 | ro_end = PFN_ALIGN((unsigned long)&_eshared); | 336 | ro_end = PFN_ALIGN((unsigned long)&_eshared); |
337 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 337 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { |
338 | if (memory_chunk[i].type == CHUNK_CRASHK || | ||
339 | memory_chunk[i].type == CHUNK_OLDMEM) | ||
340 | continue; | ||
338 | start = memory_chunk[i].addr; | 341 | start = memory_chunk[i].addr; |
339 | end = memory_chunk[i].addr + memory_chunk[i].size; | 342 | end = memory_chunk[i].addr + memory_chunk[i].size; |
340 | if (start >= ro_end || end <= ro_start) | 343 | if (start >= ro_end || end <= ro_start) |
@@ -368,6 +371,9 @@ static int __init vmem_convert_memory_chunk(void) | |||
368 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 371 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
369 | if (!memory_chunk[i].size) | 372 | if (!memory_chunk[i].size) |
370 | continue; | 373 | continue; |
374 | if (memory_chunk[i].type == CHUNK_CRASHK || | ||
375 | memory_chunk[i].type == CHUNK_OLDMEM) | ||
376 | continue; | ||
371 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); | 377 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); |
372 | if (!seg) | 378 | if (!seg) |
373 | panic("Out of memory...\n"); | 379 | panic("Out of memory...\n"); |
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 4552ce40c81a..f43c0e4282af 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -994,7 +994,7 @@ allocate_error: | |||
994 | * | 994 | * |
995 | * Returns 0 on success, !0 on failure. | 995 | * Returns 0 on success, !0 on failure. |
996 | */ | 996 | */ |
997 | int hwsampler_deallocate() | 997 | int hwsampler_deallocate(void) |
998 | { | 998 | { |
999 | int rc; | 999 | int rc; |
1000 | 1000 | ||
@@ -1035,7 +1035,7 @@ unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) | |||
1035 | return cb->sample_overflow; | 1035 | return cb->sample_overflow; |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | int hwsampler_setup() | 1038 | int hwsampler_setup(void) |
1039 | { | 1039 | { |
1040 | int rc; | 1040 | int rc; |
1041 | int cpu; | 1041 | int cpu; |
@@ -1102,7 +1102,7 @@ setup_exit: | |||
1102 | return rc; | 1102 | return rc; |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | int hwsampler_shutdown() | 1105 | int hwsampler_shutdown(void) |
1106 | { | 1106 | { |
1107 | int rc; | 1107 | int rc; |
1108 | 1108 | ||
@@ -1203,7 +1203,7 @@ start_all_exit: | |||
1203 | * | 1203 | * |
1204 | * Returns 0 on success, !0 on failure. | 1204 | * Returns 0 on success, !0 on failure. |
1205 | */ | 1205 | */ |
1206 | int hwsampler_stop_all() | 1206 | int hwsampler_stop_all(void) |
1207 | { | 1207 | { |
1208 | int tmp_rc, rc, cpu; | 1208 | int tmp_rc, rc, cpu; |
1209 | struct hws_cpu_buffer *cb; | 1209 | struct hws_cpu_buffer *cb; |
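The hwsampler hunks change empty parameter lists to (void), in line with the sparse ANSI-C cleanups in this series. In C, unlike C++, int f() leaves the parameters unspecified, so calls cannot be checked against the definition; int f(void) pins the prototype down. A minimal illustration:

#include <stdio.h>

static int old_style();		/* empty parentheses: parameters unspecified */
static int new_style(void);	/* prototype: takes no arguments */

static int old_style() { return 1; }
static int new_style(void) { return 2; }

int main(void)
{
	/* With the empty parentheses a call like old_style(42) slips past the
	 * compiler in traditional C; with (void) it is always rejected. */
	printf("%d %d\n", old_style(), new_style());
	return 0;
}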