author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-31 19:14:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-31 19:14:20 -0400
commit     32087d4eeca14b82660dab288b1d659963b954bd (patch)
tree       8c131ca9bf08f88d3b02e1937b795a42f8951d79
parent     b1c907f3b2675ecb01e340948fc62d6535ff5ac3 (diff)
parent     07ea815b22b9f70ec8de6ddf8db63a1dd1585caf (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (54 commits)
  [S390] Remove error checking from copy_oldmem_page()
  [S390] qdio: prevent dsci access without adapter interrupts
  [S390] irqstats: split IPI interrupt accounting
  [S390] add missing __tlb_flush_global() for !CONFIG_SMP
  [S390] sparse: fix sparse symbol shadow warning
  [S390] sparse: fix sparse NULL pointer warnings
  [S390] sparse: fix sparse warnings with __user pointers
  [S390] sparse: fix sparse warnings in math-emu
  [S390] sparse: fix sparse warnings about missing prototypes
  [S390] sparse: fix sparse ANSI-C warnings
  [S390] sparse: fix sparse static warnings
  [S390] sparse: fix access past end of array warnings
  [S390] dasd: prevent path verification before resume
  [S390] qdio: remove multicast polling
  [S390] qdio: reset outbound SBAL error states
  [S390] qdio: EQBS retry after CCQ 96
  [S390] qdio: add timestamp for last queue scan time
  [S390] Introduce get_clock_fast()
  [S390] kvm: Handle diagnose 0x10 (release pages)
  [S390] take mmap_sem when walking guest page table
  ...
-rw-r--r--  Documentation/kernel-parameters.txt | 6
-rw-r--r--  arch/s390/Kconfig | 10
-rw-r--r--  arch/s390/boot/compressed/misc.c | 2
-rw-r--r--  arch/s390/defconfig | 2
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 3
-rw-r--r--  arch/s390/include/asm/compat.h | 13
-rw-r--r--  arch/s390/include/asm/ipl.h | 1
-rw-r--r--  arch/s390/include/asm/irq.h | 6
-rw-r--r--  arch/s390/include/asm/kexec.h | 6
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 1
-rw-r--r--  arch/s390/include/asm/lowcore.h | 10
-rw-r--r--  arch/s390/include/asm/page.h | 1
-rw-r--r--  arch/s390/include/asm/pgtable.h | 2
-rw-r--r--  arch/s390/include/asm/processor.h | 48
-rw-r--r--  arch/s390/include/asm/ptrace.h | 29
-rw-r--r--  arch/s390/include/asm/reset.h | 2
-rw-r--r--  arch/s390/include/asm/setup.h | 13
-rw-r--r--  arch/s390/include/asm/sfp-util.h | 2
-rw-r--r--  arch/s390/include/asm/smp.h | 5
-rw-r--r--  arch/s390/include/asm/spinlock.h | 2
-rw-r--r--  arch/s390/include/asm/syscall.h | 6
-rw-r--r--  arch/s390/include/asm/system.h | 10
-rw-r--r--  arch/s390/include/asm/thread_info.h | 7
-rw-r--r--  arch/s390/include/asm/timex.h | 11
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 1
-rw-r--r--  arch/s390/kernel/Makefile | 1
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 4
-rw-r--r--  arch/s390/kernel/base.S | 6
-rw-r--r--  arch/s390/kernel/compat_linux.c | 32
-rw-r--r--  arch/s390/kernel/compat_signal.c | 71
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 3
-rw-r--r--  arch/s390/kernel/crash_dump.c | 426
-rw-r--r--  arch/s390/kernel/early.c | 2
-rw-r--r--  arch/s390/kernel/entry.S | 84
-rw-r--r--  arch/s390/kernel/entry.h | 13
-rw-r--r--  arch/s390/kernel/entry64.S | 86
-rw-r--r--  arch/s390/kernel/head.S | 22
-rw-r--r--  arch/s390/kernel/head31.S | 2
-rw-r--r--  arch/s390/kernel/head64.S | 2
-rw-r--r--  arch/s390/kernel/head_kdump.S | 119
-rw-r--r--  arch/s390/kernel/ipl.c | 26
-rw-r--r--  arch/s390/kernel/irq.c | 6
-rw-r--r--  arch/s390/kernel/kprobes.c | 2
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 192
-rw-r--r--  arch/s390/kernel/mem_detect.c | 69
-rw-r--r--  arch/s390/kernel/process.c | 4
-rw-r--r--  arch/s390/kernel/processor.c | 2
-rw-r--r--  arch/s390/kernel/ptrace.c | 124
-rw-r--r--  arch/s390/kernel/reipl.S | 6
-rw-r--r--  arch/s390/kernel/reipl64.S | 9
-rw-r--r--  arch/s390/kernel/setup.c | 275
-rw-r--r--  arch/s390/kernel/signal.c | 128
-rw-r--r--  arch/s390/kernel/smp.c | 73
-rw-r--r--  arch/s390/kernel/suspend.c | 1
-rw-r--r--  arch/s390/kernel/sysinfo.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 1
-rw-r--r--  arch/s390/kernel/topology.c | 4
-rw-r--r--  arch/s390/kernel/traps.c | 5
-rw-r--r--  arch/s390/kernel/vtime.c | 11
-rw-r--r--  arch/s390/kvm/diag.c | 32
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 1
-rw-r--r--  arch/s390/lib/delay.c | 3
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 6
-rw-r--r--  arch/s390/mm/fault.c | 6
-rw-r--r--  arch/s390/mm/maccess.c | 56
-rw-r--r--  arch/s390/mm/mmap.c | 1
-rw-r--r--  arch/s390/mm/pageattr.c | 1
-rw-r--r--  arch/s390/mm/pgtable.c | 83
-rw-r--r--  arch/s390/mm/vmem.c | 6
-rw-r--r--  arch/s390/oprofile/hwsampler.c | 8
-rw-r--r--  drivers/s390/block/dasd.c | 19
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 45
-rw-r--r--  drivers/s390/block/dasd_fba.c | 1
-rw-r--r--  drivers/s390/block/dasd_int.h | 2
-rw-r--r--  drivers/s390/char/con3215.c | 3
-rw-r--r--  drivers/s390/char/raw3270.c | 3
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 4
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 3
-rw-r--r--  drivers/s390/char/tape_34xx.c | 1
-rw-r--r--  drivers/s390/char/tape_3590.c | 1
-rw-r--r--  drivers/s390/char/tape_core.c | 2
-rw-r--r--  drivers/s390/char/vmur.c | 3
-rw-r--r--  drivers/s390/char/zcore.c | 21
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 333
-rw-r--r--  drivers/s390/cio/ccwreq.c | 23
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 5
-rw-r--r--  drivers/s390/cio/cio.c | 19
-rw-r--r--  drivers/s390/cio/css.h | 2
-rw-r--r--  drivers/s390/cio/device.c | 13
-rw-r--r--  drivers/s390/cio/device.h | 13
-rw-r--r--  drivers/s390/cio/io_sch.h | 2
-rw-r--r--  drivers/s390/cio/qdio.h | 40
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 10
-rw-r--r--  drivers/s390/cio/qdio_main.c | 102
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 52
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 8
-rw-r--r--  drivers/s390/net/claw.c | 3
-rw-r--r--  drivers/s390/net/ctcm_main.c | 3
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c | 2
-rw-r--r--  drivers/s390/net/lcs.c | 5
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 2
-rw-r--r--  include/linux/crash_dump.h | 1
-rw-r--r--  include/linux/elf.h | 1
-rw-r--r--  include/linux/kexec.h | 10
-rw-r--r--  kernel/crash_dump.c | 11
-rw-r--r--  kernel/kexec.c | 41
-rw-r--r--  kernel/sysctl.c | 8
107 files changed, 2236 insertions, 796 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 27e0488d54d2..661efd4dab6b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -741,10 +741,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			See Documentation/block/cfq-iosched.txt and
 			Documentation/block/deadline-iosched.txt for details.
 
-	elfcorehdr=	[IA-64,PPC,SH,X86]
+	elfcorehdr=[size[KMG]@]offset[KMG] [IA64,PPC,SH,X86,S390]
 			Specifies physical address of start of kernel core
-			image elf header. Generally kexec loader will
-			pass this option to capture kernel.
+			image elf header and optionally the size. Generally
+			kexec loader will pass this option to capture kernel.
 			See Documentation/kdump/kdump.txt for details.
 
 	enable_mtrr_cleanup	[X86]
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 6b99fc3f9b63..a9fbd43395f7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -569,6 +569,16 @@ config KEXEC
 	  current kernel, and to start another kernel. It is like a reboot
 	  but is independent of hardware/microcode support.
 
+config CRASH_DUMP
+	bool "kernel crash dumps"
+	depends on 64BIT
+	help
+	  Generate crash dump after being started by kexec.
+	  Crash dump kernels are loaded in the main kernel with kexec-tools
+	  into a specially reserved region and then later executed after
+	  a crash by kdump/kexec.
+	  For more details see Documentation/kdump/kdump.txt
+
 config ZFCPDUMP
 	def_bool n
 	prompt "zfcpdump support"
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 028f23ea81d1..465eca756feb 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -61,7 +61,7 @@ static unsigned long free_mem_end_ptr;
 
 extern _sclp_print_early(const char *);
 
-int puts(const char *s)
+static int puts(const char *s)
 {
 	_sclp_print_early(s);
 	return 0;
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 29c82c640a88..6cf8e26b3137 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -68,7 +68,7 @@ CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 623f2fb71774..9381c92cc779 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -11,6 +11,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <asm/fcx.h>
+#include <asm/irq.h>
 
 /* structs from asm/cio.h */
 struct irb;
@@ -127,6 +128,7 @@ enum uc_todo {
  * @restore: callback for restoring after hibernation
  * @uc_handler: callback for unit check handler
  * @driver: embedded device driver structure
+ * @int_class: interruption class to use for accounting interrupts
  */
 struct ccw_driver {
 	struct ccw_device_id *ids;
@@ -144,6 +146,7 @@ struct ccw_driver {
 	int (*restore)(struct ccw_device *);
 	enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
 	struct device_driver driver;
+	enum interruption_class int_class;
 };
 
 extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index cdb9b78f6c08..2e49748b27da 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -12,6 +12,7 @@
 #define PSW32_MASK_IO		0x02000000UL
 #define PSW32_MASK_EXT		0x01000000UL
 #define PSW32_MASK_KEY		0x00F00000UL
+#define PSW32_MASK_BASE		0x00080000UL	/* Always one */
 #define PSW32_MASK_MCHECK	0x00040000UL
 #define PSW32_MASK_WAIT		0x00020000UL
 #define PSW32_MASK_PSTATE	0x00010000UL
@@ -19,21 +20,19 @@
 #define PSW32_MASK_CC		0x00003000UL
 #define PSW32_MASK_PM		0x00000f00UL
 
-#define PSW32_ADDR_AMODE31	0x80000000UL
+#define PSW32_MASK_USER		0x00003F00UL
+
+#define PSW32_ADDR_AMODE	0x80000000UL
 #define PSW32_ADDR_INSN		0x7FFFFFFFUL
 
-#define PSW32_BASE_BITS		0x00080000UL
+#define PSW32_DEFAULT_KEY	(((u32) PAGE_DEFAULT_ACC) << 20)
 
 #define PSW32_ASC_PRIMARY	0x00000000UL
 #define PSW32_ASC_ACCREG	0x00004000UL
 #define PSW32_ASC_SECONDARY	0x00008000UL
 #define PSW32_ASC_HOME		0x0000C000UL
 
-#define PSW32_MASK_MERGE(CURRENT,NEW) \
-	(((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
-	 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
-
-extern long psw32_user_bits;
+extern u32 psw32_user_bits;
 
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"s390\0\0\0\0"
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 97cc4403fabf..6940abfbe1d9 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -168,5 +168,6 @@ enum diag308_rc {
 
 extern int diag308(unsigned long subcode, void *addr);
 extern void diag308_reset(void);
+extern void store_status(void);
 
 #endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index ba7b01c726a3..ba6d85f88d50 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -8,7 +8,8 @@ enum interruption_class {
 	EXTERNAL_INTERRUPT,
 	IO_INTERRUPT,
 	EXTINT_CLK,
-	EXTINT_IPI,
+	EXTINT_EXC,
+	EXTINT_EMS,
 	EXTINT_TMR,
 	EXTINT_TLA,
 	EXTINT_PFL,
@@ -17,8 +18,8 @@ enum interruption_class {
 	EXTINT_SCP,
 	EXTINT_IUC,
 	EXTINT_CPM,
+	IOINT_CIO,
 	IOINT_QAI,
-	IOINT_QDI,
 	IOINT_DAS,
 	IOINT_C15,
 	IOINT_C70,
@@ -28,6 +29,7 @@ enum interruption_class {
 	IOINT_CLW,
 	IOINT_CTC,
 	IOINT_APB,
+	IOINT_CSC,
 	NMI_NMI,
 	NR_IRQS,
 };
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index bb729b84a21e..cf4e47b0948c 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -30,9 +30,15 @@
 /* Not more than 2GB */
 #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
 
+/* Maximum address we can use for the crash control pages */
+#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
+
 /* Allocate one page for the pdp and the second for the code */
 #define KEXEC_CONTROL_PAGE_SIZE 4096
 
+/* Alignment of crashkernel memory */
+#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE
+
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 1ca5de07ac36..24e18473d926 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -145,6 +145,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_sigp_arch;
 	u32 instruction_sigp_prefix;
 	u32 instruction_sigp_restart;
+	u32 diagnose_10;
 	u32 diagnose_44;
 };
 
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index e85c911aabf0..9e13c7d56cc1 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -151,10 +151,8 @@ struct _lowcore {
 	 */
 	__u32	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e04 */
-
-	/* 64 bit save area */
-	__u64	save_area_64;			/* 0x0e08 */
-	__u8	pad_0x0e10[0x0f00-0x0e10];	/* 0x0e10 */
+	__u32	vmcore_info;			/* 0x0e08 */
+	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */
@@ -290,9 +288,7 @@ struct _lowcore {
 	 */
 	__u64	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e08 */
-
-	/* 64 bit save area */
-	__u64	save_area_64;			/* 0x0e0c */
+	__u64	vmcore_info;			/* 0x0e0c */
 	__u8	pad_0x0e14[0x0f00-0x0e14];	/* 0x0e14 */
 
 	/* Extended facility list */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index accb372ddc7e..f7ec548c2b9d 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -177,6 +177,7 @@ static inline int page_test_and_clear_young(unsigned long pfn)
 struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
+void arch_set_page_states(int make_stable);
 
 static inline int devmem_is_allowed(unsigned long pfn)
 {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c0cb794bb365..34ede0ea85a9 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -696,7 +696,9 @@ void gmap_disable(struct gmap *gmap);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long length);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
+void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
 
 /*
  * Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index a4b6229e5d4b..5f33d37d032c 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -33,6 +33,8 @@ static inline void get_cpu_id(struct cpuid *ptr)
 
 extern void s390_adjust_jiffies(void);
 extern int get_cpu_capability(unsigned int *);
+extern const struct seq_operations cpuinfo_op;
+extern int sysctl_ieee_emulation_warnings;
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -118,17 +120,17 @@ struct stack_frame {
 /*
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do { \
-	regs->psw.mask = psw_user_bits; \
+	regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
 	regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
 	regs->gprs[15] = new_stackp; \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do { \
-	regs->psw.mask = psw_user32_bits; \
+	regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
 	regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
 	regs->gprs[15] = new_stackp; \
 	crst_table_downgrade(current->mm, 1UL << 31); \
 } while (0)
 
 /* Forward declaration, a strange C thing */
@@ -187,7 +189,6 @@ static inline void __load_psw(psw_t psw)
  * Set PSW mask to specified value, while leaving the
  * PSW addr pointing to the next instruction.
  */
-
 static inline void __load_psw_mask (unsigned long mask)
 {
 	unsigned long addr;
@@ -212,26 +213,37 @@ static inline void __load_psw_mask (unsigned long mask)
 		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
 #endif /* __s390x__ */
 }
 
 /*
- * Function to stop a processor until an interruption occurred
+ * Rewind PSW instruction address by specified number of bytes.
  */
-static inline void enabled_wait(void)
+static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 {
-	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
-			PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY);
-}
+#ifndef __s390x__
+	if (psw.addr & PSW_ADDR_AMODE)
+		/* 31 bit mode */
+		return (psw.addr - ilc) | PSW_ADDR_AMODE;
+	/* 24 bit mode */
+	return (psw.addr - ilc) & ((1UL << 24) - 1);
+#else
+	unsigned long mask;
 
+	mask = (psw.mask & PSW_MASK_EA) ? -1UL :
+	       (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
+	       (1UL << 24) - 1;
+	return (psw.addr - ilc) & mask;
+#endif
+}
+
 /*
  * Function to drop a processor into disabled wait state
  */
-
 static inline void ATTRIB_NORET disabled_wait(unsigned long code)
 {
 	unsigned long ctl_buf;
 	psw_t dw_psw;
 
-	dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+	dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
 	dw_psw.addr = code;
 	/*
 	 * Store status and then load disabled wait psw,
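
(Not part of the merge itself — a minimal illustrative sketch of how the new __rewind_psw() helper shown in the hunk above might be used by a caller; the function rewound_address() and its context are hypothetical and only assume the definitions visible in this diff.)

#include <asm/processor.h>
#include <asm/ptrace.h>

/* Back the PSW up by the length of the last instruction (ilc = 2, 4 or 6
 * bytes) so it can be re-executed; __rewind_psw() applies the addressing
 * mode mask derived from PSW_MASK_EA/PSW_MASK_BA (or the 24/31-bit rules
 * on 32-bit builds). */
static unsigned long rewound_address(struct pt_regs *regs, unsigned long ilc)
{
	return __rewind_psw(regs->psw, ilc);
}
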
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 62fd80c9e98c..a65846340d51 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -230,17 +230,21 @@ typedef struct
 #define PSW_MASK_IO		0x02000000UL
 #define PSW_MASK_EXT		0x01000000UL
 #define PSW_MASK_KEY		0x00F00000UL
+#define PSW_MASK_BASE		0x00080000UL	/* always one */
 #define PSW_MASK_MCHECK		0x00040000UL
 #define PSW_MASK_WAIT		0x00020000UL
 #define PSW_MASK_PSTATE		0x00010000UL
 #define PSW_MASK_ASC		0x0000C000UL
 #define PSW_MASK_CC		0x00003000UL
 #define PSW_MASK_PM		0x00000F00UL
+#define PSW_MASK_EA		0x00000000UL
+#define PSW_MASK_BA		0x00000000UL
+
+#define PSW_MASK_USER		0x00003F00UL
 
 #define PSW_ADDR_AMODE		0x80000000UL
 #define PSW_ADDR_INSN		0x7FFFFFFFUL
 
-#define PSW_BASE_BITS		0x00080000UL
 #define PSW_DEFAULT_KEY		(((unsigned long) PAGE_DEFAULT_ACC) << 20)
 
 #define PSW_ASC_PRIMARY		0x00000000UL
@@ -254,6 +258,7 @@ typedef struct
 #define PSW_MASK_DAT		0x0400000000000000UL
 #define PSW_MASK_IO		0x0200000000000000UL
 #define PSW_MASK_EXT		0x0100000000000000UL
+#define PSW_MASK_BASE		0x0000000000000000UL
 #define PSW_MASK_KEY		0x00F0000000000000UL
 #define PSW_MASK_MCHECK		0x0004000000000000UL
 #define PSW_MASK_WAIT		0x0002000000000000UL
@@ -261,12 +266,14 @@ typedef struct
 #define PSW_MASK_ASC		0x0000C00000000000UL
 #define PSW_MASK_CC		0x0000300000000000UL
 #define PSW_MASK_PM		0x00000F0000000000UL
+#define PSW_MASK_EA		0x0000000100000000UL
+#define PSW_MASK_BA		0x0000000080000000UL
+
+#define PSW_MASK_USER		0x00003F0180000000UL
 
 #define PSW_ADDR_AMODE		0x0000000000000000UL
 #define PSW_ADDR_INSN		0xFFFFFFFFFFFFFFFFUL
 
-#define PSW_BASE_BITS		0x0000000180000000UL
-#define PSW_BASE32_BITS		0x0000000080000000UL
 #define PSW_DEFAULT_KEY		(((unsigned long) PAGE_DEFAULT_ACC) << 52)
 
 #define PSW_ASC_PRIMARY		0x0000000000000000UL
@@ -279,18 +286,7 @@ typedef struct
 #ifdef __KERNEL__
 extern long psw_kernel_bits;
 extern long psw_user_bits;
-#ifdef CONFIG_64BIT
-extern long psw_user32_bits;
 #endif
-#endif
-
-/* This macro merges a NEW PSW mask specified by the user into
-   the currently active PSW mask CURRENT, modifying only those
-   bits in CURRENT that the user may be allowed to change: this
-   is the condition code and the program mask bits. */
-#define PSW_MASK_MERGE(CURRENT,NEW) \
-	(((CURRENT) & ~(PSW_MASK_CC|PSW_MASK_PM)) | \
-	 ((NEW) & (PSW_MASK_CC|PSW_MASK_PM)))
 
 /*
  * The s390_regs structure is used to define the elf_gregset_t.
@@ -328,8 +324,7 @@ struct pt_regs
 	psw_t psw;
 	unsigned long gprs[NUM_GPRS];
 	unsigned long orig_gpr2;
-	unsigned short ilc;
-	unsigned short svcnr;
+	unsigned int svc_code;
 };
 
 /*
@@ -487,6 +482,8 @@ typedef struct
 #define PTRACE_POKETEXT_AREA	      0x5004
 #define PTRACE_POKEDATA_AREA	      0x5005
 #define PTRACE_GET_LAST_BREAK	      0x5006
+#define PTRACE_PEEK_SYSTEM_CALL       0x5007
+#define PTRACE_POKE_SYSTEM_CALL	      0x5008
 
 /*
  * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index f584f4a52581..3d6ad4ad2a3f 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -17,5 +17,5 @@ struct reset_call {
 
 extern void register_reset_call(struct reset_call *reset);
 extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void);
+extern void s390_reset_system(void (*func)(void *), void *data);
 #endif /* _ASM_S390_RESET_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index d5e2ef10537d..5a099714df04 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -26,15 +26,21 @@
 #define IPL_DEVICE	(*(unsigned long *) (0x10404))
 #define INITRD_START	(*(unsigned long *) (0x1040C))
 #define INITRD_SIZE	(*(unsigned long *) (0x10414))
+#define OLDMEM_BASE	(*(unsigned long *) (0x1041C))
+#define OLDMEM_SIZE	(*(unsigned long *) (0x10424))
 #else /* __s390x__ */
 #define IPL_DEVICE	(*(unsigned long *) (0x10400))
 #define INITRD_START	(*(unsigned long *) (0x10408))
 #define INITRD_SIZE	(*(unsigned long *) (0x10410))
+#define OLDMEM_BASE	(*(unsigned long *) (0x10418))
+#define OLDMEM_SIZE	(*(unsigned long *) (0x10420))
 #endif /* __s390x__ */
 #define COMMAND_LINE	((char *) (0x10480))
 
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY  1
+#define CHUNK_OLDMEM	 4
+#define CHUNK_CRASHK	 5
 
 struct mem_chunk {
 	unsigned long addr;
@@ -48,6 +54,8 @@ extern int memory_end_set;
 extern unsigned long memory_end;
 
 void detect_memory_layout(struct mem_chunk chunk[]);
+void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
+		     unsigned long size, int type);
 
 #define PRIMARY_SPACE_MODE	0
 #define ACCESS_REGISTER_MODE	1
@@ -106,6 +114,7 @@ extern unsigned int user_mode;
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
+#define ZFCPDUMP_HSA_SIZE_MAX	(64UL<<20)
 
 /*
  * Console mode. Override with conmode=
@@ -134,10 +143,14 @@ extern char kernel_nss_name[];
 #define IPL_DEVICE	0x10404
 #define INITRD_START	0x1040C
 #define INITRD_SIZE	0x10414
+#define OLDMEM_BASE	0x1041C
+#define OLDMEM_SIZE	0x10424
 #else /* __s390x__ */
 #define IPL_DEVICE	0x10400
 #define INITRD_START	0x10408
 #define INITRD_SIZE	0x10410
+#define OLDMEM_BASE	0x10418
+#define OLDMEM_SIZE	0x10420
 #endif /* __s390x__ */
 #define COMMAND_LINE	0x10480
 
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 0addc6466d95..ca3f8814e361 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -72,6 +72,6 @@ extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
 
 #define UDIV_NEEDS_NORMALIZATION 0
 
-#define abort() return 0
+#define abort() BUG()
 
 #define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 045e009fc164..ab47a69fdf07 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -33,6 +33,7 @@ extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
 extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
 			      int from, int to);
+extern void smp_restart_with_online_cpu(void);
 extern void smp_restart_cpu(void);
 
 /*
@@ -64,6 +65,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 	func(data);
 }
 
+static inline void smp_restart_with_online_cpu(void)
+{
+}
+
 #define smp_vcpu_scheduled	(1)
 
 #endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 56612fc8186e..fd94dfec8d08 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -13,6 +13,8 @@
 
 #include <linux/smp.h>
 
+extern int spin_retry;
+
 static inline int
 _raw_compare_and_swap(volatile unsigned int *lock,
 		      unsigned int old, unsigned int new)
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 5c0246b955d8..b239ff53b189 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -13,6 +13,7 @@
 #define _ASM_SYSCALL_H	1
 
 #include <linux/sched.h>
+#include <linux/err.h>
 #include <asm/ptrace.h>
 
 /*
@@ -25,7 +26,8 @@ extern const unsigned int sys_call_table[];
 static inline long syscall_get_nr(struct task_struct *task,
 				  struct pt_regs *regs)
 {
-	return regs->svcnr ? regs->svcnr : -1;
+	return test_tsk_thread_flag(task, TIF_SYSCALL) ?
+		(regs->svc_code & 0xffff) : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
@@ -37,7 +39,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
 				     struct pt_regs *regs)
 {
-	return (regs->gprs[2] >= -4096UL) ? -regs->gprs[2] : 0;
+	return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0;
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
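
(Not part of the merge — an illustrative sketch, assuming a tracer-like context, of the syscall helpers after this change: syscall_get_nr() reports -1 unless TIF_SYSCALL is set, and syscall_get_error() now relies on IS_ERR_VALUE() over the negative-errno value in gpr 2. The helper report_syscall() is hypothetical.)

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Hypothetical helper: report the syscall a stopped task is executing. */
static void report_syscall(struct task_struct *task, struct pt_regs *regs)
{
	long nr = syscall_get_nr(task, regs);	  /* -1 when not in a syscall */
	long err = syscall_get_error(task, regs); /* 0 or a negative errno */

	pr_info("pid %d: syscall %ld, error %ld\n", task->pid, nr, err);
}
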
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 6582f69f2389..ef573c1d71a7 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,6 +20,8 @@
 
 struct task_struct;
 
+extern int sysctl_userprocess_debug;
+
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_per_regs(struct task_struct *task);
 
@@ -114,6 +116,8 @@ extern void pfault_fini(void);
 extern void cmma_init(void);
 extern int memcpy_real(void *, void *, size_t);
 extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern int copy_to_user_real(void __user *dest, void *src, size_t count);
+extern int copy_from_user_real(void *dest, void __user *src, size_t count);
 
 #define finish_arch_switch(prev) do { \
 	set_fs(current->thread.mm_segment); \
@@ -210,8 +214,10 @@ __set_psw_mask(unsigned long mask)
 	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
 }
 
-#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
-#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
+#define local_mcck_enable() \
+	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+#define local_mcck_disable() \
+	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
 
 #ifdef CONFIG_SMP
 
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 1a5dbb6f1495..a23183423b14 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -48,6 +48,7 @@ struct thread_info {
 	unsigned int		cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	struct restart_block	restart_block;
+	unsigned int		system_call;
 	__u64			user_timer;
 	__u64			system_timer;
 	unsigned long		last_break;	/* last breaking-event-address. */
@@ -84,10 +85,10 @@ static inline struct thread_info *current_thread_info(void)
 /*
  * thread information flags bit numbers
  */
+#define TIF_SYSCALL		0	/* inside a system call */
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_RESTART_SVC		4	/* restart svc with new svc number */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -103,11 +104,11 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SINGLE_STEP		20	/* This task is single stepped */
 #define TIF_FREEZE		21	/* thread is freezing for suspend */
 
+#define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
-#define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -117,7 +118,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIE		(1<<TIF_SIE)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
-#define _TIF_SINGLE_STEP	(1<<TIF_FREEZE)
+#define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 88829a40af6f..d610bef9c5e9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -86,6 +86,17 @@ static inline void get_clock_ext(char *clk)
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
+static inline unsigned long long get_clock_fast(void)
+{
+	unsigned long long clk;
+
+	if (test_facility(25))
+		asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
+	else
+		clk = get_clock();
+	return clk;
+}
+
 static inline unsigned long long get_clock_xt(void)
 {
 	unsigned char clk[16];
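
(Not part of the merge — a small usage sketch for the get_clock_fast() helper added above. The "last scan time" variable is hypothetical, merely echoing the "[S390] qdio: add timestamp for last queue scan time" entry in the shortlog.)

#include <asm/timex.h>

static unsigned long long last_scan_time;

/* Cheap TOD timestamp: get_clock_fast() uses store-clock-fast when
 * facility 25 is installed and falls back to get_clock() otherwise. */
static void mark_queue_scanned(void)
{
	last_scan_time = get_clock_fast();
}
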
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 304445382382..1d8648cf2fea 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -59,6 +59,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 }
 #else
 #define __tlb_flush_full(mm)	__tlb_flush_local()
+#define __tlb_flush_global()	__tlb_flush_local()
 #endif
 
 /*
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index df3732249baa..dd4f07640919 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2b45591e1582..751318765e2e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -45,8 +45,7 @@ int main(void)
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
 	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
-	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
-	DEFINE(__PT_SVCNR, offsetof(struct pt_regs, svcnr));
+	DEFINE(__PT_SVC_CODE, offsetof(struct pt_regs, svc_code));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -141,7 +140,6 @@ int main(void)
 	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
-	DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 255435663bf8..f8828d38fa6e 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -86,6 +86,8 @@ s390_base_pgm_handler_fn:
 ENTRY(diag308_reset)
 	larl	%r4,.Lctlregs		# Save control registers
 	stctg	%c0,%c15,0(%r4)
+	larl	%r4,.Lfpctl		# Floating point control register
+	stfpc	0(%r4)
 	larl	%r4,.Lrestart_psw	# Setup restart PSW at absolute 0
 	lghi	%r3,0
 	lg	%r4,0(%r4)		# Save PSW
@@ -99,6 +101,8 @@ ENTRY(diag308_reset)
 	sam64				# Switch to 64 bit addressing mode
 	larl	%r4,.Lctlregs		# Restore control registers
 	lctlg	%c0,%c15,0(%r4)
+	larl	%r4,.Lfpctl		# Restore floating point ctl register
+	lfpc	0(%r4)
 	br	%r14
 .align 16
 .Lrestart_psw:
@@ -110,6 +114,8 @@ ENTRY(diag308_reset)
 	.rept	16
 	.quad	0
 	.endr
+.Lfpctl:
+	.long	0
 	.previous
 
 #else /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 53acaa86dd94..84a982898448 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -60,12 +60,9 @@
 
 #include "compat_linux.h"
 
-long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
-			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
-			PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
-long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
-			PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
-			PSW32_MASK_PSTATE);
+u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
+		      PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
+		      PSW32_MASK_PSTATE | PSW32_ASC_HOME;
 
 /* For this source file, we want overflow handling. */
 
@@ -365,12 +362,7 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 	if (set) {
 		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
 			return -EFAULT;
-		switch (_NSIG_WORDS) {
-		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
-		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
-		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
-		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
-		}
+		s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
 	}
 	set_fs (KERNEL_DS);
 	ret = sys_rt_sigprocmask(how,
@@ -380,12 +372,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 	set_fs (old_fs);
 	if (ret) return ret;
 	if (oset) {
-		switch (_NSIG_WORDS) {
-		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
-		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
-		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
-		}
+		s32.sig[1] = (s.sig[0] >> 32);
+		s32.sig[0] = s.sig[0];
 		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
 			return -EFAULT;
 	}
@@ -404,12 +392,8 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
 	ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
 	set_fs (old_fs);
 	if (!ret) {
-		switch (_NSIG_WORDS) {
-		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
-		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
-		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
-		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
-		}
+		s32.sig[1] = (s.sig[0] >> 32);
+		s32.sig[0] = s.sig[0];
 		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
 			return -EFAULT;
 	}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a9a285b8c4ad..4f68c81d3ffa 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -141,7 +141,8 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 		break;
 	case __SI_FAULT >> 16:
 		err |= __get_user(tmp, &from->si_addr);
-		to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN);
+		to->si_addr = (void __force __user *)
+			(u64) (tmp & PSW32_ADDR_INSN);
 		break;
 	case __SI_POLL >> 16:
 		err |= __get_user(to->si_band, &from->si_band);
@@ -213,16 +214,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
 		ret = get_user(sa_handler, &act->sa_handler);
 		ret |= __copy_from_user(&set32, &act->sa_mask,
 					sizeof(compat_sigset_t));
-		switch (_NSIG_WORDS) {
-		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
-				| (((long)set32.sig[7]) << 32);
-		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
-				| (((long)set32.sig[5]) << 32);
-		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
-				| (((long)set32.sig[3]) << 32);
-		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
-				| (((long)set32.sig[1]) << 32);
-		}
+		new_ka.sa.sa_mask.sig[0] =
+			set32.sig[0] | (((long)set32.sig[1]) << 32);
 		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
 
 		if (ret)
@@ -233,20 +226,8 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 
 	if (!ret && oact) {
-		switch (_NSIG_WORDS) {
-		case 4:
-			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
-			set32.sig[6] = old_ka.sa.sa_mask.sig[3];
-		case 3:
-			set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
-			set32.sig[4] = old_ka.sa.sa_mask.sig[2];
-		case 2:
-			set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
-			set32.sig[2] = old_ka.sa.sa_mask.sig[1];
-		case 1:
-			set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
-			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
-		}
+		set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
+		set32.sig[0] = old_ka.sa.sa_mask.sig[0];
 		ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler);
 		ret |= __copy_to_user(&oact->sa_mask, &set32,
 				      sizeof(compat_sigset_t));
@@ -300,9 +281,10 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 	_s390_regs_common32 regs32;
 	int err, i;
 
-	regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
-					   (__u32)(regs->psw.mask >> 32));
-	regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
+	regs32.psw.mask = psw32_user_bits |
+		((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);
+	regs32.psw.addr = (__u32) regs->psw.addr |
+		(__u32)(regs->psw.mask & PSW_MASK_BA);
 	for (i = 0; i < NUM_GPRS; i++)
 		regs32.gprs[i] = (__u32) regs->gprs[i];
 	save_access_regs(current->thread.acrs);
@@ -327,8 +309,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 	err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
 	if (err)
 		return err;
-	regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
-					(__u64)regs32.psw.mask << 32);
+	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+		(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
+		(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
 	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
 	for (i = 0; i < NUM_GPRS; i++)
 		regs->gprs[i] = (__u64) regs32.gprs[i];
@@ -342,7 +325,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		return err;
 
 	restore_fp_regs(&current->thread.fp_regs);
-	regs->svcnr = 0;	/* disable syscall checks */
+	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
 	return 0;
 }
 
@@ -496,11 +479,11 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 	/* Set up to return from userspace. If provided, use a stub
 	   already in userspace. */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
+		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode;
+		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
-			       (u16 __user *)(frame->retcode)))
+			       (u16 __force __user *)(frame->retcode)))
 			goto give_sigsegv;
 	}
 
@@ -509,11 +492,12 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 		goto give_sigsegv;
 
 	/* Set up registers for signal handler */
-	regs->gprs[15] = (__u64) frame;
-	regs->psw.addr = (__u64) ka->sa.sa_handler;
+	regs->gprs[15] = (__force __u64) frame;
+	regs->psw.mask |= PSW_MASK_BA;		/* force amode 31 */
+	regs->psw.addr = (__force __u64) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);
-	regs->gprs[3] = (__u64) &frame->sc;
+	regs->gprs[3] = (__force __u64) &frame->sc;
 
 	/* We forgot to include these in the sigcontext.
 	   To avoid breaking binary compatibility, they are passed as args. */
@@ -521,7 +505,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 	regs->gprs[5] = current->thread.prot_addr;
 
 	/* Place signal number on stack to allow backtrace from handler. */
-	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
+	if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
 		goto give_sigsegv;
 	return 0;
 
@@ -564,20 +548,21 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 	} else {
 		regs->gprs[14] = (__u64) frame->retcode;
 		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
-				  (u16 __user *)(frame->retcode));
+				  (u16 __force __user *)(frame->retcode));
 	}
 
 	/* Set up backchain. */
-	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
+	if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
 		goto give_sigsegv;
 
 	/* Set up registers for signal handler */
-	regs->gprs[15] = (__u64) frame;
+	regs->gprs[15] = (__force __u64) frame;
+	regs->psw.mask |= PSW_MASK_BA;		/* force amode 31 */
 	regs->psw.addr = (__u64) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);
-	regs->gprs[3] = (__u64) &frame->info;
-	regs->gprs[4] = (__u64) &frame->uc;
+	regs->gprs[3] = (__force __u64) &frame->info;
+	regs->gprs[4] = (__force __u64) &frame->uc;
 	return 0;
 
 give_sigsegv:
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 7526db6bf501..5006a1d9f5d0 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1623,8 +1623,7 @@ ENTRY(sys_syncfs_wrapper)
1623 lgfr %r2,%r2 # int 1623 lgfr %r2,%r2 # int
1624 jg sys_syncfs 1624 jg sys_syncfs
1625 1625
1626 .globl sys_setns_wrapper 1626ENTRY(sys_setns_wrapper)
1627sys_setns_wrapper:
1628 lgfr %r2,%r2 # int 1627 lgfr %r2,%r2 # int
1629 lgfr %r3,%r3 # int 1628 lgfr %r3,%r3 # int
1630 jg sys_setns 1629 jg sys_setns
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
new file mode 100644
index 000000000000..39f8fd4438fc
--- /dev/null
+++ b/arch/s390/kernel/crash_dump.c
@@ -0,0 +1,426 @@
1/*
2 * S390 kdump implementation
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#include <linux/crash_dump.h>
9#include <asm/lowcore.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/gfp.h>
13#include <linux/slab.h>
14#include <linux/crash_dump.h>
15#include <linux/bootmem.h>
16#include <linux/elf.h>
17#include <asm/ipl.h>
18
19#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
20#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
21#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
22
23/*
24 * Copy one page from "oldmem"
25 *
26 * For the kdump reserved memory this function performs a swap operation:
27 * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
28 * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
29 */
30ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
31 size_t csize, unsigned long offset, int userbuf)
32{
33 unsigned long src;
34
35 if (!csize)
36 return 0;
37
38 src = (pfn << PAGE_SHIFT) + offset;
39 if (src < OLDMEM_SIZE)
40 src += OLDMEM_BASE;
41 else if (src > OLDMEM_BASE &&
42 src < OLDMEM_BASE + OLDMEM_SIZE)
43 src -= OLDMEM_BASE;
44 if (userbuf)
45 copy_to_user_real((void __force __user *) buf, (void *) src,
46 csize);
47 else
48 memcpy_real(buf, (void *) src, csize);
49 return csize;
50}
51
52/*
53 * Copy memory from old kernel
54 */
55static int copy_from_oldmem(void *dest, void *src, size_t count)
56{
57 unsigned long copied = 0;
58 int rc;
59
60 if ((unsigned long) src < OLDMEM_SIZE) {
61 copied = min(count, OLDMEM_SIZE - (unsigned long) src);
62 rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
63 if (rc)
64 return rc;
65 }
66 return memcpy_real(dest + copied, src + copied, count - copied);
67}
68
69/*
70 * Alloc memory and panic in case of ENOMEM
71 */
72static void *kzalloc_panic(int len)
73{
74 void *rc;
75
76 rc = kzalloc(len, GFP_KERNEL);
77 if (!rc)
78 panic("s390 kdump kzalloc (%d) failed", len);
79 return rc;
80}
81
82/*
83 * Get memory layout and create hole for oldmem
84 */
85static struct mem_chunk *get_memory_layout(void)
86{
87 struct mem_chunk *chunk_array;
88
89 chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
90 detect_memory_layout(chunk_array);
91 create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK);
92 return chunk_array;
93}
94
95/*
96 * Initialize ELF note
97 */
98static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
99 const char *name)
100{
101 Elf64_Nhdr *note;
102 u64 len;
103
104 note = (Elf64_Nhdr *)buf;
105 note->n_namesz = strlen(name) + 1;
106 note->n_descsz = d_len;
107 note->n_type = type;
108 len = sizeof(Elf64_Nhdr);
109
110 memcpy(buf + len, name, note->n_namesz);
111 len = roundup(len + note->n_namesz, 4);
112
113 memcpy(buf + len, desc, note->n_descsz);
114 len = roundup(len + note->n_descsz, 4);
115
116 return PTR_ADD(buf, len);
117}
118
119/*
120 * Initialize prstatus note
121 */
122static void *nt_prstatus(void *ptr, struct save_area *sa)
123{
124 struct elf_prstatus nt_prstatus;
125 static int cpu_nr = 1;
126
127 memset(&nt_prstatus, 0, sizeof(nt_prstatus));
128 memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
129 memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
130 memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
131 nt_prstatus.pr_pid = cpu_nr;
132 cpu_nr++;
133
134 return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
135 "CORE");
136}
137
138/*
139 * Initialize fpregset (floating point) note
140 */
141static void *nt_fpregset(void *ptr, struct save_area *sa)
142{
143 elf_fpregset_t nt_fpregset;
144
145 memset(&nt_fpregset, 0, sizeof(nt_fpregset));
146 memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
147 memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
148
149 return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
150 "CORE");
151}
152
153/*
154 * Initialize timer note
155 */
156static void *nt_s390_timer(void *ptr, struct save_area *sa)
157{
158 return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
159 KEXEC_CORE_NOTE_NAME);
160}
161
162/*
163 * Initialize TOD clock comparator note
164 */
165static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
166{
167 return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
168 sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
169}
170
171/*
172 * Initialize TOD programmable register note
173 */
174static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
175{
176 return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
177 sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
178}
179
180/*
181 * Initialize control register note
182 */
183static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
184{
185 return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
186 sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
187}
188
189/*
190 * Initialize prefix register note
191 */
192static void *nt_s390_prefix(void *ptr, struct save_area *sa)
193{
194 return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
195 sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
196}
197
198/*
199 * Fill ELF notes for one CPU with save area registers
200 */
201void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
202{
203 ptr = nt_prstatus(ptr, sa);
204 ptr = nt_fpregset(ptr, sa);
205 ptr = nt_s390_timer(ptr, sa);
206 ptr = nt_s390_tod_cmp(ptr, sa);
207 ptr = nt_s390_tod_preg(ptr, sa);
208 ptr = nt_s390_ctrs(ptr, sa);
209 ptr = nt_s390_prefix(ptr, sa);
210 return ptr;
211}
212
213/*
214 * Initialize prpsinfo note (new kernel)
215 */
216static void *nt_prpsinfo(void *ptr)
217{
218 struct elf_prpsinfo prpsinfo;
219
220 memset(&prpsinfo, 0, sizeof(prpsinfo));
221 prpsinfo.pr_sname = 'R';
222 strcpy(prpsinfo.pr_fname, "vmlinux");
223 return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
224 KEXEC_CORE_NOTE_NAME);
225}
226
227/*
228 * Initialize vmcoreinfo note (new kernel)
229 */
230static void *nt_vmcoreinfo(void *ptr)
231{
232 char nt_name[11], *vmcoreinfo;
233 Elf64_Nhdr note;
234 void *addr;
235
236 if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
237 return ptr;
238 memset(nt_name, 0, sizeof(nt_name));
239 if (copy_from_oldmem(&note, addr, sizeof(note)))
240 return ptr;
241 if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
242 return ptr;
243 if (strcmp(nt_name, "VMCOREINFO") != 0)
244 return ptr;
245 vmcoreinfo = kzalloc_panic(note.n_descsz + 1);
246 if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
247 return ptr;
248 vmcoreinfo[note.n_descsz + 1] = 0;
249 return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO");
250}
251
252/*
253 * Initialize ELF header (new kernel)
254 */
255static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
256{
257 memset(ehdr, 0, sizeof(*ehdr));
258 memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
259 ehdr->e_ident[EI_CLASS] = ELFCLASS64;
260 ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
261 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
262 memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
263 ehdr->e_type = ET_CORE;
264 ehdr->e_machine = EM_S390;
265 ehdr->e_version = EV_CURRENT;
266 ehdr->e_phoff = sizeof(Elf64_Ehdr);
267 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
268 ehdr->e_phentsize = sizeof(Elf64_Phdr);
269 ehdr->e_phnum = mem_chunk_cnt + 1;
270 return ehdr + 1;
271}
272
273/*
274 * Return CPU count for ELF header (new kernel)
275 */
276static int get_cpu_cnt(void)
277{
278 int i, cpus = 0;
279
280 for (i = 0; zfcpdump_save_areas[i]; i++) {
281 if (zfcpdump_save_areas[i]->pref_reg == 0)
282 continue;
283 cpus++;
284 }
285 return cpus;
286}
287
288/*
289 * Return memory chunk count for ELF header (new kernel)
290 */
291static int get_mem_chunk_cnt(void)
292{
293 struct mem_chunk *chunk_array, *mem_chunk;
294 int i, cnt = 0;
295
296 chunk_array = get_memory_layout();
297 for (i = 0; i < MEMORY_CHUNKS; i++) {
298 mem_chunk = &chunk_array[i];
299 if (chunk_array[i].type != CHUNK_READ_WRITE &&
300 chunk_array[i].type != CHUNK_READ_ONLY)
301 continue;
302 if (mem_chunk->size == 0)
303 continue;
304 cnt++;
305 }
306 kfree(chunk_array);
307 return cnt;
308}
309
310/*
311 * Relocate pointer in order to allow vmcore code to access the data
312 */
313static inline unsigned long relocate(unsigned long addr)
314{
315 return OLDMEM_BASE + addr;
316}
317
318/*
319 * Initialize ELF loads (new kernel)
320 */
321static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
322{
323 struct mem_chunk *chunk_array, *mem_chunk;
324 int i;
325
326 chunk_array = get_memory_layout();
327 for (i = 0; i < MEMORY_CHUNKS; i++) {
328 mem_chunk = &chunk_array[i];
329 if (mem_chunk->size == 0)
330 break;
331 if (chunk_array[i].type != CHUNK_READ_WRITE &&
332 chunk_array[i].type != CHUNK_READ_ONLY)
333 continue;
334 else
335 phdr->p_filesz = mem_chunk->size;
336 phdr->p_type = PT_LOAD;
337 phdr->p_offset = mem_chunk->addr;
338 phdr->p_vaddr = mem_chunk->addr;
339 phdr->p_paddr = mem_chunk->addr;
340 phdr->p_memsz = mem_chunk->size;
341 phdr->p_flags = PF_R | PF_W | PF_X;
342 phdr->p_align = PAGE_SIZE;
343 phdr++;
344 }
345 kfree(chunk_array);
346 return i;
347}
348
349/*
350 * Initialize notes (new kernel)
351 */
352static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
353{
354 struct save_area *sa;
355 void *ptr_start = ptr;
356 int i;
357
358 ptr = nt_prpsinfo(ptr);
359
360 for (i = 0; zfcpdump_save_areas[i]; i++) {
361 sa = zfcpdump_save_areas[i];
362 if (sa->pref_reg == 0)
363 continue;
364 ptr = fill_cpu_elf_notes(ptr, sa);
365 }
366 ptr = nt_vmcoreinfo(ptr);
367 memset(phdr, 0, sizeof(*phdr));
368 phdr->p_type = PT_NOTE;
369 phdr->p_offset = relocate(notes_offset);
370 phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
371 phdr->p_memsz = phdr->p_filesz;
372 return ptr;
373}
374
375/*
376 * Create ELF core header (new kernel)
377 */
378static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
379{
380 Elf64_Phdr *phdr_notes, *phdr_loads;
381 int mem_chunk_cnt;
382 void *ptr, *hdr;
383 u32 alloc_size;
384 u64 hdr_off;
385
386 mem_chunk_cnt = get_mem_chunk_cnt();
387
388 alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
389 mem_chunk_cnt * sizeof(Elf64_Phdr);
390 hdr = kzalloc_panic(alloc_size);
391 /* Init elf header */
392 ptr = ehdr_init(hdr, mem_chunk_cnt);
393 /* Init program headers */
394 phdr_notes = ptr;
395 ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
396 phdr_loads = ptr;
397 ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
398 /* Init notes */
399 hdr_off = PTR_DIFF(ptr, hdr);
400 ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
401 /* Init loads */
402 hdr_off = PTR_DIFF(ptr, hdr);
403 loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
404 *elfcorebuf_sz = hdr_off;
405 *elfcorebuf = (void *) relocate((unsigned long) hdr);
406 BUG_ON(*elfcorebuf_sz > alloc_size);
407}
408
409/*
410 * Create kdump ELF core header in new kernel, if it has not been passed via
411 * the "elfcorehdr" kernel parameter
412 */
413static int setup_kdump_elfcorehdr(void)
414{
415 size_t elfcorebuf_sz;
416 char *elfcorebuf;
417
418 if (!OLDMEM_BASE || is_kdump_kernel())
419 return -EINVAL;
420 s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
421 elfcorehdr_addr = (unsigned long long) elfcorebuf;
422 elfcorehdr_size = elfcorebuf_sz;
423 return 0;
424}
425
426subsys_initcall(setup_kdump_elfcorehdr);
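
The address arithmetic in copy_oldmem_page() above implements a symmetric swap: reads of [0, OLDMEM_SIZE) are redirected into the crashkernel region at OLDMEM_BASE, and reads of that region are redirected back to low memory, because the kdump boot code has exchanged the two areas. A minimal user-space sketch of that translation, with hypothetical values for OLDMEM_BASE and OLDMEM_SIZE (the real values come from the kernel parameter area), not part of the patch:

#include <stdio.h>

/* Hypothetical layout: a 128 MiB crashkernel region at 256 MiB. */
#define OLDMEM_BASE 0x10000000UL
#define OLDMEM_SIZE 0x08000000UL

/* Mirrors the source-address checks in copy_oldmem_page() above. */
static unsigned long translate_oldmem(unsigned long src)
{
	if (src < OLDMEM_SIZE)
		return src + OLDMEM_BASE;
	else if (src > OLDMEM_BASE && src < OLDMEM_BASE + OLDMEM_SIZE)
		return src - OLDMEM_BASE;
	return src;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x1000UL, translate_oldmem(0x1000UL));
	printf("%#lx -> %#lx\n", OLDMEM_BASE + 0x1000UL,
	       translate_oldmem(OLDMEM_BASE + 0x1000UL));
	printf("%#lx -> %#lx\n", OLDMEM_BASE + OLDMEM_SIZE + 0x1000UL,
	       translate_oldmem(OLDMEM_BASE + OLDMEM_SIZE + 0x1000UL));
	return 0;
}

The relocate() helper applies the offset in the other direction, which appears to be why s390_elf_corehdr_create() hands out relocate(hdr) as the ELF core header address: once the vmcore reader applies the swap above, that address resolves back to the buffer the new kernel allocated.
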
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f297456dba7a..37394b3413e2 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -252,7 +252,7 @@ static noinline __init void setup_lowcore_early(void)
252{ 252{
253 psw_t psw; 253 psw_t psw;
254 254
255 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 255 psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
256 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; 256 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
257 S390_lowcore.external_new_psw = psw; 257 S390_lowcore.external_new_psw = psw;
258 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 258 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
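
The new lowcore PSWs set PSW_MASK_EA and PSW_MASK_BA explicitly instead of relying on the old PSW_BASE_BITS definition; together these two bits select the addressing mode of the PSW. A small sketch of that mapping, per the z/Architecture PSW definition and for illustration only:

#include <stdio.h>

/* Addressing mode selected by the EA (extended) and BA (basic) bits. */
static const char *psw_amode(int ea, int ba)
{
	if (ea && ba)
		return "64-bit";
	if (!ea && ba)
		return "31-bit";
	if (!ea && !ba)
		return "24-bit";
	return "invalid";	/* EA=1, BA=0 is not a valid combination */
}

int main(void)
{
	printf("EA=1 BA=1 -> %s\n", psw_amode(1, 1));
	printf("EA=0 BA=1 -> %s\n", psw_amode(0, 1));
	return 0;
}
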
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 02ec8fe7d03f..b13157057e02 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -43,16 +43,15 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
43SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 43SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
44SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60 44SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
45SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 45SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
46SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC 46SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
47SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
48SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE 47SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
49 48
50_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 49_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
51 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) 50 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
52_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 51_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
53 _TIF_MCCK_PENDING) 52 _TIF_MCCK_PENDING)
54_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 53_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
55 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) 54 _TIF_SYSCALL_TRACEPOINT)
56 55
57STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 56STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
58STACK_SIZE = 1 << STACK_SHIFT 57STACK_SIZE = 1 << STACK_SHIFT
@@ -228,9 +227,10 @@ ENTRY(system_call)
228sysc_saveall: 227sysc_saveall:
229 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 228 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
230 CREATE_STACK_FRAME __LC_SAVE_AREA 229 CREATE_STACK_FRAME __LC_SAVE_AREA
231 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
232 mvc SP_ILC(4,%r15),__LC_SVC_ILC
233 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct 230 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
231 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
232 mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
233 oi __TI_flags+3(%r12),_TIF_SYSCALL
234sysc_vtime: 234sysc_vtime:
235 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 235 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
236sysc_stime: 236sysc_stime:
@@ -239,17 +239,17 @@ sysc_update:
239 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 239 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
240sysc_do_svc: 240sysc_do_svc:
241 xr %r7,%r7 241 xr %r7,%r7
242 icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0 242 icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0
243 bnz BASED(sysc_nr_ok) # svc number > 0 243 bnz BASED(sysc_nr_ok) # svc number > 0
244 # svc 0: system call number in %r1 244 # svc 0: system call number in %r1
245 cl %r1,BASED(.Lnr_syscalls) 245 cl %r1,BASED(.Lnr_syscalls)
246 bnl BASED(sysc_nr_ok) 246 bnl BASED(sysc_nr_ok)
247 sth %r1,SP_SVCNR(%r15) 247 sth %r1,SP_SVC_CODE+2(%r15)
248 lr %r7,%r1 # copy svc number to %r7 248 lr %r7,%r1 # copy svc number to %r7
249sysc_nr_ok: 249sysc_nr_ok:
250 sll %r7,2 # svc number *4 250 sll %r7,2 # svc number *4
251 l %r10,BASED(.Lsysc_table) 251 l %r10,BASED(.Lsysc_table)
252 tm __TI_flags+2(%r12),_TIF_SYSCALL 252 tm __TI_flags+2(%r12),_TIF_TRACE >> 8
253 mvc SP_ARGS(4,%r15),SP_R7(%r15) 253 mvc SP_ARGS(4,%r15),SP_R7(%r15)
254 l %r8,0(%r7,%r10) # get system call addr. 254 l %r8,0(%r7,%r10) # get system call addr.
255 bnz BASED(sysc_tracesys) 255 bnz BASED(sysc_tracesys)
@@ -259,23 +259,19 @@ sysc_nr_ok:
259sysc_return: 259sysc_return:
260 LOCKDEP_SYS_EXIT 260 LOCKDEP_SYS_EXIT
261sysc_tif: 261sysc_tif:
262 tm SP_PSW+1(%r15),0x01 # returning to user ?
263 bno BASED(sysc_restore)
262 tm __TI_flags+3(%r12),_TIF_WORK_SVC 264 tm __TI_flags+3(%r12),_TIF_WORK_SVC
263 bnz BASED(sysc_work) # there is work to do (signals etc.) 265 bnz BASED(sysc_work) # there is work to do (signals etc.)
266 ni __TI_flags+3(%r12),255-_TIF_SYSCALL
264sysc_restore: 267sysc_restore:
265 RESTORE_ALL __LC_RETURN_PSW,1 268 RESTORE_ALL __LC_RETURN_PSW,1
266sysc_done: 269sysc_done:
267 270
268# 271#
269# There is work to do, but first we need to check if we return to userspace.
270#
271sysc_work:
272 tm SP_PSW+1(%r15),0x01 # returning to user ?
273 bno BASED(sysc_restore)
274
275#
276# One of the work bits is on. Find out which one. 272# One of the work bits is on. Find out which one.
277# 273#
278sysc_work_tif: 274sysc_work:
279 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING 275 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
280 bo BASED(sysc_mcck_pending) 276 bo BASED(sysc_mcck_pending)
281 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 277 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
@@ -284,8 +280,6 @@ sysc_work_tif:
284 bo BASED(sysc_sigpending) 280 bo BASED(sysc_sigpending)
285 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME 281 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
286 bo BASED(sysc_notify_resume) 282 bo BASED(sysc_notify_resume)
287 tm __TI_flags+3(%r12),_TIF_RESTART_SVC
288 bo BASED(sysc_restart)
289 tm __TI_flags+3(%r12),_TIF_PER_TRAP 283 tm __TI_flags+3(%r12),_TIF_PER_TRAP
290 bo BASED(sysc_singlestep) 284 bo BASED(sysc_singlestep)
291 b BASED(sysc_return) # beware of critical section cleanup 285 b BASED(sysc_return) # beware of critical section cleanup
@@ -314,11 +308,14 @@ sysc_sigpending:
314 la %r2,SP_PTREGS(%r15) # load pt_regs 308 la %r2,SP_PTREGS(%r15) # load pt_regs
315 l %r1,BASED(.Ldo_signal) 309 l %r1,BASED(.Ldo_signal)
316 basr %r14,%r1 # call do_signal 310 basr %r14,%r1 # call do_signal
317 tm __TI_flags+3(%r12),_TIF_RESTART_SVC 311 tm __TI_flags+3(%r12),_TIF_SYSCALL
318 bo BASED(sysc_restart) 312 bno BASED(sysc_return)
319 tm __TI_flags+3(%r12),_TIF_PER_TRAP 313 lm %r2,%r6,SP_R2(%r15) # load svc arguments
320 bo BASED(sysc_singlestep) 314 xr %r7,%r7 # svc 0 returns -ENOSYS
321 b BASED(sysc_return) 315 clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2)
316 bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0
317 icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number
318 b BASED(sysc_nr_ok) # restart svc
322 319
323# 320#
324# _TIF_NOTIFY_RESUME is set, call do_notify_resume 321# _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -329,24 +326,11 @@ sysc_notify_resume:
329 la %r14,BASED(sysc_return) 326 la %r14,BASED(sysc_return)
330 br %r1 # call do_notify_resume 327 br %r1 # call do_notify_resume
331 328
332
333#
334# _TIF_RESTART_SVC is set, set up registers and restart svc
335#
336sysc_restart:
337 ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
338 l %r7,SP_R2(%r15) # load new svc number
339 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
340 lm %r2,%r6,SP_R2(%r15) # load svc arguments
341 sth %r7,SP_SVCNR(%r15)
342 b BASED(sysc_nr_ok) # restart svc
343
344# 329#
345# _TIF_PER_TRAP is set, call do_per_trap 330# _TIF_PER_TRAP is set, call do_per_trap
346# 331#
347sysc_singlestep: 332sysc_singlestep:
348 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP 333 ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
349 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
350 la %r2,SP_PTREGS(%r15) # address of register-save area 334 la %r2,SP_PTREGS(%r15) # address of register-save area
351 l %r1,BASED(.Lhandle_per) # load adr. of per handler 335 l %r1,BASED(.Lhandle_per) # load adr. of per handler
352 la %r14,BASED(sysc_return) # load adr. of system return 336 la %r14,BASED(sysc_return) # load adr. of system return
@@ -361,7 +345,7 @@ sysc_tracesys:
361 la %r2,SP_PTREGS(%r15) # load pt_regs 345 la %r2,SP_PTREGS(%r15) # load pt_regs
362 la %r3,0 346 la %r3,0
363 xr %r0,%r0 347 xr %r0,%r0
364 icm %r0,3,SP_SVCNR(%r15) 348 icm %r0,3,SP_SVC_CODE(%r15)
365 st %r0,SP_R2(%r15) 349 st %r0,SP_R2(%r15)
366 basr %r14,%r1 350 basr %r14,%r1
367 cl %r2,BASED(.Lnr_syscalls) 351 cl %r2,BASED(.Lnr_syscalls)
@@ -376,7 +360,7 @@ sysc_tracego:
376 basr %r14,%r8 # call sys_xxx 360 basr %r14,%r8 # call sys_xxx
377 st %r2,SP_R2(%r15) # store return value 361 st %r2,SP_R2(%r15) # store return value
378sysc_tracenogo: 362sysc_tracenogo:
379 tm __TI_flags+2(%r12),_TIF_SYSCALL 363 tm __TI_flags+2(%r12),_TIF_TRACE >> 8
380 bz BASED(sysc_return) 364 bz BASED(sysc_return)
381 l %r1,BASED(.Ltrace_exit) 365 l %r1,BASED(.Ltrace_exit)
382 la %r2,SP_PTREGS(%r15) # load pt_regs 366 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -454,7 +438,6 @@ ENTRY(pgm_check_handler)
454 bnz BASED(pgm_per) # got per exception -> special case 438 bnz BASED(pgm_per) # got per exception -> special case
455 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA 439 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
456 CREATE_STACK_FRAME __LC_SAVE_AREA 440 CREATE_STACK_FRAME __LC_SAVE_AREA
457 xc SP_ILC(4,%r15),SP_ILC(%r15)
458 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW 441 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
459 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct 442 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
460 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 443 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -530,9 +513,10 @@ pgm_exit2:
530pgm_svcper: 513pgm_svcper:
531 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA 514 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
532 CREATE_STACK_FRAME __LC_SAVE_AREA 515 CREATE_STACK_FRAME __LC_SAVE_AREA
533 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
534 mvc SP_ILC(4,%r15),__LC_SVC_ILC
535 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct 516 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
517 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
518 mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
519 oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
536 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 520 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
537 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 521 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
538 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 522 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
@@ -540,7 +524,6 @@ pgm_svcper:
540 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE 524 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
541 mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS 525 mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
542 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID 526 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
543 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
544 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 527 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
545 lm %r2,%r6,SP_R2(%r15) # load svc arguments 528 lm %r2,%r6,SP_R2(%r15) # load svc arguments
546 b BASED(sysc_do_svc) 529 b BASED(sysc_do_svc)
@@ -550,7 +533,6 @@ pgm_svcper:
550# 533#
551kernel_per: 534kernel_per:
552 REENABLE_IRQS 535 REENABLE_IRQS
553 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
554 la %r2,SP_PTREGS(%r15) # address of register-save area 536 la %r2,SP_PTREGS(%r15) # address of register-save area
555 l %r1,BASED(.Lhandle_per) # load adr. of per handler 537 l %r1,BASED(.Lhandle_per) # load adr. of per handler
556 basr %r14,%r1 # branch to do_single_step 538 basr %r14,%r1 # branch to do_single_step
@@ -853,13 +835,13 @@ restart_go:
853# PSW restart interrupt handler 835# PSW restart interrupt handler
854# 836#
855ENTRY(psw_restart_int_handler) 837ENTRY(psw_restart_int_handler)
856 st %r15,__LC_SAVE_AREA_64(%r0) # save r15 838 st %r15,__LC_SAVE_AREA+48(%r0) # save r15
857 basr %r15,0 839 basr %r15,0
8580: l %r15,.Lrestart_stack-0b(%r15) # load restart stack 8400: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
859 l %r15,0(%r15) 841 l %r15,0(%r15)
860 ahi %r15,-SP_SIZE # make room for pt_regs 842 ahi %r15,-SP_SIZE # make room for pt_regs
861 stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack 843 stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
862 mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack 844 mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack
863 mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw 845 mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
864 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 846 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
865 basr %r14,0 847 basr %r14,0
@@ -965,9 +947,11 @@ cleanup_system_call:
965 s %r15,BASED(.Lc_spsize) # make room for registers & psw 947 s %r15,BASED(.Lc_spsize) # make room for registers & psw
966 st %r15,12(%r12) 948 st %r15,12(%r12)
967 CREATE_STACK_FRAME __LC_SAVE_AREA 949 CREATE_STACK_FRAME __LC_SAVE_AREA
968 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
969 mvc SP_ILC(4,%r15),__LC_SVC_ILC
970 mvc 0(4,%r12),__LC_THREAD_INFO 950 mvc 0(4,%r12),__LC_THREAD_INFO
951 l %r12,__LC_THREAD_INFO
952 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
953 mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
954 oi __TI_flags+3(%r12),_TIF_SYSCALL
971cleanup_vtime: 955cleanup_vtime:
972 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) 956 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
973 bhe BASED(cleanup_stime) 957 bhe BASED(cleanup_stime)
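
The _TIF_TRACE rework above moves the right shift from the mask definition to the point of use: tm tests a single byte, and because the thread flags are stored big-endian, the byte at __TI_flags+2 of the 32-bit flag word holds bits 15 to 8, so a mask built from flags in that range must be shifted right by 8 before it fits the instruction. A host-side sketch of the equivalence, with hypothetical flag values chosen only for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical trace flags sitting in bits 8..9 of the TIF word. */
#define TIF_SYSCALL_TRACE	(1U << 8)
#define TIF_SYSCALL_AUDIT	(1U << 9)
#define TIF_TRACE_MASK		(TIF_SYSCALL_TRACE | TIF_SYSCALL_AUDIT)

/* Model of "tm __TI_flags+2(%r12),mask" on a big-endian 32-bit word:
 * the mask is applied to byte 2, i.e. bits 15..8 of the value. */
static int tm_byte2(uint32_t flags, uint8_t mask)
{
	return (((flags >> 8) & 0xff) & mask) != 0;
}

int main(void)
{
	uint32_t flags = TIF_SYSCALL_TRACE;

	/* Testing byte 2 with (TIF_TRACE_MASK >> 8) is equivalent to
	 * testing the whole word against TIF_TRACE_MASK. */
	assert(tm_byte2(flags, TIF_TRACE_MASK >> 8) ==
	       ((flags & TIF_TRACE_MASK) != 0));
	printf("trace work pending: %d\n", tm_byte2(flags, TIF_TRACE_MASK >> 8));
	return 0;
}
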
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 66729eb7bbc5..ef8fb1d6e8d7 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,24 +5,33 @@
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7 7
8
9extern void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
10extern void *restart_stack;
11
12asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
13asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
14
8void do_protection_exception(struct pt_regs *, long, unsigned long); 15void do_protection_exception(struct pt_regs *, long, unsigned long);
9void do_dat_exception(struct pt_regs *, long, unsigned long); 16void do_dat_exception(struct pt_regs *, long, unsigned long);
10void do_asce_exception(struct pt_regs *, long, unsigned long); 17void do_asce_exception(struct pt_regs *, long, unsigned long);
11 18
12extern int sysctl_userprocess_debug;
13
14void do_per_trap(struct pt_regs *regs); 19void do_per_trap(struct pt_regs *regs);
15void syscall_trace(struct pt_regs *regs, int entryexit); 20void syscall_trace(struct pt_regs *regs, int entryexit);
16void kernel_stack_overflow(struct pt_regs * regs); 21void kernel_stack_overflow(struct pt_regs * regs);
17void do_signal(struct pt_regs *regs); 22void do_signal(struct pt_regs *regs);
18int handle_signal32(unsigned long sig, struct k_sigaction *ka, 23int handle_signal32(unsigned long sig, struct k_sigaction *ka,
19 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); 24 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
25void do_notify_resume(struct pt_regs *regs);
20 26
21void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); 27void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
28void do_restart(void);
22int __cpuinit start_secondary(void *cpuvoid); 29int __cpuinit start_secondary(void *cpuvoid);
23void __init startup_init(void); 30void __init startup_init(void);
24void die(const char * str, struct pt_regs * regs, long err); 31void die(const char * str, struct pt_regs * regs, long err);
25 32
33void __init time_init(void);
34
26struct s390_mmap_arg_struct; 35struct s390_mmap_arg_struct;
27struct fadvise64_64_args; 36struct fadvise64_64_args;
28struct old_sigaction; 37struct old_sigaction;
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 713da0760538..83a93747e2fd 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -43,19 +43,18 @@ SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
43SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 43SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
44SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 44SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
45SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 45SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
46SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC 46SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
47SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
48SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE 47SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
49 48
50STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 49STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
51STACK_SIZE = 1 << STACK_SHIFT 50STACK_SIZE = 1 << STACK_SHIFT
52 51
53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 52_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) 53 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 54_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
56 _TIF_MCCK_PENDING) 55 _TIF_MCCK_PENDING)
57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 56_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
58 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) 57 _TIF_SYSCALL_TRACEPOINT)
59_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) 58_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
60 59
61#define BASED(name) name-system_call(%r13) 60#define BASED(name) name-system_call(%r13)
@@ -249,9 +248,10 @@ ENTRY(system_call)
249sysc_saveall: 248sysc_saveall:
250 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 249 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
251 CREATE_STACK_FRAME __LC_SAVE_AREA 250 CREATE_STACK_FRAME __LC_SAVE_AREA
252 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
253 mvc SP_ILC(4,%r15),__LC_SVC_ILC
254 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 251 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
252 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
253 mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
254 oi __TI_flags+7(%r12),_TIF_SYSCALL
255sysc_vtime: 255sysc_vtime:
256 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 256 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
257sysc_stime: 257sysc_stime:
@@ -260,14 +260,14 @@ sysc_update:
260 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 260 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
261 LAST_BREAK 261 LAST_BREAK
262sysc_do_svc: 262sysc_do_svc:
263 llgh %r7,SP_SVCNR(%r15) 263 llgh %r7,SP_SVC_CODE+2(%r15)
264 slag %r7,%r7,2 # shift and test for svc 0 264 slag %r7,%r7,2 # shift and test for svc 0
265 jnz sysc_nr_ok 265 jnz sysc_nr_ok
266 # svc 0: system call number in %r1 266 # svc 0: system call number in %r1
267 llgfr %r1,%r1 # clear high word in r1 267 llgfr %r1,%r1 # clear high word in r1
268 cghi %r1,NR_syscalls 268 cghi %r1,NR_syscalls
269 jnl sysc_nr_ok 269 jnl sysc_nr_ok
270 sth %r1,SP_SVCNR(%r15) 270 sth %r1,SP_SVC_CODE+2(%r15)
271 slag %r7,%r1,2 # shift and test for svc 0 271 slag %r7,%r1,2 # shift and test for svc 0
272sysc_nr_ok: 272sysc_nr_ok:
273 larl %r10,sys_call_table 273 larl %r10,sys_call_table
@@ -277,7 +277,7 @@ sysc_nr_ok:
277 larl %r10,sys_call_table_emu # use 31 bit emulation system calls 277 larl %r10,sys_call_table_emu # use 31 bit emulation system calls
278sysc_noemu: 278sysc_noemu:
279#endif 279#endif
280 tm __TI_flags+6(%r12),_TIF_SYSCALL 280 tm __TI_flags+6(%r12),_TIF_TRACE >> 8
281 mvc SP_ARGS(8,%r15),SP_R7(%r15) 281 mvc SP_ARGS(8,%r15),SP_R7(%r15)
282 lgf %r8,0(%r7,%r10) # load address of system call routine 282 lgf %r8,0(%r7,%r10) # load address of system call routine
283 jnz sysc_tracesys 283 jnz sysc_tracesys
@@ -287,23 +287,19 @@ sysc_noemu:
287sysc_return: 287sysc_return:
288 LOCKDEP_SYS_EXIT 288 LOCKDEP_SYS_EXIT
289sysc_tif: 289sysc_tif:
290 tm SP_PSW+1(%r15),0x01 # returning to user ?
291 jno sysc_restore
290 tm __TI_flags+7(%r12),_TIF_WORK_SVC 292 tm __TI_flags+7(%r12),_TIF_WORK_SVC
291 jnz sysc_work # there is work to do (signals etc.) 293 jnz sysc_work # there is work to do (signals etc.)
294 ni __TI_flags+7(%r12),255-_TIF_SYSCALL
292sysc_restore: 295sysc_restore:
293 RESTORE_ALL __LC_RETURN_PSW,1 296 RESTORE_ALL __LC_RETURN_PSW,1
294sysc_done: 297sysc_done:
295 298
296# 299#
297# There is work to do, but first we need to check if we return to userspace.
298#
299sysc_work:
300 tm SP_PSW+1(%r15),0x01 # returning to user ?
301 jno sysc_restore
302
303#
304# One of the work bits is on. Find out which one. 300# One of the work bits is on. Find out which one.
305# 301#
306sysc_work_tif: 302sysc_work:
307 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING 303 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
308 jo sysc_mcck_pending 304 jo sysc_mcck_pending
309 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 305 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
@@ -312,8 +308,6 @@ sysc_work_tif:
312 jo sysc_sigpending 308 jo sysc_sigpending
313 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 309 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
314 jo sysc_notify_resume 310 jo sysc_notify_resume
315 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
316 jo sysc_restart
317 tm __TI_flags+7(%r12),_TIF_PER_TRAP 311 tm __TI_flags+7(%r12),_TIF_PER_TRAP
318 jo sysc_singlestep 312 jo sysc_singlestep
319 j sysc_return # beware of critical section cleanup 313 j sysc_return # beware of critical section cleanup
@@ -339,11 +333,15 @@ sysc_sigpending:
339 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP 333 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
340 la %r2,SP_PTREGS(%r15) # load pt_regs 334 la %r2,SP_PTREGS(%r15) # load pt_regs
341 brasl %r14,do_signal # call do_signal 335 brasl %r14,do_signal # call do_signal
342 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 336 tm __TI_flags+7(%r12),_TIF_SYSCALL
343 jo sysc_restart 337 jno sysc_return
344 tm __TI_flags+7(%r12),_TIF_PER_TRAP 338 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
345 jo sysc_singlestep 339 lghi %r7,0 # svc 0 returns -ENOSYS
346 j sysc_return 340 lh %r1,SP_SVC_CODE+2(%r15) # load new svc number
341 cghi %r1,NR_syscalls
342 jnl sysc_nr_ok # invalid svc number -> do svc 0
343 slag %r7,%r1,2
344 j sysc_nr_ok # restart svc
347 345
348# 346#
349# _TIF_NOTIFY_RESUME is set, call do_notify_resume 347# _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -354,23 +352,10 @@ sysc_notify_resume:
354 jg do_notify_resume # call do_notify_resume 352 jg do_notify_resume # call do_notify_resume
355 353
356# 354#
357# _TIF_RESTART_SVC is set, set up registers and restart svc
358#
359sysc_restart:
360 ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
361 lg %r7,SP_R2(%r15) # load new svc number
362 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
363 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
364 sth %r7,SP_SVCNR(%r15)
365 slag %r7,%r7,2
366 j sysc_nr_ok # restart svc
367
368#
369# _TIF_PER_TRAP is set, call do_per_trap 355# _TIF_PER_TRAP is set, call do_per_trap
370# 356#
371sysc_singlestep: 357sysc_singlestep:
372 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP 358 ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
373 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
374 la %r2,SP_PTREGS(%r15) # address of register-save area 359 la %r2,SP_PTREGS(%r15) # address of register-save area
375 larl %r14,sysc_return # load adr. of system return 360 larl %r14,sysc_return # load adr. of system return
376 jg do_per_trap 361 jg do_per_trap
@@ -382,7 +367,7 @@ sysc_singlestep:
382sysc_tracesys: 367sysc_tracesys:
383 la %r2,SP_PTREGS(%r15) # load pt_regs 368 la %r2,SP_PTREGS(%r15) # load pt_regs
384 la %r3,0 369 la %r3,0
385 llgh %r0,SP_SVCNR(%r15) 370 llgh %r0,SP_SVC_CODE+2(%r15)
386 stg %r0,SP_R2(%r15) 371 stg %r0,SP_R2(%r15)
387 brasl %r14,do_syscall_trace_enter 372 brasl %r14,do_syscall_trace_enter
388 lghi %r0,NR_syscalls 373 lghi %r0,NR_syscalls
@@ -397,7 +382,7 @@ sysc_tracego:
397 basr %r14,%r8 # call sys_xxx 382 basr %r14,%r8 # call sys_xxx
398 stg %r2,SP_R2(%r15) # store return value 383 stg %r2,SP_R2(%r15) # store return value
399sysc_tracenogo: 384sysc_tracenogo:
400 tm __TI_flags+6(%r12),_TIF_SYSCALL 385 tm __TI_flags+6(%r12),_TIF_TRACE >> 8
401 jz sysc_return 386 jz sysc_return
402 la %r2,SP_PTREGS(%r15) # load pt_regs 387 la %r2,SP_PTREGS(%r15) # load pt_regs
403 larl %r14,sysc_return # return point is sysc_return 388 larl %r14,sysc_return # return point is sysc_return
@@ -470,7 +455,6 @@ ENTRY(pgm_check_handler)
470 jnz pgm_per # got per exception -> special case 455 jnz pgm_per # got per exception -> special case
471 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA 456 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
472 CREATE_STACK_FRAME __LC_SAVE_AREA 457 CREATE_STACK_FRAME __LC_SAVE_AREA
473 xc SP_ILC(4,%r15),SP_ILC(%r15)
474 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW 458 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
475 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 459 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
476 HANDLE_SIE_INTERCEPT 460 HANDLE_SIE_INTERCEPT
@@ -550,9 +534,10 @@ pgm_exit2:
550pgm_svcper: 534pgm_svcper:
551 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA 535 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
552 CREATE_STACK_FRAME __LC_SAVE_AREA 536 CREATE_STACK_FRAME __LC_SAVE_AREA
553 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
554 mvc SP_ILC(4,%r15),__LC_SVC_ILC
555 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 537 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
538 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
539 mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
540 oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
556 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 541 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
557 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 542 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
558 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 543 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
@@ -561,7 +546,6 @@ pgm_svcper:
561 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE 546 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
562 mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS 547 mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
563 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID 548 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
564 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
565 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 549 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
566 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 550 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
567 j sysc_do_svc 551 j sysc_do_svc
@@ -571,7 +555,6 @@ pgm_svcper:
571# 555#
572kernel_per: 556kernel_per:
573 REENABLE_IRQS 557 REENABLE_IRQS
574 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
575 la %r2,SP_PTREGS(%r15) # address of register-save area 558 la %r2,SP_PTREGS(%r15) # address of register-save area
576 brasl %r14,do_per_trap 559 brasl %r14,do_per_trap
577 j pgm_exit 560 j pgm_exit
@@ -869,12 +852,12 @@ restart_go:
869# PSW restart interrupt handler 852# PSW restart interrupt handler
870# 853#
871ENTRY(psw_restart_int_handler) 854ENTRY(psw_restart_int_handler)
872 stg %r15,__LC_SAVE_AREA_64(%r0) # save r15 855 stg %r15,__LC_SAVE_AREA+120(%r0) # save r15
873 larl %r15,restart_stack # load restart stack 856 larl %r15,restart_stack # load restart stack
874 lg %r15,0(%r15) 857 lg %r15,0(%r15)
875 aghi %r15,-SP_SIZE # make room for pt_regs 858 aghi %r15,-SP_SIZE # make room for pt_regs
876 stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack 859 stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
877 mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack 860 mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack
878 mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw 861 mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
879 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 862 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
880 brasl %r14,do_restart 863 brasl %r14,do_restart
@@ -972,9 +955,11 @@ cleanup_system_call:
972 stg %r15,32(%r12) 955 stg %r15,32(%r12)
973 stg %r11,0(%r12) 956 stg %r11,0(%r12)
974 CREATE_STACK_FRAME __LC_SAVE_AREA 957 CREATE_STACK_FRAME __LC_SAVE_AREA
975 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
976 mvc SP_ILC(4,%r15),__LC_SVC_ILC
977 mvc 8(8,%r12),__LC_THREAD_INFO 958 mvc 8(8,%r12),__LC_THREAD_INFO
959 lg %r12,__LC_THREAD_INFO
960 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
961 mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
962 oi __TI_flags+7(%r12),_TIF_SYSCALL
978cleanup_vtime: 963cleanup_vtime:
979 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) 964 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
980 jhe cleanup_stime 965 jhe cleanup_stime
@@ -1096,6 +1081,7 @@ sie_exit:
1096 lghi %r2,0 1081 lghi %r2,0
1097 br %r14 1082 br %r14
1098sie_fault: 1083sie_fault:
1084 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1099 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1085 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1100 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) 1086 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
1101 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 1087 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 2d781bab37bb..900068d2bf92 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -449,10 +449,28 @@ ENTRY(start)
449# 449#
450 .org 0x10000 450 .org 0x10000
451ENTRY(startup) 451ENTRY(startup)
452 j .Lep_startup_normal
453 .org 0x10008
454#
455# This is a list of s390 kernel entry points. At address 0x1000f the number of
456# valid entry points is stored.
457#
458# IMPORTANT: Do not change this table, it is s390 kernel ABI!
459#
460 .ascii "S390EP"
461 .byte 0x00,0x01
462#
463# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
464#
465 .org 0x10010
466ENTRY(startup_kdump)
467 j .Lep_startup_kdump
468.Lep_startup_normal:
452 basr %r13,0 # get base 469 basr %r13,0 # get base
453.LPG0: 470.LPG0:
454 xc 0x200(256),0x200 # partially clear lowcore 471 xc 0x200(256),0x200 # partially clear lowcore
455 xc 0x300(256),0x300 472 xc 0x300(256),0x300
473 xc 0xe00(256),0xe00
456 stck __LC_LAST_UPDATE_CLOCK 474 stck __LC_LAST_UPDATE_CLOCK
457 spt 5f-.LPG0(%r13) 475 spt 5f-.LPG0(%r13)
458 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) 476 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
@@ -534,6 +552,8 @@ ENTRY(startup)
534 .align 8 552 .align 8
5355: .long 0x7fffffff,0xffffffff 5535: .long 0x7fffffff,0xffffffff
536 554
555#include "head_kdump.S"
556
537# 557#
538# params at 10400 (setup.h) 558# params at 10400 (setup.h)
539# 559#
@@ -541,6 +561,8 @@ ENTRY(startup)
541 .long 0,0 # IPL_DEVICE 561 .long 0,0 # IPL_DEVICE
542 .long 0,0 # INITRD_START 562 .long 0,0 # INITRD_START
543 .long 0,0 # INITRD_SIZE 563 .long 0,0 # INITRD_SIZE
564 .long 0,0 # OLDMEM_BASE
565 .long 0,0 # OLDMEM_SIZE
544 566
545 .org COMMAND_LINE 567 .org COMMAND_LINE
546 .byte "root=/dev/ram0 ro" 568 .byte "root=/dev/ram0 ro"
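
The startup code now publishes a small entry-point table that external tools can rely on: the string "S390EP" at 0x10008, an entry-point count at 0x1000f, the kdump entry startup_kdump at 0x10010, and the OLDMEM_BASE/OLDMEM_SIZE parameter slots at 0x10418 and 0x10420 (the same offsets head_kdump.S reads relative to startup). Below is a sketch of how a loader might probe these fields; the buffer handling and function names are illustrative, and the offsets assume the image sits at absolute address 0 as it does on s390:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EP_STRING_OFF	0x10008		/* "S390EP" signature */
#define EP_COUNT_OFF	0x1000f		/* number of valid entry points */
#define KDUMP_ENTRY_OFF	0x10010		/* startup_kdump */
#define OLDMEM_BASE_OFF	0x10418		/* filled in by the loader */
#define OLDMEM_SIZE_OFF	0x10420

static uint64_t read_be64(const unsigned char *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static int probe_kdump_entry(const unsigned char *image, size_t len)
{
	if (len < OLDMEM_SIZE_OFF + 8)
		return -1;
	if (memcmp(image + EP_STRING_OFF, "S390EP", 6) != 0)
		return -1;			/* no entry-point table */
	printf("entry points: %d\n", image[EP_COUNT_OFF]);
	printf("kdump entry at %#x\n", KDUMP_ENTRY_OFF);
	printf("OLDMEM_BASE=%#llx OLDMEM_SIZE=%#llx\n",
	       (unsigned long long) read_be64(image + OLDMEM_BASE_OFF),
	       (unsigned long long) read_be64(image + OLDMEM_SIZE_OFF));
	return 0;
}

int main(void)
{
	static unsigned char image[0x10500];

	memcpy(image + EP_STRING_OFF, "S390EP", 6);
	image[EP_COUNT_OFF] = 0x01;
	return probe_kdump_entry(image, sizeof(image)) ? 1 : 0;
}
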
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index f21954b44dc1..d3f1ab7d90ad 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -92,7 +92,7 @@ ENTRY(_stext)
92.LPG3: 92.LPG3:
93# check control registers 93# check control registers
94 stctl %c0,%c15,0(%r15) 94 stctl %c0,%c15,0(%r15)
95 oi 2(%r15),0x40 # enable sigp emergency signal 95 oi 2(%r15),0x60 # enable sigp emergency & external call
96 oi 0(%r15),0x10 # switch on low address protection 96 oi 0(%r15),0x10 # switch on low address protection
97 lctl %c0,%c15,0(%r15) 97 lctl %c0,%c15,0(%r15)
98 98
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index ae5d492b069e..99348c0eaa41 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -90,7 +90,7 @@ ENTRY(_stext)
90.LPG3: 90.LPG3:
91# check control registers 91# check control registers
92 stctg %c0,%c15,0(%r15) 92 stctg %c0,%c15,0(%r15)
93 oi 6(%r15),0x40 # enable sigp emergency signal 93 oi 6(%r15),0x60 # enable sigp emergency & external call
94 oi 4(%r15),0x10 # switch on low address protection 94 oi 4(%r15),0x10 # switch on low address protection
95 lctlg %c0,%c15,0(%r15) 95 lctlg %c0,%c15,0(%r15)
96 96
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
new file mode 100644
index 000000000000..e1ac3893e972
--- /dev/null
+++ b/arch/s390/kernel/head_kdump.S
@@ -0,0 +1,119 @@
1/*
2 * S390 kdump lowlevel functions (new kernel)
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#define DATAMOVER_ADDR 0x4000
9#define COPY_PAGE_ADDR 0x6000
10
11#ifdef CONFIG_CRASH_DUMP
12
13#
14# kdump entry (new kernel - not yet relocated)
15#
16# Note: This code has to be position independent
17#
18
19.align 2
20.Lep_startup_kdump:
21 lhi %r1,2 # mode 2 = esame (dump)
22 sigp %r1,%r0,0x12 # Switch to esame mode
23 sam64 # Switch to 64 bit addressing
24 basr %r13,0
25.Lbase:
26 larl %r2,.Lbase_addr # Check, if we have been
27 lg %r2,0(%r2) # already relocated:
28 clgr %r2,%r13 #
29 jne .Lrelocate # No : Start data mover
30 lghi %r2,0 # Yes: Start kdump kernel
31 brasl %r14,startup_kdump_relocated
32
33.Lrelocate:
34 larl %r4,startup
35 lg %r2,0x418(%r4) # Get kdump base
36 lg %r3,0x420(%r4) # Get kdump size
37
38 larl %r10,.Lcopy_start # Source of data mover
39 lghi %r8,DATAMOVER_ADDR # Target of data mover
40 mvc 0(256,%r8),0(%r10) # Copy data mover code
41
42 agr %r8,%r2 # Copy data mover to
43 mvc 0(256,%r8),0(%r10) # reserved mem
44
45 lghi %r14,DATAMOVER_ADDR # Jump to copied data mover
46 basr %r14,%r14
47.Lbase_addr:
48 .quad .Lbase
49
50#
51# kdump data mover code (runs at address DATAMOVER_ADDR)
52#
53# r2: kdump base address
54# r3: kdump size
55#
56.Lcopy_start:
57 basr %r13,0 # Base
580:
59 lgr %r11,%r2 # Save kdump base address
60 lgr %r12,%r2
61 agr %r12,%r3 # Compute kdump end address
62
63 lghi %r5,0
64 lghi %r10,COPY_PAGE_ADDR # Load copy page address
651:
66 mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp
67 mvc 0(256,%r5),0(%r11) # Copy new kernel to old
68 mvc 0(256,%r11),0(%r10) # Copy tmp to new
69 aghi %r11,256
70 aghi %r5,256
71 clgr %r11,%r12
72 jl 1b
73
74 lg %r14,.Lstartup_kdump-0b(%r13)
75 basr %r14,%r14 # Start relocated kernel
76.Lstartup_kdump:
77 .long 0x00000000,0x00000000 + startup_kdump_relocated
78.Lcopy_end:
79
80#
81# Startup of kdump (relocated new kernel)
82#
83.align 2
84startup_kdump_relocated:
85 basr %r13,0
860:
87 mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
88 mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW
89 lhi %r1,1 # Start new kernel
90 diag %r1,%r1,0x308 # with diag 308
91
92.Lno_diag308: # No diag 308
93 sam31 # Switch to 31 bit addr mode
94 sr %r1,%r1 # Erase register r1
95 sr %r2,%r2 # Erase register r2
96 sigp %r1,%r2,0x12 # Switch to 31 bit arch mode
97 lpsw 0 # Start new kernel...
98.align 8
99.Lrestart_psw:
100 .long 0x00080000,0x80000000 + startup
101.Lpgm_psw:
102 .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308
103#else
104.align 2
105.Lep_startup_kdump:
106#ifdef CONFIG_64BIT
107 larl %r13,startup_kdump_crash
108 lpswe 0(%r13)
109.align 8
110startup_kdump_crash:
111 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
112#else
113 basr %r13,0
1140: lpsw startup_kdump_crash-0b(%r13)
115.align 8
116startup_kdump_crash:
117 .long 0x000a0000,0x00000000 + startup_kdump_crash
118#endif /* CONFIG_64BIT */
119#endif /* CONFIG_CRASH_DUMP */
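
The data mover above exchanges the low OLDMEM_SIZE bytes of memory with the kdump image in the crashkernel region, 256 bytes at a time through a bounce page at COPY_PAGE_ADDR, and then jumps to the relocated startup code. A user-space model of that swap loop follows; pointers stand in for absolute addresses, and it is a sketch, not the kernel code itself:

#include <stddef.h>
#include <string.h>

#define BLOCK 256	/* size handled by one mvc in the code above */

static void swap_regions(unsigned char *low, unsigned char *kdump,
			 size_t size)
{
	unsigned char tmp[BLOCK];

	for (size_t off = 0; off < size; off += BLOCK) {
		memcpy(tmp, low + off, BLOCK);		/* old kernel -> tmp */
		memcpy(low + off, kdump + off, BLOCK);	/* new kernel -> old */
		memcpy(kdump + off, tmp, BLOCK);	/* tmp -> new kernel */
	}
}

int main(void)
{
	static unsigned char a[512], b[512];

	a[0] = 1;
	b[0] = 2;
	swap_regions(a, b, sizeof(a));
	return (a[0] == 2 && b[0] == 1) ? 0 : 1;
}

After this swap the old kernel's first OLDMEM_SIZE bytes live at OLDMEM_BASE, which is exactly the mapping that copy_oldmem_page() in crash_dump.c undoes when /proc/vmcore is read.
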
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 48c710206366..affa8e68124a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -16,6 +16,7 @@
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/gfp.h> 18#include <linux/gfp.h>
19#include <linux/crash_dump.h>
19#include <asm/ipl.h> 20#include <asm/ipl.h>
20#include <asm/smp.h> 21#include <asm/smp.h>
21#include <asm/setup.h> 22#include <asm/setup.h>
@@ -26,6 +27,7 @@
26#include <asm/sclp.h> 27#include <asm/sclp.h>
27#include <asm/sigp.h> 28#include <asm/sigp.h>
28#include <asm/checksum.h> 29#include <asm/checksum.h>
30#include "entry.h"
29 31
30#define IPL_PARM_BLOCK_VERSION 0 32#define IPL_PARM_BLOCK_VERSION 0
31 33
@@ -275,8 +277,8 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
275static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); 277static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
276 278
277/* VM IPL PARM routines */ 279/* VM IPL PARM routines */
278size_t reipl_get_ascii_vmparm(char *dest, size_t size, 280static size_t reipl_get_ascii_vmparm(char *dest, size_t size,
279 const struct ipl_parameter_block *ipb) 281 const struct ipl_parameter_block *ipb)
280{ 282{
281 int i; 283 int i;
282 size_t len; 284 size_t len;
@@ -338,8 +340,8 @@ static size_t scpdata_length(const char* buf, size_t count)
338 return count; 340 return count;
339} 341}
340 342
341size_t reipl_append_ascii_scpdata(char *dest, size_t size, 343static size_t reipl_append_ascii_scpdata(char *dest, size_t size,
342 const struct ipl_parameter_block *ipb) 344 const struct ipl_parameter_block *ipb)
343{ 345{
344 size_t count; 346 size_t count;
345 size_t i; 347 size_t i;
@@ -1738,7 +1740,11 @@ static struct kobj_attribute on_restart_attr =
1738 1740
1739void do_restart(void) 1741void do_restart(void)
1740{ 1742{
1743 smp_restart_with_online_cpu();
1741 smp_send_stop(); 1744 smp_send_stop();
1745#ifdef CONFIG_CRASH_DUMP
1746 crash_kexec(NULL);
1747#endif
1742 on_restart_trigger.action->fn(&on_restart_trigger); 1748 on_restart_trigger.action->fn(&on_restart_trigger);
1743 stop_run(&on_restart_trigger); 1749 stop_run(&on_restart_trigger);
1744} 1750}
@@ -2009,7 +2015,7 @@ static void do_reset_calls(void)
2009 2015
2010u32 dump_prefix_page; 2016u32 dump_prefix_page;
2011 2017
2012void s390_reset_system(void) 2018void s390_reset_system(void (*func)(void *), void *data)
2013{ 2019{
2014 struct _lowcore *lc; 2020 struct _lowcore *lc;
2015 2021
@@ -2028,15 +2034,19 @@ void s390_reset_system(void)
2028 __ctl_clear_bit(0,28); 2034 __ctl_clear_bit(0,28);
2029 2035
2030 /* Set new machine check handler */ 2036 /* Set new machine check handler */
2031 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; 2037 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
2032 S390_lowcore.mcck_new_psw.addr = 2038 S390_lowcore.mcck_new_psw.addr =
2033 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; 2039 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
2034 2040
2035 /* Set new program check handler */ 2041 /* Set new program check handler */
2036 S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; 2042 S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
2037 S390_lowcore.program_new_psw.addr = 2043 S390_lowcore.program_new_psw.addr =
2038 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 2044 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
2039 2045
2046 /* Store status at absolute zero */
2047 store_status();
2048
2040 do_reset_calls(); 2049 do_reset_calls();
2050 if (func)
2051 func(data);
2041} 2052}
2042
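
s390_reset_system() now takes a continuation: after status has been stored at absolute zero and the reset calls have run, control is optionally handed to a caller-supplied function instead of returning, so a caller such as the kexec/kdump path can chain straight into the next kernel. A minimal sketch of that callback pattern; reset_system() and start_new_kernel() are illustrative names, not the kernel's:

#include <stdio.h>

static void reset_system(void (*func)(void *), void *data)
{
	/* ... machine reset, store status, reset calls ... */
	if (func)
		func(data);	/* typically does not return in the real code */
}

static void start_new_kernel(void *data)
{
	printf("starting image at %p\n", data);
}

int main(void)
{
	int image_stub;

	reset_system(start_new_kernel, &image_stub);
	return 0;
}
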
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1f4050d45f78..b9a7fdd9c814 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -33,7 +33,8 @@ static const struct irq_class intrclass_names[] = {
33 {.name = "EXT" }, 33 {.name = "EXT" },
34 {.name = "I/O" }, 34 {.name = "I/O" },
35 {.name = "CLK", .desc = "[EXT] Clock Comparator" }, 35 {.name = "CLK", .desc = "[EXT] Clock Comparator" },
36 {.name = "IPI", .desc = "[EXT] Signal Processor" }, 36 {.name = "EXC", .desc = "[EXT] External Call" },
37 {.name = "EMS", .desc = "[EXT] Emergency Signal" },
37 {.name = "TMR", .desc = "[EXT] CPU Timer" }, 38 {.name = "TMR", .desc = "[EXT] CPU Timer" },
38 {.name = "TAL", .desc = "[EXT] Timing Alert" }, 39 {.name = "TAL", .desc = "[EXT] Timing Alert" },
39 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, 40 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
@@ -42,8 +43,8 @@ static const struct irq_class intrclass_names[] = {
42 {.name = "SCP", .desc = "[EXT] Service Call" }, 43 {.name = "SCP", .desc = "[EXT] Service Call" },
43 {.name = "IUC", .desc = "[EXT] IUCV" }, 44 {.name = "IUC", .desc = "[EXT] IUCV" },
44 {.name = "CPM", .desc = "[EXT] CPU Measurement" }, 45 {.name = "CPM", .desc = "[EXT] CPU Measurement" },
46 {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" },
45 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, 47 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
46 {.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
47 {.name = "DAS", .desc = "[I/O] DASD" }, 48 {.name = "DAS", .desc = "[I/O] DASD" },
48 {.name = "C15", .desc = "[I/O] 3215" }, 49 {.name = "C15", .desc = "[I/O] 3215" },
49 {.name = "C70", .desc = "[I/O] 3270" }, 50 {.name = "C70", .desc = "[I/O] 3270" },
@@ -53,6 +54,7 @@ static const struct irq_class intrclass_names[] = {
53 {.name = "CLW", .desc = "[I/O] CLAW" }, 54 {.name = "CLW", .desc = "[I/O] CLAW" },
54 {.name = "CTC", .desc = "[I/O] CTC" }, 55 {.name = "CTC", .desc = "[I/O] CTC" },
55 {.name = "APB", .desc = "[I/O] AP Bus" }, 56 {.name = "APB", .desc = "[I/O] AP Bus" },
57 {.name = "CSC", .desc = "[I/O] CHSC Subchannel" },
56 {.name = "NMI", .desc = "[NMI] Machine Check" }, 58 {.name = "NMI", .desc = "[NMI] Machine Check" },
57}; 59};
58 60
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 1d05d669107c..64b761aef004 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -635,7 +635,7 @@ void __kprobes jprobe_return(void)
635 asm volatile(".word 0x0002"); 635 asm volatile(".word 0x0002");
636} 636}
637 637
638void __kprobes jprobe_return_end(void) 638static void __used __kprobes jprobe_return_end(void)
639{ 639{
640 asm volatile("bcr 0,0"); 640 asm volatile("bcr 0,0");
641} 641}
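
jprobe_return_end() becomes static here, and since it apparently has no C callers (it only marks the end of the jprobe_return() sequence), the added __used attribute is what keeps the compiler from discarding it. A generic illustration of that attribute, not the kprobes code itself:

/* Build with: gcc -O2 -Wall -c used_demo.c
 * 'dropped' has no callers and may be eliminated (and warned about);
 * 'kept' is emitted anyway because of __attribute__((used)). */
static void dropped(void)
{
}

static void __attribute__((used)) kept(void)
{
	__asm__("nop");
}

int main(void)
{
	return 0;
}
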
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b09b9c62573e..3cd0f25ab015 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * arch/s390/kernel/machine_kexec.c 2 * arch/s390/kernel/machine_kexec.c
3 * 3 *
4 * Copyright IBM Corp. 2005,2006 4 * Copyright IBM Corp. 2005,2011
5 * 5 *
6 * Author(s): Rolf Adelsberger, 6 * Author(s): Rolf Adelsberger,
7 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 * Michael Holzheu <holzheu@linux.vnet.ibm.com>
8 */ 9 */
9 10
10#include <linux/device.h> 11#include <linux/device.h>
@@ -21,12 +22,162 @@
21#include <asm/smp.h> 22#include <asm/smp.h>
22#include <asm/reset.h> 23#include <asm/reset.h>
23#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/diag.h>
26#include <asm/asm-offsets.h>
24 27
25typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 28typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
26 29
27extern const unsigned char relocate_kernel[]; 30extern const unsigned char relocate_kernel[];
28extern const unsigned long long relocate_kernel_len; 31extern const unsigned long long relocate_kernel_len;
29 32
33#ifdef CONFIG_CRASH_DUMP
34
35void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
36
37/*
38 * Create ELF notes for one CPU
39 */
40static void add_elf_notes(int cpu)
41{
42 struct save_area *sa = (void *) 4608 + store_prefix();
43 void *ptr;
44
45 memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
46 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
47 ptr = fill_cpu_elf_notes(ptr, sa);
48 memset(ptr, 0, sizeof(struct elf_note));
49}
50
51/*
52 * Store status of next available physical CPU
53 */
54static int store_status_next(int start_cpu, int this_cpu)
55{
56 struct save_area *sa = (void *) 4608 + store_prefix();
57 int cpu, rc;
58
59 for (cpu = start_cpu; cpu < 65536; cpu++) {
60 if (cpu == this_cpu)
61 continue;
62 do {
63 rc = raw_sigp(cpu, sigp_stop_and_store_status);
64 } while (rc == sigp_busy);
65 if (rc != sigp_order_code_accepted)
66 continue;
67 if (sa->pref_reg)
68 return cpu;
69 }
70 return -1;
71}
72
73/*
74 * Initialize CPU ELF notes
75 */
76void setup_regs(void)
77{
78 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
79 int cpu, this_cpu, phys_cpu = 0, first = 1;
80
81 this_cpu = stap();
82
83 if (!S390_lowcore.prefixreg_save_area)
84 first = 0;
85 for_each_online_cpu(cpu) {
86 if (first) {
87 add_elf_notes(cpu);
88 first = 0;
89 continue;
90 }
91 phys_cpu = store_status_next(phys_cpu, this_cpu);
92 if (phys_cpu == -1)
93 break;
94 add_elf_notes(cpu);
95 phys_cpu++;
96 }
97 /* Copy dump CPU store status info to absolute zero */
98 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
99}
100
101#endif
102
103/*
104 * Start kdump: We expect here that a store status has been done on our CPU
105 */
106static void __do_machine_kdump(void *image)
107{
108#ifdef CONFIG_CRASH_DUMP
109 int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
110
111 __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
112 setup_regs();
113 start_kdump(1);
114#endif
115}
116
117/*
118 * Check if kdump checksums are valid: We call purgatory with parameter "0"
119 */
120static int kdump_csum_valid(struct kimage *image)
121{
122#ifdef CONFIG_CRASH_DUMP
123 int (*start_kdump)(int) = (void *)image->start;
124 int rc;
125
126 __arch_local_irq_stnsm(0xfb); /* disable DAT */
127 rc = start_kdump(0);
128 __arch_local_irq_stosm(0x04); /* enable DAT */
129 return rc ? 0 : -EINVAL;
130#else
131 return -EINVAL;
132#endif
133}
134
135/*
136 * Map or unmap crashkernel memory
137 */
138static void crash_map_pages(int enable)
139{
140 unsigned long size = resource_size(&crashk_res);
141
142 BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
143 size % KEXEC_CRASH_MEM_ALIGN);
144 if (enable)
145 vmem_add_mapping(crashk_res.start, size);
146 else
147 vmem_remove_mapping(crashk_res.start, size);
148}
149
150/*
151 * Map crashkernel memory
152 */
153void crash_map_reserved_pages(void)
154{
155 crash_map_pages(1);
156}
157
158/*
159 * Unmap crashkernel memory
160 */
161void crash_unmap_reserved_pages(void)
162{
163 crash_map_pages(0);
164}
165
166/*
167 * Give back memory to hypervisor before new kdump is loaded
168 */
169static int machine_kexec_prepare_kdump(void)
170{
171#ifdef CONFIG_CRASH_DUMP
172 if (MACHINE_IS_VM)
173 diag10_range(PFN_DOWN(crashk_res.start),
174 PFN_DOWN(crashk_res.end - crashk_res.start + 1));
175 return 0;
176#else
177 return -EINVAL;
178#endif
179}
180
30int machine_kexec_prepare(struct kimage *image) 181int machine_kexec_prepare(struct kimage *image)
31{ 182{
32 void *reboot_code_buffer; 183 void *reboot_code_buffer;
@@ -35,6 +186,9 @@ int machine_kexec_prepare(struct kimage *image)
35 if (ipl_flags & IPL_NSS_VALID) 186 if (ipl_flags & IPL_NSS_VALID)
36 return -ENOSYS; 187 return -ENOSYS;
37 188
189 if (image->type == KEXEC_TYPE_CRASH)
190 return machine_kexec_prepare_kdump();
191
38 /* We don't support anything but the default image type for now. */ 192 /* We don't support anything but the default image type for now. */
39 if (image->type != KEXEC_TYPE_DEFAULT) 193 if (image->type != KEXEC_TYPE_DEFAULT)
40 return -EINVAL; 194 return -EINVAL;
@@ -51,27 +205,53 @@ void machine_kexec_cleanup(struct kimage *image)
51{ 205{
52} 206}
53 207
208void arch_crash_save_vmcoreinfo(void)
209{
210 VMCOREINFO_SYMBOL(lowcore_ptr);
211 VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
212}
213
54void machine_shutdown(void) 214void machine_shutdown(void)
55{ 215{
56} 216}
57 217
58static void __machine_kexec(void *data) 218/*
219 * Do normal kexec
220 */
221static void __do_machine_kexec(void *data)
59{ 222{
60 relocate_kernel_t data_mover; 223 relocate_kernel_t data_mover;
61 struct kimage *image = data; 224 struct kimage *image = data;
62 225
63 pfault_fini();
64 s390_reset_system();
65
66 data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); 226 data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
67 227
68 /* Call the moving routine */ 228 /* Call the moving routine */
69 (*data_mover)(&image->head, image->start); 229 (*data_mover)(&image->head, image->start);
70 for (;;);
71} 230}
72 231
232/*
233 * Reset system and call either kdump or normal kexec
234 */
235static void __machine_kexec(void *data)
236{
237 struct kimage *image = data;
238
239 pfault_fini();
240 if (image->type == KEXEC_TYPE_CRASH)
241 s390_reset_system(__do_machine_kdump, data);
242 else
243 s390_reset_system(__do_machine_kexec, data);
244 disabled_wait((unsigned long) __builtin_return_address(0));
245}
246
247/*
248 * Do either kdump or normal kexec. In case of kdump we first ask
 249 * purgatory whether the kdump checksums are valid.
250 */
73void machine_kexec(struct kimage *image) 251void machine_kexec(struct kimage *image)
74{ 252{
253 if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
254 return;
75 tracer_disable(); 255 tracer_disable();
76 smp_send_stop(); 256 smp_send_stop();
77 smp_switch_to_ipl_cpu(__machine_kexec, image); 257 smp_switch_to_ipl_cpu(__machine_kexec, image);
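The machine_kexec.c changes above split the reset path: for a crash image the kernel first lets purgatory verify the loaded segments (kdump_csum_valid), then switches to the IPL CPU, resets the system, and branches into __do_machine_kdump instead of the normal relocate_kernel path. Below is a simplified, userspace-only model of that dispatch; the struct, the enum values and the two do_* helpers are stand-ins for illustration, not the kernel's definitions:

	#include <stdio.h>
	#include <stdbool.h>

	enum kexec_type { KEXEC_TYPE_DEFAULT, KEXEC_TYPE_CRASH };

	struct kimage { enum kexec_type type; };

	static bool kdump_csum_valid(struct kimage *image)
	{
		/* The real code calls purgatory with argument 0 and checks its
		 * return code; here we simply pretend the check passed. */
		(void) image;
		return true;
	}

	static void do_machine_kdump(struct kimage *image)
	{
		(void) image;
		puts("store CPU registers, then jump into the preloaded kdump kernel");
	}

	static void do_machine_kexec(struct kimage *image)
	{
		(void) image;
		puts("run relocate_kernel and start the new kernel");
	}

	static void machine_kexec(struct kimage *image)
	{
		/* Crash images are only started if purgatory's checksums pass. */
		if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
			return;
		/* The real path also stops all CPUs, switches to the IPL CPU
		 * and resets the system before branching. */
		if (image->type == KEXEC_TYPE_CRASH)
			do_machine_kdump(image);
		else
			do_machine_kexec(image);
	}

	int main(void)
	{
		struct kimage crash = { .type = KEXEC_TYPE_CRASH };

		machine_kexec(&crash);
		return 0;
	}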
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 0fbe4e32f7ba..19b4568f4cee 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -62,3 +62,72 @@ void detect_memory_layout(struct mem_chunk chunk[])
62 arch_local_irq_restore(flags); 62 arch_local_irq_restore(flags);
63} 63}
64EXPORT_SYMBOL(detect_memory_layout); 64EXPORT_SYMBOL(detect_memory_layout);
65
66/*
67 * Create memory hole with given address, size, and type
68 */
69void create_mem_hole(struct mem_chunk chunks[], unsigned long addr,
70 unsigned long size, int type)
71{
72 unsigned long start, end, new_size;
73 int i;
74
75 for (i = 0; i < MEMORY_CHUNKS; i++) {
76 if (chunks[i].size == 0)
77 continue;
78 if (addr + size < chunks[i].addr)
79 continue;
80 if (addr >= chunks[i].addr + chunks[i].size)
81 continue;
82 start = max(addr, chunks[i].addr);
83 end = min(addr + size, chunks[i].addr + chunks[i].size);
84 new_size = end - start;
85 if (new_size == 0)
86 continue;
87 if (start == chunks[i].addr &&
88 end == chunks[i].addr + chunks[i].size) {
89 /* Remove chunk */
90 chunks[i].type = type;
91 } else if (start == chunks[i].addr) {
92 /* Make chunk smaller at start */
93 if (i >= MEMORY_CHUNKS - 1)
94 panic("Unable to create memory hole");
95 memmove(&chunks[i + 1], &chunks[i],
96 sizeof(struct mem_chunk) *
97 (MEMORY_CHUNKS - (i + 1)));
98 chunks[i + 1].addr = chunks[i].addr + new_size;
99 chunks[i + 1].size = chunks[i].size - new_size;
100 chunks[i].size = new_size;
101 chunks[i].type = type;
102 i += 1;
103 } else if (end == chunks[i].addr + chunks[i].size) {
104 /* Make chunk smaller at end */
105 if (i >= MEMORY_CHUNKS - 1)
106 panic("Unable to create memory hole");
107 memmove(&chunks[i + 1], &chunks[i],
108 sizeof(struct mem_chunk) *
109 (MEMORY_CHUNKS - (i + 1)));
110 chunks[i + 1].addr = start;
111 chunks[i + 1].size = new_size;
112 chunks[i + 1].type = type;
113 chunks[i].size -= new_size;
114 i += 1;
115 } else {
116 /* Create memory hole */
117 if (i >= MEMORY_CHUNKS - 2)
118 panic("Unable to create memory hole");
119 memmove(&chunks[i + 2], &chunks[i],
120 sizeof(struct mem_chunk) *
121 (MEMORY_CHUNKS - (i + 2)));
122 chunks[i + 1].addr = addr;
123 chunks[i + 1].size = size;
124 chunks[i + 1].type = type;
125 chunks[i + 2].addr = addr + size;
126 chunks[i + 2].size =
127 chunks[i].addr + chunks[i].size - (addr + size);
128 chunks[i + 2].type = chunks[i].type;
129 chunks[i].size = addr - chunks[i].addr;
130 i += 2;
131 }
132 }
133}
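To make the new create_mem_hole() logic above concrete, the sketch below shows the array transformation its middle branch performs when a hole falls strictly inside one chunk: the chunk is split into a head, the hole itself (carrying the new type) and a tail. The struct and the CHUNK_* values here are simplified stand-ins for illustration:

	#include <stdio.h>

	#define CHUNK_READ_WRITE 0
	#define CHUNK_CRASHK     4	/* value is illustrative only */

	struct mem_chunk {
		unsigned long addr;
		unsigned long size;
		int type;
	};

	int main(void)
	{
		/* One 1 GiB read/write chunk starting at address 0. */
		struct mem_chunk before = { 0, 1UL << 30, CHUNK_READ_WRITE };

		/* Punching a 128 MiB CHUNK_CRASHK hole at 256 MiB splits it
		 * into three consecutive chunks, as the middle branch of
		 * create_mem_hole() does. */
		unsigned long hole_addr = 256UL << 20, hole_size = 128UL << 20;
		struct mem_chunk after[3] = {
			{ before.addr, hole_addr - before.addr, before.type },
			{ hole_addr, hole_size, CHUNK_CRASHK },
			{ hole_addr + hole_size,
			  before.addr + before.size - (hole_addr + hole_size),
			  before.type },
		};

		for (int i = 0; i < 3; i++)
			printf("chunk %d: addr=%#lx size=%#lx type=%d\n",
			       i, after[i].addr, after[i].size, after[i].type);
		return 0;
	}

The other two branches of the real function handle the cases where the hole touches the start or the end of a chunk, which need only one extra array slot instead of two.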
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 541a7509faeb..9451b210a1b4 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -12,6 +12,7 @@
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/elfcore.h>
15#include <linux/smp.h> 16#include <linux/smp.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
@@ -117,7 +118,8 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
117 struct pt_regs regs; 118 struct pt_regs regs;
118 119
119 memset(&regs, 0, sizeof(regs)); 120 memset(&regs, 0, sizeof(regs));
120 regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; 121 regs.psw.mask = psw_kernel_bits |
122 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
121 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; 123 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
122 regs.gprs[9] = (unsigned long) fn; 124 regs.gprs[9] = (unsigned long) fn;
123 regs.gprs[10] = (unsigned long) arg; 125 regs.gprs[10] = (unsigned long) arg;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 311e9d712888..6e0073e43f54 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -74,7 +74,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
74 74
75static void *c_start(struct seq_file *m, loff_t *pos) 75static void *c_start(struct seq_file *m, loff_t *pos)
76{ 76{
77 return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; 77 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
78} 78}
79 79
80static void *c_next(struct seq_file *m, void *v, loff_t *pos) 80static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index ef86ad243986..450931a45b68 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -42,34 +42,37 @@ enum s390_regset {
42 REGSET_GENERAL, 42 REGSET_GENERAL,
43 REGSET_FP, 43 REGSET_FP,
44 REGSET_LAST_BREAK, 44 REGSET_LAST_BREAK,
45 REGSET_SYSTEM_CALL,
45 REGSET_GENERAL_EXTENDED, 46 REGSET_GENERAL_EXTENDED,
46}; 47};
47 48
48void update_per_regs(struct task_struct *task) 49void update_per_regs(struct task_struct *task)
49{ 50{
50 static const struct per_regs per_single_step = {
51 .control = PER_EVENT_IFETCH,
52 .start = 0,
53 .end = PSW_ADDR_INSN,
54 };
55 struct pt_regs *regs = task_pt_regs(task); 51 struct pt_regs *regs = task_pt_regs(task);
56 struct thread_struct *thread = &task->thread; 52 struct thread_struct *thread = &task->thread;
57 const struct per_regs *new; 53 struct per_regs old, new;
58 struct per_regs old; 54
59 55 /* Copy user specified PER registers */
60 /* TIF_SINGLE_STEP overrides the user specified PER registers. */ 56 new.control = thread->per_user.control;
61 new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ? 57 new.start = thread->per_user.start;
62 &per_single_step : &thread->per_user; 58 new.end = thread->per_user.end;
59
60 /* merge TIF_SINGLE_STEP into user specified PER registers. */
61 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
62 new.control |= PER_EVENT_IFETCH;
63 new.start = 0;
64 new.end = PSW_ADDR_INSN;
65 }
63 66
64 /* Take care of the PER enablement bit in the PSW. */ 67 /* Take care of the PER enablement bit in the PSW. */
65 if (!(new->control & PER_EVENT_MASK)) { 68 if (!(new.control & PER_EVENT_MASK)) {
66 regs->psw.mask &= ~PSW_MASK_PER; 69 regs->psw.mask &= ~PSW_MASK_PER;
67 return; 70 return;
68 } 71 }
69 regs->psw.mask |= PSW_MASK_PER; 72 regs->psw.mask |= PSW_MASK_PER;
70 __ctl_store(old, 9, 11); 73 __ctl_store(old, 9, 11);
71 if (memcmp(new, &old, sizeof(struct per_regs)) != 0) 74 if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
72 __ctl_load(*new, 9, 11); 75 __ctl_load(new, 9, 11);
73} 76}
74 77
75void user_enable_single_step(struct task_struct *task) 78void user_enable_single_step(struct task_struct *task)
@@ -166,8 +169,8 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
166 */ 169 */
167 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); 170 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
168 if (addr == (addr_t) &dummy->regs.psw.mask) 171 if (addr == (addr_t) &dummy->regs.psw.mask)
169 /* Remove per bit from user psw. */ 172 /* Return a clean psw mask. */
170 tmp &= ~PSW_MASK_PER; 173 tmp = psw_user_bits | (tmp & PSW_MASK_USER);
171 174
172 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { 175 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
173 /* 176 /*
@@ -289,18 +292,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
289 * psw and gprs are stored on the stack 292 * psw and gprs are stored on the stack
290 */ 293 */
291 if (addr == (addr_t) &dummy->regs.psw.mask && 294 if (addr == (addr_t) &dummy->regs.psw.mask &&
292#ifdef CONFIG_COMPAT 295 ((data & ~PSW_MASK_USER) != psw_user_bits ||
293 data != PSW_MASK_MERGE(psw_user32_bits, data) && 296 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
294#endif
295 data != PSW_MASK_MERGE(psw_user_bits, data))
296 /* Invalid psw mask. */ 297 /* Invalid psw mask. */
297 return -EINVAL; 298 return -EINVAL;
298#ifndef CONFIG_64BIT
299 if (addr == (addr_t) &dummy->regs.psw.addr) 299 if (addr == (addr_t) &dummy->regs.psw.addr)
300 /* I'd like to reject addresses without the 300 /*
301 high order bit but older gdb's rely on it */ 301 * The debugger changed the instruction address,
302 data |= PSW_ADDR_AMODE; 302 * reset system call restart, see signal.c:do_signal
303#endif 303 */
304 task_thread_info(child)->system_call = 0;
305
304 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; 306 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
305 307
306 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 308 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -495,21 +497,21 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
495 __u32 tmp; 497 __u32 tmp;
496 498
497 if (addr < (addr_t) &dummy32->regs.acrs) { 499 if (addr < (addr_t) &dummy32->regs.acrs) {
500 struct pt_regs *regs = task_pt_regs(child);
498 /* 501 /*
499 * psw and gprs are stored on the stack 502 * psw and gprs are stored on the stack
500 */ 503 */
501 if (addr == (addr_t) &dummy32->regs.psw.mask) { 504 if (addr == (addr_t) &dummy32->regs.psw.mask) {
502 /* Fake a 31 bit psw mask. */ 505 /* Fake a 31 bit psw mask. */
503 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); 506 tmp = (__u32)(regs->psw.mask >> 32);
504 tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); 507 tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
505 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 508 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
506 /* Fake a 31 bit psw address. */ 509 /* Fake a 31 bit psw address. */
507 tmp = (__u32) task_pt_regs(child)->psw.addr | 510 tmp = (__u32) regs->psw.addr |
508 PSW32_ADDR_AMODE31; 511 (__u32)(regs->psw.mask & PSW_MASK_BA);
509 } else { 512 } else {
510 /* gpr 0-15 */ 513 /* gpr 0-15 */
511 tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw + 514 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
512 addr*2 + 4);
513 } 515 }
514 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 516 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
515 /* 517 /*
@@ -594,24 +596,32 @@ static int __poke_user_compat(struct task_struct *child,
594 addr_t offset; 596 addr_t offset;
595 597
596 if (addr < (addr_t) &dummy32->regs.acrs) { 598 if (addr < (addr_t) &dummy32->regs.acrs) {
599 struct pt_regs *regs = task_pt_regs(child);
597 /* 600 /*
598 * psw, gprs, acrs and orig_gpr2 are stored on the stack 601 * psw, gprs, acrs and orig_gpr2 are stored on the stack
599 */ 602 */
600 if (addr == (addr_t) &dummy32->regs.psw.mask) { 603 if (addr == (addr_t) &dummy32->regs.psw.mask) {
601 /* Build a 64 bit psw mask from 31 bit mask. */ 604 /* Build a 64 bit psw mask from 31 bit mask. */
602 if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) 605 if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
603 /* Invalid psw mask. */ 606 /* Invalid psw mask. */
604 return -EINVAL; 607 return -EINVAL;
605 task_pt_regs(child)->psw.mask = 608 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
606 PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); 609 (regs->psw.mask & PSW_MASK_BA) |
610 (__u64)(tmp & PSW32_MASK_USER) << 32;
607 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 611 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
608 /* Build a 64 bit psw address from 31 bit address. */ 612 /* Build a 64 bit psw address from 31 bit address. */
609 task_pt_regs(child)->psw.addr = 613 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
610 (__u64) tmp & PSW32_ADDR_INSN; 614 /* Transfer 31 bit amode bit to psw mask. */
615 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
616 (__u64)(tmp & PSW32_ADDR_AMODE);
617 /*
618 * The debugger changed the instruction address,
619 * reset system call restart, see signal.c:do_signal
620 */
621 task_thread_info(child)->system_call = 0;
611 } else { 622 } else {
612 /* gpr 0-15 */ 623 /* gpr 0-15 */
613 *(__u32*)((addr_t) &task_pt_regs(child)->psw 624 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
614 + addr*2 + 4) = tmp;
615 } 625 }
616 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 626 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
617 /* 627 /*
@@ -735,7 +745,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
735 * debugger stored an invalid system call number. Skip 745 * debugger stored an invalid system call number. Skip
736 * the system call and the system call restart handling. 746 * the system call and the system call restart handling.
737 */ 747 */
738 regs->svcnr = 0; 748 clear_thread_flag(TIF_SYSCALL);
739 ret = -1; 749 ret = -1;
740 } 750 }
741 751
@@ -897,6 +907,26 @@ static int s390_last_break_get(struct task_struct *target,
897 907
898#endif 908#endif
899 909
910static int s390_system_call_get(struct task_struct *target,
911 const struct user_regset *regset,
912 unsigned int pos, unsigned int count,
913 void *kbuf, void __user *ubuf)
914{
915 unsigned int *data = &task_thread_info(target)->system_call;
916 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
917 data, 0, sizeof(unsigned int));
918}
919
920static int s390_system_call_set(struct task_struct *target,
921 const struct user_regset *regset,
922 unsigned int pos, unsigned int count,
923 const void *kbuf, const void __user *ubuf)
924{
925 unsigned int *data = &task_thread_info(target)->system_call;
926 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
927 data, 0, sizeof(unsigned int));
928}
929
900static const struct user_regset s390_regsets[] = { 930static const struct user_regset s390_regsets[] = {
901 [REGSET_GENERAL] = { 931 [REGSET_GENERAL] = {
902 .core_note_type = NT_PRSTATUS, 932 .core_note_type = NT_PRSTATUS,
@@ -923,6 +953,14 @@ static const struct user_regset s390_regsets[] = {
923 .get = s390_last_break_get, 953 .get = s390_last_break_get,
924 }, 954 },
925#endif 955#endif
956 [REGSET_SYSTEM_CALL] = {
957 .core_note_type = NT_S390_SYSTEM_CALL,
958 .n = 1,
959 .size = sizeof(unsigned int),
960 .align = sizeof(unsigned int),
961 .get = s390_system_call_get,
962 .set = s390_system_call_set,
963 },
926}; 964};
927 965
928static const struct user_regset_view user_s390_view = { 966static const struct user_regset_view user_s390_view = {
@@ -1102,6 +1140,14 @@ static const struct user_regset s390_compat_regsets[] = {
1102 .align = sizeof(long), 1140 .align = sizeof(long),
1103 .get = s390_compat_last_break_get, 1141 .get = s390_compat_last_break_get,
1104 }, 1142 },
1143 [REGSET_SYSTEM_CALL] = {
1144 .core_note_type = NT_S390_SYSTEM_CALL,
1145 .n = 1,
1146 .size = sizeof(compat_uint_t),
1147 .align = sizeof(compat_uint_t),
1148 .get = s390_system_call_get,
1149 .set = s390_system_call_set,
1150 },
1105 [REGSET_GENERAL_EXTENDED] = { 1151 [REGSET_GENERAL_EXTENDED] = {
1106 .core_note_type = NT_S390_HIGH_GPRS, 1152 .core_note_type = NT_S390_HIGH_GPRS,
1107 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1153 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
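The ptrace.c hunks above export the interrupted system call number as a new per-thread regset (NT_S390_SYSTEM_CALL) and let a debugger read, clear or change it. A rough userspace sketch of reading it through PTRACE_GETREGSET follows; it assumes headers that already define PTRACE_GETREGSET and NT_S390_SYSTEM_CALL, takes the target pid on the command line, and omits most error handling:

	#include <stdio.h>
	#include <stdlib.h>
	#include <elf.h>		/* NT_S390_SYSTEM_CALL */
	#include <sys/ptrace.h>		/* PTRACE_ATTACH, PTRACE_GETREGSET */
	#include <sys/uio.h>		/* struct iovec */
	#include <sys/wait.h>
	#include <sys/types.h>

	int main(int argc, char *argv[])
	{
		unsigned int syscall_nr = 0;
		struct iovec iov = { .iov_base = &syscall_nr,
				     .iov_len = sizeof(syscall_nr) };
		pid_t pid;

		if (argc != 2)
			return 1;
		pid = (pid_t) atoi(argv[1]);
		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
			perror("PTRACE_ATTACH");
			return 1;
		}
		waitpid(pid, NULL, 0);	/* wait for the tracee to stop */
		if (ptrace(PTRACE_GETREGSET, pid, (void *) NT_S390_SYSTEM_CALL,
			   &iov) == -1)
			perror("PTRACE_GETREGSET");
		else
			printf("system_call = %u (0: not in a system call)\n",
			       syscall_nr);
		ptrace(PTRACE_DETACH, pid, NULL, NULL);
		return 0;
	}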
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 303d961c3bb5..ad67c214be04 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -10,6 +10,12 @@
10#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
11 11
12# 12#
13# store_status: Empty implementation until kdump is supported on 31 bit
14#
15ENTRY(store_status)
16 br %r14
17
18#
13# do_reipl_asm 19# do_reipl_asm
14# Parameter: r2 = schid of reipl device 20# Parameter: r2 = schid of reipl device
15# 21#
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index e690975403f4..732a793ec53a 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -17,11 +17,11 @@
17# 17#
18ENTRY(store_status) 18ENTRY(store_status)
19 /* Save register one and load save area base */ 19 /* Save register one and load save area base */
20 stg %r1,__LC_SAVE_AREA_64(%r0) 20 stg %r1,__LC_SAVE_AREA+120(%r0)
21 lghi %r1,SAVE_AREA_BASE 21 lghi %r1,SAVE_AREA_BASE
22 /* General purpose registers */ 22 /* General purpose registers */
23 stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) 23 stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
24 lg %r2,__LC_SAVE_AREA_64(%r0) 24 lg %r2,__LC_SAVE_AREA+120(%r0)
25 stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) 25 stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
26 /* Control registers */ 26 /* Control registers */
27 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) 27 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
@@ -62,8 +62,11 @@ ENTRY(store_status)
62 larl %r2,store_status 62 larl %r2,store_status
63 stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) 63 stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
64 br %r14 64 br %r14
65.align 8 65
66 .section .bss
67 .align 8
66.Lclkcmp: .quad 0x0000000000000000 68.Lclkcmp: .quad 0x0000000000000000
69 .previous
67 70
68# 71#
69# do_reipl_asm 72# do_reipl_asm
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b371c37061d..8ac6bfa2786c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,9 @@
42#include <linux/reboot.h> 42#include <linux/reboot.h>
43#include <linux/topology.h> 43#include <linux/topology.h>
44#include <linux/ftrace.h> 44#include <linux/ftrace.h>
45#include <linux/kexec.h>
46#include <linux/crash_dump.h>
47#include <linux/memory.h>
45 48
46#include <asm/ipl.h> 49#include <asm/ipl.h>
47#include <asm/uaccess.h> 50#include <asm/uaccess.h>
@@ -57,12 +60,13 @@
57#include <asm/ebcdic.h> 60#include <asm/ebcdic.h>
58#include <asm/compat.h> 61#include <asm/compat.h>
59#include <asm/kvm_virtio.h> 62#include <asm/kvm_virtio.h>
63#include <asm/diag.h>
60 64
61long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | 65long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
62 PSW_MASK_MCHECK | PSW_DEFAULT_KEY); 66 PSW_MASK_EA | PSW_MASK_BA;
63long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | 67long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
64 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | 68 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
65 PSW_MASK_PSTATE | PSW_DEFAULT_KEY); 69 PSW_MASK_PSTATE | PSW_ASC_HOME;
66 70
67/* 71/*
68 * User copy operations. 72 * User copy operations.
@@ -274,22 +278,14 @@ early_param("mem", early_parse_mem);
274unsigned int user_mode = HOME_SPACE_MODE; 278unsigned int user_mode = HOME_SPACE_MODE;
275EXPORT_SYMBOL_GPL(user_mode); 279EXPORT_SYMBOL_GPL(user_mode);
276 280
277static int set_amode_and_uaccess(unsigned long user_amode, 281static int set_amode_primary(void)
278 unsigned long user32_amode)
279{ 282{
280 psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | 283 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
281 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | 284 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
282 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
283#ifdef CONFIG_COMPAT 285#ifdef CONFIG_COMPAT
284 psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | 286 psw32_user_bits =
285 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | 287 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
286 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
287 psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
288 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
289 PSW32_MASK_PSTATE;
290#endif 288#endif
291 psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
292 PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
293 289
294 if (MACHINE_HAS_MVCOS) { 290 if (MACHINE_HAS_MVCOS) {
295 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); 291 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
@@ -325,7 +321,7 @@ early_param("user_mode", early_parse_user_mode);
325static void setup_addressing_mode(void) 321static void setup_addressing_mode(void)
326{ 322{
327 if (user_mode == PRIMARY_SPACE_MODE) { 323 if (user_mode == PRIMARY_SPACE_MODE) {
328 if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) 324 if (set_amode_primary())
329 pr_info("Address spaces switched, " 325 pr_info("Address spaces switched, "
330 "mvcos available\n"); 326 "mvcos available\n");
331 else 327 else
@@ -344,24 +340,25 @@ setup_lowcore(void)
344 */ 340 */
345 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); 341 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
346 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 342 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
347 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 343 lc->restart_psw.mask = psw_kernel_bits;
348 lc->restart_psw.addr = 344 lc->restart_psw.addr =
349 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 345 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
350 if (user_mode != HOME_SPACE_MODE) 346 lc->external_new_psw.mask = psw_kernel_bits |
351 lc->restart_psw.mask |= PSW_ASC_HOME; 347 PSW_MASK_DAT | PSW_MASK_MCHECK;
352 lc->external_new_psw.mask = psw_kernel_bits;
353 lc->external_new_psw.addr = 348 lc->external_new_psw.addr =
354 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 349 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
355 lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; 350 lc->svc_new_psw.mask = psw_kernel_bits |
351 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
356 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 352 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
357 lc->program_new_psw.mask = psw_kernel_bits; 353 lc->program_new_psw.mask = psw_kernel_bits |
354 PSW_MASK_DAT | PSW_MASK_MCHECK;
358 lc->program_new_psw.addr = 355 lc->program_new_psw.addr =
359 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; 356 PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
360 lc->mcck_new_psw.mask = 357 lc->mcck_new_psw.mask = psw_kernel_bits;
361 psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
362 lc->mcck_new_psw.addr = 358 lc->mcck_new_psw.addr =
363 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 359 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
364 lc->io_new_psw.mask = psw_kernel_bits; 360 lc->io_new_psw.mask = psw_kernel_bits |
361 PSW_MASK_DAT | PSW_MASK_MCHECK;
365 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 362 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
366 lc->clock_comparator = -1ULL; 363 lc->clock_comparator = -1ULL;
367 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 364 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
@@ -435,10 +432,14 @@ static void __init setup_resources(void)
435 for (i = 0; i < MEMORY_CHUNKS; i++) { 432 for (i = 0; i < MEMORY_CHUNKS; i++) {
436 if (!memory_chunk[i].size) 433 if (!memory_chunk[i].size)
437 continue; 434 continue;
435 if (memory_chunk[i].type == CHUNK_OLDMEM ||
436 memory_chunk[i].type == CHUNK_CRASHK)
437 continue;
438 res = alloc_bootmem_low(sizeof(*res)); 438 res = alloc_bootmem_low(sizeof(*res));
439 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 439 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
440 switch (memory_chunk[i].type) { 440 switch (memory_chunk[i].type) {
441 case CHUNK_READ_WRITE: 441 case CHUNK_READ_WRITE:
442 case CHUNK_CRASHK:
442 res->name = "System RAM"; 443 res->name = "System RAM";
443 break; 444 break;
444 case CHUNK_READ_ONLY: 445 case CHUNK_READ_ONLY:
@@ -479,6 +480,7 @@ static void __init setup_memory_end(void)
479 unsigned long max_mem; 480 unsigned long max_mem;
480 int i; 481 int i;
481 482
483
482#ifdef CONFIG_ZFCPDUMP 484#ifdef CONFIG_ZFCPDUMP
483 if (ipl_info.type == IPL_TYPE_FCP_DUMP) { 485 if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
484 memory_end = ZFCPDUMP_HSA_SIZE; 486 memory_end = ZFCPDUMP_HSA_SIZE;
@@ -545,11 +547,201 @@ static void __init setup_restart_psw(void)
 545 * Setup restart PSW for absolute zero lowcore. This is necessary 547 * Setup restart PSW for absolute zero lowcore. This is necessary
546 * if PSW restart is done on an offline CPU that has lowcore zero 548 * if PSW restart is done on an offline CPU that has lowcore zero
547 */ 549 */
548 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 550 psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
549 psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 551 psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
550 copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); 552 copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
551} 553}
552 554
555static void __init setup_vmcoreinfo(void)
556{
557#ifdef CONFIG_KEXEC
558 unsigned long ptr = paddr_vmcoreinfo_note();
559
560 copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
561#endif
562}
563
564#ifdef CONFIG_CRASH_DUMP
565
566/*
567 * Find suitable location for crashkernel memory
568 */
569static unsigned long __init find_crash_base(unsigned long crash_size,
570 char **msg)
571{
572 unsigned long crash_base;
573 struct mem_chunk *chunk;
574 int i;
575
576 if (memory_chunk[0].size < crash_size) {
577 *msg = "first memory chunk must be at least crashkernel size";
578 return 0;
579 }
580 if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
581 return OLDMEM_BASE;
582
583 for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
584 chunk = &memory_chunk[i];
585 if (chunk->size == 0)
586 continue;
587 if (chunk->type != CHUNK_READ_WRITE)
588 continue;
589 if (chunk->size < crash_size)
590 continue;
591 crash_base = (chunk->addr + chunk->size) - crash_size;
592 if (crash_base < crash_size)
593 continue;
594 if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
595 continue;
596 if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
597 continue;
598 return crash_base;
599 }
600 *msg = "no suitable area found";
601 return 0;
602}
603
604/*
 605 * Check if crash_base and crash_size are valid
606 */
607static int __init verify_crash_base(unsigned long crash_base,
608 unsigned long crash_size,
609 char **msg)
610{
611 struct mem_chunk *chunk;
612 int i;
613
614 /*
615 * Because we do the swap to zero, we must have at least 'crash_size'
616 * bytes free space before crash_base
617 */
618 if (crash_size > crash_base) {
619 *msg = "crashkernel offset must be greater than size";
620 return -EINVAL;
621 }
622
623 /* First memory chunk must be at least crash_size */
624 if (memory_chunk[0].size < crash_size) {
625 *msg = "first memory chunk must be at least crashkernel size";
626 return -EINVAL;
627 }
628 /* Check if we fit into the respective memory chunk */
629 for (i = 0; i < MEMORY_CHUNKS; i++) {
630 chunk = &memory_chunk[i];
631 if (chunk->size == 0)
632 continue;
633 if (crash_base < chunk->addr)
634 continue;
635 if (crash_base >= chunk->addr + chunk->size)
636 continue;
637 /* we have found the memory chunk */
638 if (crash_base + crash_size > chunk->addr + chunk->size) {
639 *msg = "selected memory chunk is too small for "
640 "crashkernel memory";
641 return -EINVAL;
642 }
643 return 0;
644 }
645 *msg = "invalid memory range specified";
646 return -EINVAL;
647}
648
649/*
650 * Reserve kdump memory by creating a memory hole in the mem_chunk array
651 */
652static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
653 int type)
654{
655
656 create_mem_hole(memory_chunk, addr, size, type);
657}
658
659/*
660 * When kdump is enabled, we have to ensure that no memory from
661 * the area [0 - crashkernel memory size] and
662 * [crashk_res.start - crashk_res.end] is set offline.
663 */
664static int kdump_mem_notifier(struct notifier_block *nb,
665 unsigned long action, void *data)
666{
667 struct memory_notify *arg = data;
668
669 if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
670 return NOTIFY_BAD;
671 if (arg->start_pfn > PFN_DOWN(crashk_res.end))
672 return NOTIFY_OK;
673 if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
674 return NOTIFY_OK;
675 return NOTIFY_BAD;
676}
677
678static struct notifier_block kdump_mem_nb = {
679 .notifier_call = kdump_mem_notifier,
680};
681
682#endif
683
684/*
685 * Make sure that oldmem, where the dump is stored, is protected
686 */
687static void reserve_oldmem(void)
688{
689#ifdef CONFIG_CRASH_DUMP
690 if (!OLDMEM_BASE)
691 return;
692
693 reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
694 reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
695 CHUNK_OLDMEM);
696 if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
697 saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
698 else
699 saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
700#endif
701}
702
703/*
704 * Reserve memory for kdump kernel to be loaded with kexec
705 */
706static void __init reserve_crashkernel(void)
707{
708#ifdef CONFIG_CRASH_DUMP
709 unsigned long long crash_base, crash_size;
710 char *msg;
711 int rc;
712
713 rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
714 &crash_base);
715 if (rc || crash_size == 0)
716 return;
717 crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
718 crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
719 if (register_memory_notifier(&kdump_mem_nb))
720 return;
721 if (!crash_base)
722 crash_base = find_crash_base(crash_size, &msg);
723 if (!crash_base) {
724 pr_info("crashkernel reservation failed: %s\n", msg);
725 unregister_memory_notifier(&kdump_mem_nb);
726 return;
727 }
728 if (verify_crash_base(crash_base, crash_size, &msg)) {
729 pr_info("crashkernel reservation failed: %s\n", msg);
730 unregister_memory_notifier(&kdump_mem_nb);
731 return;
732 }
733 if (!OLDMEM_BASE && MACHINE_IS_VM)
734 diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
735 crashk_res.start = crash_base;
736 crashk_res.end = crash_base + crash_size - 1;
737 insert_resource(&iomem_resource, &crashk_res);
738 reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
739 pr_info("Reserving %lluMB of memory at %lluMB "
740 "for crashkernel (System RAM: %luMB)\n",
741 crash_size >> 20, crash_base >> 20, memory_end >> 20);
742#endif
743}
744
553static void __init 745static void __init
554setup_memory(void) 746setup_memory(void)
555{ 747{
@@ -580,6 +772,14 @@ setup_memory(void)
580 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { 772 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
581 start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; 773 start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
582 774
775#ifdef CONFIG_CRASH_DUMP
776 if (OLDMEM_BASE) {
777 /* Move initrd behind kdump oldmem */
778 if (start + INITRD_SIZE > OLDMEM_BASE &&
779 start < OLDMEM_BASE + OLDMEM_SIZE)
780 start = OLDMEM_BASE + OLDMEM_SIZE;
781 }
782#endif
583 if (start + INITRD_SIZE > memory_end) { 783 if (start + INITRD_SIZE > memory_end) {
584 pr_err("initrd extends beyond end of " 784 pr_err("initrd extends beyond end of "
585 "memory (0x%08lx > 0x%08lx) " 785 "memory (0x%08lx > 0x%08lx) "
@@ -610,7 +810,8 @@ setup_memory(void)
610 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 810 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
611 unsigned long start_chunk, end_chunk, pfn; 811 unsigned long start_chunk, end_chunk, pfn;
612 812
613 if (memory_chunk[i].type != CHUNK_READ_WRITE) 813 if (memory_chunk[i].type != CHUNK_READ_WRITE &&
814 memory_chunk[i].type != CHUNK_CRASHK)
614 continue; 815 continue;
615 start_chunk = PFN_DOWN(memory_chunk[i].addr); 816 start_chunk = PFN_DOWN(memory_chunk[i].addr);
616 end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); 817 end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
@@ -644,6 +845,15 @@ setup_memory(void)
644 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, 845 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
645 BOOTMEM_DEFAULT); 846 BOOTMEM_DEFAULT);
646 847
848#ifdef CONFIG_CRASH_DUMP
849 if (crashk_res.start)
850 reserve_bootmem(crashk_res.start,
851 crashk_res.end - crashk_res.start + 1,
852 BOOTMEM_DEFAULT);
853 if (is_kdump_kernel())
854 reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
855 PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
856#endif
647#ifdef CONFIG_BLK_DEV_INITRD 857#ifdef CONFIG_BLK_DEV_INITRD
648 if (INITRD_START && INITRD_SIZE) { 858 if (INITRD_START && INITRD_SIZE) {
649 if (INITRD_START + INITRD_SIZE <= memory_end) { 859 if (INITRD_START + INITRD_SIZE <= memory_end) {
@@ -812,8 +1022,11 @@ setup_arch(char **cmdline_p)
812 setup_ipl(); 1022 setup_ipl();
813 setup_memory_end(); 1023 setup_memory_end();
814 setup_addressing_mode(); 1024 setup_addressing_mode();
1025 reserve_oldmem();
1026 reserve_crashkernel();
815 setup_memory(); 1027 setup_memory();
816 setup_resources(); 1028 setup_resources();
1029 setup_vmcoreinfo();
817 setup_restart_psw(); 1030 setup_restart_psw();
818 setup_lowcore(); 1031 setup_lowcore();
819 1032
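reserve_crashkernel() above relies on the generic parse_crashkernel() helper for the crashkernel= boot option and then aligns base and size to KEXEC_CRASH_MEM_ALIGN. As a rough illustration of how a size[@offset] string maps to that pair, here is a small userspace parser; it is not the kernel's parser, and the 1 MiB alignment constant is only a stand-in:

	#include <stdio.h>
	#include <stdlib.h>

	#define MEM_ALIGN (1UL << 20)	/* stand-in for KEXEC_CRASH_MEM_ALIGN */
	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

	static unsigned long parse_mem(const char *s, char **end)
	{
		unsigned long val = strtoul(s, end, 0);

		switch (**end) {
		case 'G': val <<= 10; /* fall through */
		case 'M': val <<= 10; /* fall through */
		case 'K': val <<= 10; (*end)++; break;
		}
		return val;
	}

	int main(void)
	{
		char arg[] = "128M@256M";	/* e.g. from crashkernel=128M@256M */
		char *p;
		unsigned long size, base = 0;

		size = parse_mem(arg, &p);
		if (*p == '@')
			base = parse_mem(p + 1, &p);
		printf("crash_size=%#lx crash_base=%#lx (aligned: %#lx/%#lx)\n",
		       size, base, ALIGN_UP(size, MEM_ALIGN),
		       ALIGN_UP(base, MEM_ALIGN));
		return 0;
	}

With such an option the code above reserves the region, inserts crashk_res into the iomem resource tree and logs the "Reserving ... for crashkernel" message shown in the hunk.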
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 9a40e1cc5ec3..05a85bc14c98 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -30,6 +30,7 @@
30#include <asm/ucontext.h> 30#include <asm/ucontext.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/lowcore.h> 32#include <asm/lowcore.h>
33#include <asm/compat.h>
33#include "entry.h" 34#include "entry.h"
34 35
35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -116,7 +117,8 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
116 117
117 /* Copy a 'clean' PSW mask to the user to avoid leaking 118 /* Copy a 'clean' PSW mask to the user to avoid leaking
118 information about whether PER is currently on. */ 119 information about whether PER is currently on. */
119 user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask); 120 user_sregs.regs.psw.mask = psw_user_bits |
121 (regs->psw.mask & PSW_MASK_USER);
120 user_sregs.regs.psw.addr = regs->psw.addr; 122 user_sregs.regs.psw.addr = regs->psw.addr;
121 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 123 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
122 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 124 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
@@ -143,9 +145,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
143 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); 145 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
144 if (err) 146 if (err)
145 return err; 147 return err;
146 regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask, 148 /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
147 user_sregs.regs.psw.mask); 149 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
148 regs->psw.addr = PSW_ADDR_AMODE | user_sregs.regs.psw.addr; 150 (user_sregs.regs.psw.mask & PSW_MASK_USER);
151 /* Check for invalid amode */
152 if (regs->psw.mask & PSW_MASK_EA)
153 regs->psw.mask |= PSW_MASK_BA;
154 regs->psw.addr = user_sregs.regs.psw.addr;
149 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); 155 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
150 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 156 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
151 sizeof(sregs->regs.acrs)); 157 sizeof(sregs->regs.acrs));
@@ -156,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
156 current->thread.fp_regs.fpc &= FPC_VALID_MASK; 162 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
157 163
158 restore_fp_regs(&current->thread.fp_regs); 164 restore_fp_regs(&current->thread.fp_regs);
159 regs->svcnr = 0; /* disable syscall checks */ 165 clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
160 return 0; 166 return 0;
161} 167}
162 168
@@ -288,6 +294,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
288 294
289 /* Set up registers for signal handler */ 295 /* Set up registers for signal handler */
290 regs->gprs[15] = (unsigned long) frame; 296 regs->gprs[15] = (unsigned long) frame;
297 regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
291 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 298 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
292 299
293 regs->gprs[2] = map_signal(sig); 300 regs->gprs[2] = map_signal(sig);
@@ -356,6 +363,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
356 363
357 /* Set up registers for signal handler */ 364 /* Set up registers for signal handler */
358 regs->gprs[15] = (unsigned long) frame; 365 regs->gprs[15] = (unsigned long) frame;
366 regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
359 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 367 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
360 368
361 regs->gprs[2] = map_signal(sig); 369 regs->gprs[2] = map_signal(sig);
@@ -401,7 +409,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
401 */ 409 */
402void do_signal(struct pt_regs *regs) 410void do_signal(struct pt_regs *regs)
403{ 411{
404 unsigned long retval = 0, continue_addr = 0, restart_addr = 0;
405 siginfo_t info; 412 siginfo_t info;
406 int signr; 413 int signr;
407 struct k_sigaction ka; 414 struct k_sigaction ka;
@@ -421,54 +428,45 @@ void do_signal(struct pt_regs *regs)
421 else 428 else
422 oldset = &current->blocked; 429 oldset = &current->blocked;
423 430
424 /* Are we from a system call? */ 431 /*
425 if (regs->svcnr) { 432 * Get signal to deliver. When running under ptrace, at this point
426 continue_addr = regs->psw.addr; 433 * the debugger may change all our registers, including the system
427 restart_addr = continue_addr - regs->ilc; 434 * call information.
428 retval = regs->gprs[2]; 435 */
429 436 current_thread_info()->system_call =
430 /* Prepare for system call restart. We do this here so that a 437 test_thread_flag(TIF_SYSCALL) ? regs->svc_code : 0;
431 debugger will see the already changed PSW. */
432 switch (retval) {
433 case -ERESTARTNOHAND:
434 case -ERESTARTSYS:
435 case -ERESTARTNOINTR:
436 regs->gprs[2] = regs->orig_gpr2;
437 regs->psw.addr = restart_addr;
438 break;
439 case -ERESTART_RESTARTBLOCK:
440 regs->gprs[2] = -EINTR;
441 }
442 regs->svcnr = 0; /* Don't deal with this again. */
443 }
444
445 /* Get signal to deliver. When running under ptrace, at this point
446 the debugger may change all our registers ... */
447 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 438 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
448 439
449 /* Depending on the signal settings we may need to revert the
450 decision to restart the system call. */
451 if (signr > 0 && regs->psw.addr == restart_addr) {
452 if (retval == -ERESTARTNOHAND
453 || (retval == -ERESTARTSYS
454 && !(current->sighand->action[signr-1].sa.sa_flags
455 & SA_RESTART))) {
456 regs->gprs[2] = -EINTR;
457 regs->psw.addr = continue_addr;
458 }
459 }
460
461 if (signr > 0) { 440 if (signr > 0) {
462 /* Whee! Actually deliver the signal. */ 441 /* Whee! Actually deliver the signal. */
463 int ret; 442 if (current_thread_info()->system_call) {
464#ifdef CONFIG_COMPAT 443 regs->svc_code = current_thread_info()->system_call;
465 if (is_compat_task()) { 444 /* Check for system call restarting. */
466 ret = handle_signal32(signr, &ka, &info, oldset, regs); 445 switch (regs->gprs[2]) {
467 } 446 case -ERESTART_RESTARTBLOCK:
468 else 447 case -ERESTARTNOHAND:
469#endif 448 regs->gprs[2] = -EINTR;
470 ret = handle_signal(signr, &ka, &info, oldset, regs); 449 break;
471 if (!ret) { 450 case -ERESTARTSYS:
451 if (!(ka.sa.sa_flags & SA_RESTART)) {
452 regs->gprs[2] = -EINTR;
453 break;
454 }
455 /* fallthrough */
456 case -ERESTARTNOINTR:
457 regs->gprs[2] = regs->orig_gpr2;
458 regs->psw.addr =
459 __rewind_psw(regs->psw,
460 regs->svc_code >> 16);
461 break;
462 }
463 /* No longer in a system call */
464 clear_thread_flag(TIF_SYSCALL);
465 }
466
467 if ((is_compat_task() ?
468 handle_signal32(signr, &ka, &info, oldset, regs) :
469 handle_signal(signr, &ka, &info, oldset, regs)) == 0) {
472 /* 470 /*
473 * A signal was successfully delivered; the saved 471 * A signal was successfully delivered; the saved
474 * sigmask will have been stored in the signal frame, 472 * sigmask will have been stored in the signal frame,
@@ -482,11 +480,32 @@ void do_signal(struct pt_regs *regs)
482 * Let tracing know that we've done the handler setup. 480 * Let tracing know that we've done the handler setup.
483 */ 481 */
484 tracehook_signal_handler(signr, &info, &ka, regs, 482 tracehook_signal_handler(signr, &info, &ka, regs,
485 test_thread_flag(TIF_SINGLE_STEP)); 483 test_thread_flag(TIF_SINGLE_STEP));
486 } 484 }
487 return; 485 return;
488 } 486 }
489 487
488 /* No handlers present - check for system call restart */
489 if (current_thread_info()->system_call) {
490 regs->svc_code = current_thread_info()->system_call;
491 switch (regs->gprs[2]) {
492 case -ERESTART_RESTARTBLOCK:
493 /* Restart with sys_restart_syscall */
494 regs->svc_code = __NR_restart_syscall;
495 /* fallthrough */
496 case -ERESTARTNOHAND:
497 case -ERESTARTSYS:
498 case -ERESTARTNOINTR:
499 /* Restart system call with magic TIF bit. */
500 regs->gprs[2] = regs->orig_gpr2;
501 set_thread_flag(TIF_SYSCALL);
502 break;
503 default:
504 clear_thread_flag(TIF_SYSCALL);
505 break;
506 }
507 }
508
490 /* 509 /*
491 * If there's no signal to deliver, we just put the saved sigmask back. 510 * If there's no signal to deliver, we just put the saved sigmask back.
492 */ 511 */
@@ -494,13 +513,6 @@ void do_signal(struct pt_regs *regs)
494 clear_thread_flag(TIF_RESTORE_SIGMASK); 513 clear_thread_flag(TIF_RESTORE_SIGMASK);
495 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 514 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
496 } 515 }
497
498 /* Restart a different system call. */
499 if (retval == -ERESTART_RESTARTBLOCK
500 && regs->psw.addr == continue_addr) {
501 regs->gprs[2] = __NR_restart_syscall;
502 set_thread_flag(TIF_RESTART_SVC);
503 }
504} 516}
505 517
506void do_notify_resume(struct pt_regs *regs) 518void do_notify_resume(struct pt_regs *regs)
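The do_signal() rewrite above moves system call restarting behind the new TIF_SYSCALL flag and decides, per -ERESTART* value and SA_RESTART, whether the interrupted call is resumed or fails with -EINTR once a handler runs. The sketch below models just that decision in userspace; the -ERESTART* constants use the kernel-internal values, and everything else (PSW rewinding, the no-handler path that arms sys_restart_syscall) is left out:

	#include <stdio.h>
	#include <stdbool.h>

	#define ERESTARTSYS		512
	#define ERESTARTNOINTR		513
	#define ERESTARTNOHAND		514
	#define ERESTART_RESTARTBLOCK	516

	enum action { RETURN_EINTR, RESTART_SYSCALL, LEAVE_RESULT };

	static enum action restart_decision(long retval, bool sa_restart)
	{
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			/* Never restarted across a signal handler. */
			return RETURN_EINTR;
		case -ERESTARTSYS:
			/* Restarted only if the handler uses SA_RESTART. */
			return sa_restart ? RESTART_SYSCALL : RETURN_EINTR;
		case -ERESTARTNOINTR:
			return RESTART_SYSCALL;
		default:
			return LEAVE_RESULT;
		}
	}

	int main(void)
	{
		printf("ERESTARTSYS without SA_RESTART -> %d (0 = EINTR)\n",
		       restart_decision(-ERESTARTSYS, false));
		printf("ERESTARTNOINTR                 -> %d (1 = restart)\n",
		       restart_decision(-ERESTARTNOINTR, true));
		return 0;
	}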
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6ab16ac64d29..3ea872890da2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -38,6 +38,7 @@
38#include <linux/timex.h> 38#include <linux/timex.h>
39#include <linux/bootmem.h> 39#include <linux/bootmem.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/crash_dump.h>
41#include <asm/asm-offsets.h> 42#include <asm/asm-offsets.h>
42#include <asm/ipl.h> 43#include <asm/ipl.h>
43#include <asm/setup.h> 44#include <asm/setup.h>
@@ -97,6 +98,29 @@ static inline int cpu_stopped(int cpu)
97 return raw_cpu_stopped(cpu_logical_map(cpu)); 98 return raw_cpu_stopped(cpu_logical_map(cpu));
98} 99}
99 100
101/*
102 * Ensure that PSW restart is done on an online CPU
103 */
104void smp_restart_with_online_cpu(void)
105{
106 int cpu;
107
108 for_each_online_cpu(cpu) {
109 if (stap() == __cpu_logical_map[cpu]) {
110 /* We are online: Enable DAT again and return */
111 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
112 return;
113 }
114 }
115 /* We are not online: Do PSW restart on an online CPU */
116 while (sigp(cpu, sigp_restart) == sigp_busy)
117 cpu_relax();
118 /* And stop ourself */
119 while (raw_sigp(stap(), sigp_stop) == sigp_busy)
120 cpu_relax();
121 for (;;);
122}
123
100void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) 124void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
101{ 125{
102 struct _lowcore *lc, *current_lc; 126 struct _lowcore *lc, *current_lc;
@@ -106,14 +130,16 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
106 130
107 if (smp_processor_id() == 0) 131 if (smp_processor_id() == 0)
108 func(data); 132 func(data);
109 __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); 133 __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE |
134 PSW_MASK_EA | PSW_MASK_BA);
110 /* Disable lowcore protection */ 135 /* Disable lowcore protection */
111 __ctl_clear_bit(0, 28); 136 __ctl_clear_bit(0, 28);
112 current_lc = lowcore_ptr[smp_processor_id()]; 137 current_lc = lowcore_ptr[smp_processor_id()];
113 lc = lowcore_ptr[0]; 138 lc = lowcore_ptr[0];
114 if (!lc) 139 if (!lc)
115 lc = current_lc; 140 lc = current_lc;
116 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 141 lc->restart_psw.mask =
142 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
117 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; 143 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
118 if (!cpu_online(0)) 144 if (!cpu_online(0))
119 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); 145 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
@@ -135,7 +161,7 @@ void smp_send_stop(void)
135 int cpu, rc; 161 int cpu, rc;
136 162
137 /* Disable all interrupts/machine checks */ 163 /* Disable all interrupts/machine checks */
138 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); 164 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
139 trace_hardirqs_off(); 165 trace_hardirqs_off();
140 166
141 /* stop all processors */ 167 /* stop all processors */
@@ -161,7 +187,10 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
161{ 187{
162 unsigned long bits; 188 unsigned long bits;
163 189
164 kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; 190 if (ext_int_code == 0x1202)
191 kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++;
192 else
193 kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++;
165 /* 194 /*
166 * handle bit signal external calls 195 * handle bit signal external calls
167 */ 196 */
@@ -183,12 +212,19 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
183 */ 212 */
184static void smp_ext_bitcall(int cpu, int sig) 213static void smp_ext_bitcall(int cpu, int sig)
185{ 214{
215 int order;
216
186 /* 217 /*
187 * Set signaling bit in lowcore of target cpu and kick it 218 * Set signaling bit in lowcore of target cpu and kick it
188 */ 219 */
189 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 220 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
190 while (sigp(cpu, sigp_emergency_signal) == sigp_busy) 221 while (1) {
222 order = smp_vcpu_scheduled(cpu) ?
223 sigp_external_call : sigp_emergency_signal;
224 if (sigp(cpu, order) != sigp_busy)
225 break;
191 udelay(10); 226 udelay(10);
227 }
192} 228}
193 229
194void arch_send_call_function_ipi_mask(const struct cpumask *mask) 230void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -281,11 +317,13 @@ void smp_ctl_clear_bit(int cr, int bit)
281} 317}
282EXPORT_SYMBOL(smp_ctl_clear_bit); 318EXPORT_SYMBOL(smp_ctl_clear_bit);
283 319
284#ifdef CONFIG_ZFCPDUMP 320#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
285 321
286static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 322static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
287{ 323{
288 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 324 if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE)
325 return;
326 if (is_kdump_kernel())
289 return; 327 return;
290 if (cpu >= NR_CPUS) { 328 if (cpu >= NR_CPUS) {
291 pr_warning("CPU %i exceeds the maximum %i and is excluded from " 329 pr_warning("CPU %i exceeds the maximum %i and is excluded from "
@@ -403,6 +441,18 @@ static void __init smp_detect_cpus(void)
403 info = kmalloc(sizeof(*info), GFP_KERNEL); 441 info = kmalloc(sizeof(*info), GFP_KERNEL);
404 if (!info) 442 if (!info)
405 panic("smp_detect_cpus failed to allocate memory\n"); 443 panic("smp_detect_cpus failed to allocate memory\n");
444#ifdef CONFIG_CRASH_DUMP
445 if (OLDMEM_BASE && !is_kdump_kernel()) {
446 struct save_area *save_area;
447
448 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
449 if (!save_area)
450 panic("could not allocate memory for save area\n");
451 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
452 0x200, 0);
453 zfcpdump_save_areas[0] = save_area;
454 }
455#endif
406 /* Use sigp detection algorithm if sclp doesn't work. */ 456 /* Use sigp detection algorithm if sclp doesn't work. */
407 if (sclp_get_cpu_info(info)) { 457 if (sclp_get_cpu_info(info)) {
408 smp_use_sigp_detection = 1; 458 smp_use_sigp_detection = 1;
@@ -463,7 +513,8 @@ int __cpuinit start_secondary(void *cpuvoid)
463 set_cpu_online(smp_processor_id(), true); 513 set_cpu_online(smp_processor_id(), true);
464 ipi_call_unlock(); 514 ipi_call_unlock();
465 __ctl_clear_bit(0, 28); /* Disable lowcore protection */ 515 __ctl_clear_bit(0, 28); /* Disable lowcore protection */
466 S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 516 S390_lowcore.restart_psw.mask =
517 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
467 S390_lowcore.restart_psw.addr = 518 S390_lowcore.restart_psw.addr =
468 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 519 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
469 __ctl_set_bit(0, 28); /* Enable lowcore protection */ 520 __ctl_set_bit(0, 28); /* Enable lowcore protection */
@@ -511,7 +562,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
511 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); 562 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
512 lowcore->async_stack = async_stack + ASYNC_SIZE; 563 lowcore->async_stack = async_stack + ASYNC_SIZE;
513 lowcore->panic_stack = panic_stack + PAGE_SIZE; 564 lowcore->panic_stack = panic_stack + PAGE_SIZE;
514 lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 565 lowcore->restart_psw.mask =
566 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
515 lowcore->restart_psw.addr = 567 lowcore->restart_psw.addr =
516 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 568 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
517 if (user_mode != HOME_SPACE_MODE) 569 if (user_mode != HOME_SPACE_MODE)
@@ -712,6 +764,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
712 /* request the 0x1201 emergency signal external interrupt */ 764 /* request the 0x1201 emergency signal external interrupt */
713 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 765 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
714 panic("Couldn't request external interrupt 0x1201"); 766 panic("Couldn't request external interrupt 0x1201");
767 /* request the 0x1202 external call external interrupt */
768 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
769 panic("Couldn't request external interrupt 0x1202");
715 770
716 /* Reallocate current lowcore, but keep its contents. */ 771 /* Reallocate current lowcore, but keep its contents. */
717 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); 772 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index b6f9afed74ec..47df775c844d 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/pfn.h> 9#include <linux/pfn.h>
10#include <linux/suspend.h>
10#include <linux/mm.h> 11#include <linux/mm.h>
11#include <asm/system.h> 12#include <asm/system.h>
12 13
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 5c9e439bf3f6..2a94b774695c 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -442,7 +442,7 @@ void s390_adjust_jiffies(void)
442 */ 442 */
443 FP_UNPACK_SP(SA, &fmil); 443 FP_UNPACK_SP(SA, &fmil);
444 if ((info->capability >> 23) == 0) 444 if ((info->capability >> 23) == 0)
445 FP_FROM_INT_S(SB, info->capability, 32, int); 445 FP_FROM_INT_S(SB, (long) info->capability, 64, long);
446 else 446 else
447 FP_UNPACK_SP(SB, &info->capability); 447 FP_UNPACK_SP(SB, &info->capability);
448 FP_DIV_S(SR, SA, SB); 448 FP_DIV_S(SR, SA, SB);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 8d65bd0383fc..ebbfab3c6e5a 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -48,6 +48,7 @@
48#include <asm/timer.h> 48#include <asm/timer.h>
49#include <asm/etr.h> 49#include <asm/etr.h>
50#include <asm/cio.h> 50#include <asm/cio.h>
51#include "entry.h"
51 52
52/* change this if you have some constant time drift */ 53/* change this if you have some constant time drift */
53#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 54#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 0cd340b72632..77b8942b9a15 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -299,8 +299,8 @@ out:
299} 299}
300__initcall(init_topology_update); 300__initcall(init_topology_update);
301 301
302static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, 302static void __init alloc_masks(struct sysinfo_15_1_x *info,
303 int offset) 303 struct mask_info *mask, int offset)
304{ 304{
305 int i, nr_masks; 305 int i, nr_masks;
306 306
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index ffabcd9d3363..a9807dd86276 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -200,7 +200,7 @@ void show_registers(struct pt_regs *regs)
200 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 200 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
201 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 201 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
202#ifdef CONFIG_64BIT 202#ifdef CONFIG_64BIT
203 printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); 203 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
204#endif 204#endif
205 printk("\n%s GPRS: " FOURLONG, mode, 205 printk("\n%s GPRS: " FOURLONG, mode,
206 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 206 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
@@ -334,7 +334,8 @@ void __kprobes do_per_trap(struct pt_regs *regs)
334 info.si_signo = SIGTRAP; 334 info.si_signo = SIGTRAP;
335 info.si_errno = 0; 335 info.si_errno = 0;
336 info.si_code = TRAP_HWBKPT; 336 info.si_code = TRAP_HWBKPT;
337 info.si_addr = (void *) current->thread.per_event.address; 337 info.si_addr =
338 (void __force __user *) current->thread.per_event.address;
338 force_sig_info(SIGTRAP, &info, current); 339 force_sig_info(SIGTRAP, &info, current);
339} 340}
340 341
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 2d6228f60cd6..bb48977f5469 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -170,7 +170,8 @@ void __kprobes vtime_stop_cpu(void)
170 psw_t psw; 170 psw_t psw;
171 171
172 /* Wait for external, I/O or machine check interrupt. */ 172 /* Wait for external, I/O or machine check interrupt. */
173 psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; 173 psw.mask = psw_kernel_bits | PSW_MASK_WAIT |
174 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
174 175
175 idle->nohz_delay = 0; 176 idle->nohz_delay = 0;
176 177
@@ -183,7 +184,8 @@ void __kprobes vtime_stop_cpu(void)
183 * set_cpu_timer(VTIMER_MAX_SLICE); 184 * set_cpu_timer(VTIMER_MAX_SLICE);
184 * idle->idle_enter = get_clock(); 185 * idle->idle_enter = get_clock();
185 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 186 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
186 * PSW_MASK_IO | PSW_MASK_EXT); 187 * PSW_MASK_DAT | PSW_MASK_IO |
188 * PSW_MASK_EXT | PSW_MASK_MCHECK);
187 * The difference is that the inline assembly makes sure that 189 * The difference is that the inline assembly makes sure that
188 * the last three instructions are stpt, stck and lpsw in that 190 * the last three instructions are stpt, stck and lpsw in that
189 * order. This is done to increase the precision. 191 * order. This is done to increase the precision.
@@ -216,7 +218,8 @@ void __kprobes vtime_stop_cpu(void)
216 * vq->idle = get_cpu_timer(); 218 * vq->idle = get_cpu_timer();
217 * idle->idle_enter = get_clock(); 219 * idle->idle_enter = get_clock();
218 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 220 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
219 * PSW_MASK_IO | PSW_MASK_EXT); 221 * PSW_MASK_DAT | PSW_MASK_IO |
222 * PSW_MASK_EXT | PSW_MASK_MCHECK);
220 * The difference is that the inline assembly makes sure that 223 * The difference is that the inline assembly makes sure that
221 * the last three instructions are stpt, stck and lpsw in that 224 * the last three instructions are stpt, stck and lpsw in that
222 * order. This is done to increase the precision. 225 * order. This is done to increase the precision.
@@ -458,7 +461,7 @@ void add_virt_timer_periodic(void *new)
458} 461}
459EXPORT_SYMBOL(add_virt_timer_periodic); 462EXPORT_SYMBOL(add_virt_timer_periodic);
460 463
461int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) 464static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
462{ 465{
463 struct vtimer_queue *vq; 466 struct vtimer_queue *vq;
464 unsigned long flags; 467 unsigned long flags;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9e4c84187cf5..87cedd61be04 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * diag.c - handling diagnose instructions 2 * diag.c - handling diagnose instructions
3 * 3 *
4 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008,2011
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only) 7 * it under the terms of the GNU General Public License (version 2 only)
@@ -15,6 +15,34 @@
15#include <linux/kvm_host.h> 15#include <linux/kvm_host.h>
16#include "kvm-s390.h" 16#include "kvm-s390.h"
17 17
18static int diag_release_pages(struct kvm_vcpu *vcpu)
19{
20 unsigned long start, end;
21 unsigned long prefix = vcpu->arch.sie_block->prefix;
22
23 start = vcpu->arch.guest_gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
24 end = vcpu->arch.guest_gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
25
26 if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end
27 || start < 2 * PAGE_SIZE)
28 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
29
30 VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
31 vcpu->stat.diagnose_10++;
32
33 /* we checked for start > end above */
34 if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
35 gmap_discard(start, end, vcpu->arch.gmap);
36 } else {
37 if (start < prefix)
38 gmap_discard(start, prefix, vcpu->arch.gmap);
39 if (end >= prefix)
40 gmap_discard(prefix + 2 * PAGE_SIZE,
41 end, vcpu->arch.gmap);
42 }
43 return 0;
44}
45
18static int __diag_time_slice_end(struct kvm_vcpu *vcpu) 46static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
19{ 47{
20 VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); 48 VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
@@ -57,6 +85,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
57 int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; 85 int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
58 86
59 switch (code) { 87 switch (code) {
88 case 0x10:
89 return diag_release_pages(vcpu);
60 case 0x44: 90 case 0x44:
61 return __diag_time_slice_end(vcpu); 91 return __diag_time_slice_end(vcpu);
62 case 0x308: 92 case 0x308:
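Editor's sketch of the operand checks in diag_release_pages() above (assumptions: 4 KiB pages and an exclusive end address, as in the hunk; the helper name is invented):

    #include <stdbool.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Both addresses must be page aligned, the range must not be
     * inverted, and the lowest two pages may never be released; the
     * guest prefix pages are excluded separately by the caller. */
    static bool release_range_is_valid(unsigned long start, unsigned long end)
    {
        if ((start & ~PAGE_MASK) || (end & ~PAGE_MASK))
            return false;
        if (start > end)
            return false;
        if (start < 2 * PAGE_SIZE)
            return false;
        return true;
    }

    int main(void)
    {
        return release_range_is_valid(0x2000, 0x6000) ? 0 : 1;
    }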
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9610ba41b974..0bd3bea1e4cd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -71,6 +71,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
71 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, 71 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
72 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, 72 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
73 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, 73 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
74 { "diagnose_10", VCPU_STAT(diagnose_10) },
74 { "diagnose_44", VCPU_STAT(diagnose_44) }, 75 { "diagnose_44", VCPU_STAT(diagnose_44) },
75 { NULL } 76 { NULL }
76}; 77};
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index a65229d91c92..db92f044024c 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -32,7 +32,8 @@ static void __udelay_disabled(unsigned long long usecs)
32 u64 clock_saved; 32 u64 clock_saved;
33 u64 end; 33 u64 end;
34 34
35 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; 35 mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_WAIT |
36 PSW_MASK_EXT | PSW_MASK_MCHECK;
36 end = get_clock() + (usecs << 12); 37 end = get_clock() + (usecs << 12);
37 clock_saved = local_tick_disable(); 38 clock_saved = local_tick_disable();
38 __ctl_store(cr0_saved, 0, 0); 39 __ctl_store(cr0_saved, 0, 0);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 74833831417f..342ae35a5ba9 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -342,7 +342,8 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
342 if (segment_eq(get_fs(), KERNEL_DS)) 342 if (segment_eq(get_fs(), KERNEL_DS))
343 return __futex_atomic_op_pt(op, uaddr, oparg, old); 343 return __futex_atomic_op_pt(op, uaddr, oparg, old);
344 spin_lock(&current->mm->page_table_lock); 344 spin_lock(&current->mm->page_table_lock);
345 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); 345 uaddr = (u32 __force __user *)
346 __dat_user_addr((__force unsigned long) uaddr);
346 if (!uaddr) { 347 if (!uaddr) {
347 spin_unlock(&current->mm->page_table_lock); 348 spin_unlock(&current->mm->page_table_lock);
348 return -EFAULT; 349 return -EFAULT;
@@ -378,7 +379,8 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
378 if (segment_eq(get_fs(), KERNEL_DS)) 379 if (segment_eq(get_fs(), KERNEL_DS))
379 return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); 380 return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
380 spin_lock(&current->mm->page_table_lock); 381 spin_lock(&current->mm->page_table_lock);
381 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); 382 uaddr = (u32 __force __user *)
383 __dat_user_addr((__force unsigned long) uaddr);
382 if (!uaddr) { 384 if (!uaddr) {
383 spin_unlock(&current->mm->page_table_lock); 385 spin_unlock(&current->mm->page_table_lock);
384 return -EFAULT; 386 return -EFAULT;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 9564fc779b27..1766def5bc3f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -307,7 +307,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
307 307
308#ifdef CONFIG_PGSTE 308#ifdef CONFIG_PGSTE
309 if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { 309 if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
310 address = gmap_fault(address, 310 address = __gmap_fault(address,
311 (struct gmap *) S390_lowcore.gmap); 311 (struct gmap *) S390_lowcore.gmap);
312 if (address == -EFAULT) { 312 if (address == -EFAULT) {
313 fault = VM_FAULT_BADMAP; 313 fault = VM_FAULT_BADMAP;
@@ -393,7 +393,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
393 int fault; 393 int fault;
394 394
395 /* Protection exception is suppressing, decrement psw address. */ 395 /* Protection exception is suppressing, decrement psw address. */
396 regs->psw.addr -= (pgm_int_code >> 16); 396 regs->psw.addr = __rewind_psw(regs->psw, pgm_int_code >> 16);
397 /* 397 /*
398 * Check for low-address protection. This needs to be treated 398 * Check for low-address protection. This needs to be treated
399 * as a special case because the translation exception code 399 * as a special case because the translation exception code
@@ -454,7 +454,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
454 struct pt_regs regs; 454 struct pt_regs regs;
455 int access, fault; 455 int access, fault;
456 456
457 regs.psw.mask = psw_kernel_bits; 457 regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
458 if (!irqs_disabled()) 458 if (!irqs_disabled())
459 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; 459 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
460 regs.psw.addr = (unsigned long) __builtin_return_address(0); 460 regs.psw.addr = (unsigned long) __builtin_return_address(0);
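Editor's note on the __rewind_psw() change in the fault.c hunks above: replacing the raw subtraction keeps the rewound instruction address inside the current addressing mode's address space. A hedged, stand-alone sketch of that idea (the mode-to-mask selection below is an assumption for illustration, not the kernel helper itself):

    #include <stdio.h>

    /* Rewind an instruction address by "ilc" bytes, wrapping within
     * the address space of the given addressing mode. */
    static unsigned long rewind_addr(unsigned long addr, unsigned long ilc,
                                     int amode_bits)
    {
        unsigned long mask;

        switch (amode_bits) {
        case 24:
            mask = (1UL << 24) - 1;
            break;
        case 31:
            mask = (1UL << 31) - 1;
            break;
        default:
            mask = ~0UL;            /* 64-bit mode */
            break;
        }
        return (addr - ilc) & mask;
    }

    int main(void)
    {
        /* Subtracting past zero wraps within the 31-bit space. */
        printf("%lx\n", rewind_addr(0x2, 4, 31));
        return 0;
    }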
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 5dbbaa6e594c..1cb8427bedfb 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/gfp.h>
14#include <asm/system.h> 15#include <asm/system.h>
15 16
16/* 17/*
@@ -60,6 +61,9 @@ long probe_kernel_write(void *dst, const void *src, size_t size)
60 return copied < 0 ? -EFAULT : 0; 61 return copied < 0 ? -EFAULT : 0;
61} 62}
62 63
64/*
65 * Copy memory in real mode (kernel to kernel)
66 */
63int memcpy_real(void *dest, void *src, size_t count) 67int memcpy_real(void *dest, void *src, size_t count)
64{ 68{
65 register unsigned long _dest asm("2") = (unsigned long) dest; 69 register unsigned long _dest asm("2") = (unsigned long) dest;
@@ -101,3 +105,55 @@ void copy_to_absolute_zero(void *dest, void *src, size_t count)
101 __ctl_load(cr0, 0, 0); 105 __ctl_load(cr0, 0, 0);
102 preempt_enable(); 106 preempt_enable();
103} 107}
108
109/*
110 * Copy memory from kernel (real) to user (virtual)
111 */
112int copy_to_user_real(void __user *dest, void *src, size_t count)
113{
114 int offs = 0, size, rc;
115 char *buf;
116
117 buf = (char *) __get_free_page(GFP_KERNEL);
118 if (!buf)
119 return -ENOMEM;
120 rc = -EFAULT;
121 while (offs < count) {
122 size = min(PAGE_SIZE, count - offs);
123 if (memcpy_real(buf, src + offs, size))
124 goto out;
125 if (copy_to_user(dest + offs, buf, size))
126 goto out;
127 offs += size;
128 }
129 rc = 0;
130out:
131 free_page((unsigned long) buf);
132 return rc;
133}
134
135/*
136 * Copy memory from user (virtual) to kernel (real)
137 */
138int copy_from_user_real(void *dest, void __user *src, size_t count)
139{
140 int offs = 0, size, rc;
141 char *buf;
142
143 buf = (char *) __get_free_page(GFP_KERNEL);
144 if (!buf)
145 return -ENOMEM;
146 rc = -EFAULT;
147 while (offs < count) {
148 size = min(PAGE_SIZE, count - offs);
149 if (copy_from_user(buf, src + offs, size))
150 goto out;
151 if (memcpy_real(dest + offs, buf, size))
152 goto out;
153 offs += size;
154 }
155 rc = 0;
156out:
157 free_page((unsigned long) buf);
158 return rc;
159}
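The two helpers added above bounce every chunk through a single kernel page because memcpy_real() needs a kernel buffer while copy_to_user()/copy_from_user() operate on user virtual addresses. A minimal, hypothetical caller (names invented, not from the patch) showing the return convention: 0 on success, -ENOMEM if the bounce page cannot be allocated, -EFAULT on a failed copy.

    /* Hypothetical caller, not part of the patch. */
    static ssize_t example_dump_read(void __user *ubuf, void *real_src,
                                     size_t len)
    {
        int rc;

        rc = copy_to_user_real(ubuf, real_src, len);
        if (rc)
            return rc;
        return len;
    }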
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index c9a9f7f18188..f09c74881b7e 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -26,6 +26,7 @@
26 26
27#include <linux/personality.h> 27#include <linux/personality.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/mman.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/random.h> 31#include <linux/random.h>
31#include <asm/pgalloc.h> 32#include <asm/pgalloc.h>
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d013ed39743b..b36537a5f43e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -5,6 +5,7 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/hugetlb.h> 7#include <linux/hugetlb.h>
8#include <asm/cacheflush.h>
8#include <asm/pgtable.h> 9#include <asm/pgtable.h>
9 10
10static void change_page_attr(unsigned long addr, int numpages, 11static void change_page_attr(unsigned long addr, int numpages,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5d56c2b95b14..301c84d3b542 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2007,2009 2 * Copyright IBM Corp. 2007,2011
3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4 */ 4 */
5 5
@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap)
222 222
223 /* Free all segment & region tables. */ 223 /* Free all segment & region tables. */
224 down_read(&gmap->mm->mmap_sem); 224 down_read(&gmap->mm->mmap_sem);
225 spin_lock(&gmap->mm->page_table_lock);
225 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { 226 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
226 table = (unsigned long *) page_to_phys(page); 227 table = (unsigned long *) page_to_phys(page);
227 if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) 228 if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap)
230 gmap_unlink_segment(gmap, table); 231 gmap_unlink_segment(gmap, table);
231 __free_pages(page, ALLOC_ORDER); 232 __free_pages(page, ALLOC_ORDER);
232 } 233 }
234 spin_unlock(&gmap->mm->page_table_lock);
233 up_read(&gmap->mm->mmap_sem); 235 up_read(&gmap->mm->mmap_sem);
234 list_del(&gmap->list); 236 list_del(&gmap->list);
235 kfree(gmap); 237 kfree(gmap);
@@ -256,6 +258,9 @@ void gmap_disable(struct gmap *gmap)
256} 258}
257EXPORT_SYMBOL_GPL(gmap_disable); 259EXPORT_SYMBOL_GPL(gmap_disable);
258 260
261/*
262 * gmap_alloc_table is assumed to be called with mmap_sem held
263 */
259static int gmap_alloc_table(struct gmap *gmap, 264static int gmap_alloc_table(struct gmap *gmap,
260 unsigned long *table, unsigned long init) 265 unsigned long *table, unsigned long init)
261{ 266{
@@ -267,14 +272,12 @@ static int gmap_alloc_table(struct gmap *gmap,
267 return -ENOMEM; 272 return -ENOMEM;
268 new = (unsigned long *) page_to_phys(page); 273 new = (unsigned long *) page_to_phys(page);
269 crst_table_init(new, init); 274 crst_table_init(new, init);
270 down_read(&gmap->mm->mmap_sem);
271 if (*table & _REGION_ENTRY_INV) { 275 if (*table & _REGION_ENTRY_INV) {
272 list_add(&page->lru, &gmap->crst_list); 276 list_add(&page->lru, &gmap->crst_list);
273 *table = (unsigned long) new | _REGION_ENTRY_LENGTH | 277 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
274 (*table & _REGION_ENTRY_TYPE_MASK); 278 (*table & _REGION_ENTRY_TYPE_MASK);
275 } else 279 } else
276 __free_pages(page, ALLOC_ORDER); 280 __free_pages(page, ALLOC_ORDER);
277 up_read(&gmap->mm->mmap_sem);
278 return 0; 281 return 0;
279} 282}
280 283
@@ -299,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
299 302
300 flush = 0; 303 flush = 0;
301 down_read(&gmap->mm->mmap_sem); 304 down_read(&gmap->mm->mmap_sem);
305 spin_lock(&gmap->mm->page_table_lock);
302 for (off = 0; off < len; off += PMD_SIZE) { 306 for (off = 0; off < len; off += PMD_SIZE) {
303 /* Walk the guest addr space page table */ 307 /* Walk the guest addr space page table */
304 table = gmap->table + (((to + off) >> 53) & 0x7ff); 308 table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -320,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
320 *table = _SEGMENT_ENTRY_INV; 324 *table = _SEGMENT_ENTRY_INV;
321 } 325 }
322out: 326out:
327 spin_unlock(&gmap->mm->page_table_lock);
323 up_read(&gmap->mm->mmap_sem); 328 up_read(&gmap->mm->mmap_sem);
324 if (flush) 329 if (flush)
325 gmap_flush_tlb(gmap); 330 gmap_flush_tlb(gmap);
@@ -350,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
350 355
351 flush = 0; 356 flush = 0;
352 down_read(&gmap->mm->mmap_sem); 357 down_read(&gmap->mm->mmap_sem);
358 spin_lock(&gmap->mm->page_table_lock);
353 for (off = 0; off < len; off += PMD_SIZE) { 359 for (off = 0; off < len; off += PMD_SIZE) {
354 /* Walk the gmap address space page table */ 360 /* Walk the gmap address space page table */
355 table = gmap->table + (((to + off) >> 53) & 0x7ff); 361 table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -373,19 +379,24 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
373 flush |= gmap_unlink_segment(gmap, table); 379 flush |= gmap_unlink_segment(gmap, table);
374 *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); 380 *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
375 } 381 }
382 spin_unlock(&gmap->mm->page_table_lock);
376 up_read(&gmap->mm->mmap_sem); 383 up_read(&gmap->mm->mmap_sem);
377 if (flush) 384 if (flush)
378 gmap_flush_tlb(gmap); 385 gmap_flush_tlb(gmap);
379 return 0; 386 return 0;
380 387
381out_unmap: 388out_unmap:
389 spin_unlock(&gmap->mm->page_table_lock);
382 up_read(&gmap->mm->mmap_sem); 390 up_read(&gmap->mm->mmap_sem);
383 gmap_unmap_segment(gmap, to, len); 391 gmap_unmap_segment(gmap, to, len);
384 return -ENOMEM; 392 return -ENOMEM;
385} 393}
386EXPORT_SYMBOL_GPL(gmap_map_segment); 394EXPORT_SYMBOL_GPL(gmap_map_segment);
387 395
388unsigned long gmap_fault(unsigned long address, struct gmap *gmap) 396/*
397 * this function is assumed to be called with mmap_sem held
398 */
399unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
389{ 400{
390 unsigned long *table, vmaddr, segment; 401 unsigned long *table, vmaddr, segment;
391 struct mm_struct *mm; 402 struct mm_struct *mm;
@@ -445,16 +456,75 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
445 page = pmd_page(*pmd); 456 page = pmd_page(*pmd);
446 mp = (struct gmap_pgtable *) page->index; 457 mp = (struct gmap_pgtable *) page->index;
447 rmap->entry = table; 458 rmap->entry = table;
459 spin_lock(&mm->page_table_lock);
448 list_add(&rmap->list, &mp->mapper); 460 list_add(&rmap->list, &mp->mapper);
461 spin_unlock(&mm->page_table_lock);
449 /* Set gmap segment table entry to page table. */ 462 /* Set gmap segment table entry to page table. */
450 *table = pmd_val(*pmd) & PAGE_MASK; 463 *table = pmd_val(*pmd) & PAGE_MASK;
451 return vmaddr | (address & ~PMD_MASK); 464 return vmaddr | (address & ~PMD_MASK);
452 } 465 }
453 return -EFAULT; 466 return -EFAULT;
467}
454 468
469unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
470{
471 unsigned long rc;
472
473 down_read(&gmap->mm->mmap_sem);
474 rc = __gmap_fault(address, gmap);
475 up_read(&gmap->mm->mmap_sem);
476
477 return rc;
455} 478}
456EXPORT_SYMBOL_GPL(gmap_fault); 479EXPORT_SYMBOL_GPL(gmap_fault);
457 480
481void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
482{
483
484 unsigned long *table, address, size;
485 struct vm_area_struct *vma;
486 struct gmap_pgtable *mp;
487 struct page *page;
488
489 down_read(&gmap->mm->mmap_sem);
490 address = from;
491 while (address < to) {
492 /* Walk the gmap address space page table */
493 table = gmap->table + ((address >> 53) & 0x7ff);
494 if (unlikely(*table & _REGION_ENTRY_INV)) {
495 address = (address + PMD_SIZE) & PMD_MASK;
496 continue;
497 }
498 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
499 table = table + ((address >> 42) & 0x7ff);
500 if (unlikely(*table & _REGION_ENTRY_INV)) {
501 address = (address + PMD_SIZE) & PMD_MASK;
502 continue;
503 }
504 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
505 table = table + ((address >> 31) & 0x7ff);
506 if (unlikely(*table & _REGION_ENTRY_INV)) {
507 address = (address + PMD_SIZE) & PMD_MASK;
508 continue;
509 }
510 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
511 table = table + ((address >> 20) & 0x7ff);
512 if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
513 address = (address + PMD_SIZE) & PMD_MASK;
514 continue;
515 }
516 page = pfn_to_page(*table >> PAGE_SHIFT);
517 mp = (struct gmap_pgtable *) page->index;
518 vma = find_vma(gmap->mm, mp->vmaddr);
519 size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
520 zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
521 size, NULL);
522 address = (address + PMD_SIZE) & PMD_MASK;
523 }
524 up_read(&gmap->mm->mmap_sem);
525}
526EXPORT_SYMBOL_GPL(gmap_discard);
527
458void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) 528void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
459{ 529{
460 struct gmap_rmap *rmap, *next; 530 struct gmap_rmap *rmap, *next;
@@ -662,8 +732,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
662 732
663void __tlb_remove_table(void *_table) 733void __tlb_remove_table(void *_table)
664{ 734{
665 void *table = (void *)((unsigned long) _table & PAGE_MASK); 735 const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
666 unsigned type = (unsigned long) _table & ~PAGE_MASK; 736 void *table = (void *)((unsigned long) _table & ~mask);
737 unsigned type = (unsigned long) _table & mask;
667 738
668 if (type) 739 if (type)
669 __page_table_free_rcu(table, type); 740 __page_table_free_rcu(table, type);
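Editor's illustration of the table walk used by gmap_discard() above (not part of the patch): each of the four lookup levels uses an 11-bit index, i.e. 2048 entries per table, taken from the guest address at the shift amounts visible in the code.

    #include <stdio.h>

    int main(void)
    {
        unsigned long addr = 0x0000123456789000UL;  /* arbitrary example */
        unsigned long rfx = (addr >> 53) & 0x7ff;   /* region-first index  */
        unsigned long rsx = (addr >> 42) & 0x7ff;   /* region-second index */
        unsigned long rtx = (addr >> 31) & 0x7ff;   /* region-third index  */
        unsigned long sx  = (addr >> 20) & 0x7ff;   /* segment index       */

        printf("rfx=%lu rsx=%lu rtx=%lu sx=%lu\n", rfx, rsx, rtx, sx);
        return 0;
    }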
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 781ff5169560..4799383e2df9 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -335,6 +335,9 @@ void __init vmem_map_init(void)
335 ro_start = ((unsigned long)&_stext) & PAGE_MASK; 335 ro_start = ((unsigned long)&_stext) & PAGE_MASK;
336 ro_end = PFN_ALIGN((unsigned long)&_eshared); 336 ro_end = PFN_ALIGN((unsigned long)&_eshared);
337 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 337 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
338 if (memory_chunk[i].type == CHUNK_CRASHK ||
339 memory_chunk[i].type == CHUNK_OLDMEM)
340 continue;
338 start = memory_chunk[i].addr; 341 start = memory_chunk[i].addr;
339 end = memory_chunk[i].addr + memory_chunk[i].size; 342 end = memory_chunk[i].addr + memory_chunk[i].size;
340 if (start >= ro_end || end <= ro_start) 343 if (start >= ro_end || end <= ro_start)
@@ -368,6 +371,9 @@ static int __init vmem_convert_memory_chunk(void)
368 for (i = 0; i < MEMORY_CHUNKS; i++) { 371 for (i = 0; i < MEMORY_CHUNKS; i++) {
369 if (!memory_chunk[i].size) 372 if (!memory_chunk[i].size)
370 continue; 373 continue;
374 if (memory_chunk[i].type == CHUNK_CRASHK ||
375 memory_chunk[i].type == CHUNK_OLDMEM)
376 continue;
371 seg = kzalloc(sizeof(*seg), GFP_KERNEL); 377 seg = kzalloc(sizeof(*seg), GFP_KERNEL);
372 if (!seg) 378 if (!seg)
373 panic("Out of memory...\n"); 379 panic("Out of memory...\n");
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 4552ce40c81a..f43c0e4282af 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -994,7 +994,7 @@ allocate_error:
994 * 994 *
995 * Returns 0 on success, !0 on failure. 995 * Returns 0 on success, !0 on failure.
996 */ 996 */
997int hwsampler_deallocate() 997int hwsampler_deallocate(void)
998{ 998{
999 int rc; 999 int rc;
1000 1000
@@ -1035,7 +1035,7 @@ unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
1035 return cb->sample_overflow; 1035 return cb->sample_overflow;
1036} 1036}
1037 1037
1038int hwsampler_setup() 1038int hwsampler_setup(void)
1039{ 1039{
1040 int rc; 1040 int rc;
1041 int cpu; 1041 int cpu;
@@ -1102,7 +1102,7 @@ setup_exit:
1102 return rc; 1102 return rc;
1103} 1103}
1104 1104
1105int hwsampler_shutdown() 1105int hwsampler_shutdown(void)
1106{ 1106{
1107 int rc; 1107 int rc;
1108 1108
@@ -1203,7 +1203,7 @@ start_all_exit:
1203 * 1203 *
1204 * Returns 0 on success, !0 on failure. 1204 * Returns 0 on success, !0 on failure.
1205 */ 1205 */
1206int hwsampler_stop_all() 1206int hwsampler_stop_all(void)
1207{ 1207{
1208 int tmp_rc, rc, cpu; 1208 int tmp_rc, rc, cpu;
1209 struct hws_cpu_buffer *cb; 1209 struct hws_cpu_buffer *cb;
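The hwsampler changes above look cosmetic but are not: in C (under C89/C99 rules), empty parentheses declare a function with unspecified parameters, while (void) is a real prototype taking no arguments, so static checkers flag the former as a missing prototype. A stand-alone illustration:

    #include <stdio.h>

    int setup();             /* old style: parameters unspecified */
    int setup_proto(void);   /* prototype: takes no arguments     */

    int setup() { return 1; }
    int setup_proto(void) { return 2; }

    int main(void)
    {
        setup(42);           /* compiles: the call is not checked     */
        /* setup_proto(42);     would be rejected at compile time     */
        printf("%d %d\n", setup(), setup_proto());
        return 0;
    }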
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a1d3ddba99cc..65894f05a801 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -11,7 +11,6 @@
11#define KMSG_COMPONENT "dasd" 11#define KMSG_COMPONENT "dasd"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 13
14#include <linux/kernel_stat.h>
15#include <linux/kmod.h> 14#include <linux/kmod.h>
16#include <linux/init.h> 15#include <linux/init.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
@@ -1594,7 +1593,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1594 unsigned long long now; 1593 unsigned long long now;
1595 int expires; 1594 int expires;
1596 1595
1597 kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
1598 if (IS_ERR(irb)) { 1596 if (IS_ERR(irb)) {
1599 switch (PTR_ERR(irb)) { 1597 switch (PTR_ERR(irb)) {
1600 case -EIO: 1598 case -EIO:
@@ -2061,13 +2059,14 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2061/* 2059/*
2062 * Wakeup helper for the 'sleep_on' functions. 2060 * Wakeup helper for the 'sleep_on' functions.
2063 */ 2061 */
2064static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2062void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2065{ 2063{
2066 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2064 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2067 cqr->callback_data = DASD_SLEEPON_END_TAG; 2065 cqr->callback_data = DASD_SLEEPON_END_TAG;
2068 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2066 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2069 wake_up(&generic_waitq); 2067 wake_up(&generic_waitq);
2070} 2068}
2069EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
2071 2070
2072static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2071static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2073{ 2072{
@@ -2167,7 +2166,9 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2167 } else 2166 } else
2168 wait_event(generic_waitq, !(device->stopped)); 2167 wait_event(generic_waitq, !(device->stopped));
2169 2168
2170 cqr->callback = dasd_wakeup_cb; 2169 if (!cqr->callback)
2170 cqr->callback = dasd_wakeup_cb;
2171
2171 cqr->callback_data = DASD_SLEEPON_START_TAG; 2172 cqr->callback_data = DASD_SLEEPON_START_TAG;
2172 dasd_add_request_tail(cqr); 2173 dasd_add_request_tail(cqr);
2173 if (interruptible) { 2174 if (interruptible) {
@@ -2263,7 +2264,11 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2263 cqr->callback = dasd_wakeup_cb; 2264 cqr->callback = dasd_wakeup_cb;
2264 cqr->callback_data = DASD_SLEEPON_START_TAG; 2265 cqr->callback_data = DASD_SLEEPON_START_TAG;
2265 cqr->status = DASD_CQR_QUEUED; 2266 cqr->status = DASD_CQR_QUEUED;
2266 list_add(&cqr->devlist, &device->ccw_queue); 2267 /*
2268 * add new request as second
2269 * first the terminated cqr needs to be finished
2270 */
2271 list_add(&cqr->devlist, device->ccw_queue.next);
2267 2272
2268 /* let the bh start the request to keep them in order */ 2273 /* let the bh start the request to keep them in order */
2269 dasd_schedule_device_bh(device); 2274 dasd_schedule_device_bh(device);
@@ -3284,6 +3289,9 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
3284 if (IS_ERR(device)) 3289 if (IS_ERR(device))
3285 return PTR_ERR(device); 3290 return PTR_ERR(device);
3286 3291
3292 /* mark device as suspended */
3293 set_bit(DASD_FLAG_SUSPENDED, &device->flags);
3294
3287 if (device->discipline->freeze) 3295 if (device->discipline->freeze)
3288 rc = device->discipline->freeze(device); 3296 rc = device->discipline->freeze(device);
3289 3297
@@ -3358,6 +3366,7 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
3358 if (device->block) 3366 if (device->block)
3359 dasd_schedule_block_bh(device->block); 3367 dasd_schedule_block_bh(device->block);
3360 3368
3369 clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
3361 dasd_put_device(device); 3370 dasd_put_device(device);
3362 return 0; 3371 return 0;
3363} 3372}
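The dasd.c hunks above set DASD_FLAG_SUSPENDED at the start of freeze and clear it at the end of restore, so other code paths can defer work while the device is suspended. A hedged sketch of that deferral pattern with invented names (the real user follows in the dasd_eckd.c hunk below):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    /* Editor's sketch, not from the patch. */
    #define EXAMPLE_FLAG_SUSPENDED 0

    struct example_device {
        unsigned long flags;
        struct work_struct worker;
    };

    static void example_work_fn(struct work_struct *work)
    {
        struct example_device *dev =
            container_of(work, struct example_device, worker);

        if (test_bit(EXAMPLE_FLAG_SUSPENDED, &dev->flags)) {
            schedule_work(work);    /* run again after resume */
            return;
        }
        /* ... the device can be touched safely here ... */
    }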
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6e835c9fdfcb..6ab29680586a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -844,6 +844,30 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
844 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 844 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
845} 845}
846 846
847/*
848 * Wakeup helper for read_conf
849 * if the cqr is not done and needs some error recovery
850 * the buffer has to be re-initialized with the EBCDIC "V1.0"
851 * to show support for virtual device SNEQ
852 */
853static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
854{
855 struct ccw1 *ccw;
856 __u8 *rcd_buffer;
857
858 if (cqr->status != DASD_CQR_DONE) {
859 ccw = cqr->cpaddr;
860 rcd_buffer = (__u8 *)((addr_t) ccw->cda);
861 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
862
863 rcd_buffer[0] = 0xE5;
864 rcd_buffer[1] = 0xF1;
865 rcd_buffer[2] = 0x4B;
866 rcd_buffer[3] = 0xF0;
867 }
868 dasd_wakeup_cb(cqr, data);
869}
870
847static int dasd_eckd_read_conf_immediately(struct dasd_device *device, 871static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
848 struct dasd_ccw_req *cqr, 872 struct dasd_ccw_req *cqr,
849 __u8 *rcd_buffer, 873 __u8 *rcd_buffer,
@@ -863,6 +887,7 @@ static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
863 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 887 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
864 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 888 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
865 cqr->retries = 5; 889 cqr->retries = 5;
890 cqr->callback = read_conf_cb;
866 rc = dasd_sleep_on_immediatly(cqr); 891 rc = dasd_sleep_on_immediatly(cqr);
867 return rc; 892 return rc;
868} 893}
@@ -900,6 +925,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
900 goto out_error; 925 goto out_error;
901 } 926 }
902 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm); 927 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
928 cqr->callback = read_conf_cb;
903 ret = dasd_sleep_on(cqr); 929 ret = dasd_sleep_on(cqr);
904 /* 930 /*
905 * on success we update the user input parms 931 * on success we update the user input parms
@@ -1075,6 +1101,12 @@ static void do_path_verification_work(struct work_struct *work)
1075 data = container_of(work, struct path_verification_work_data, worker); 1101 data = container_of(work, struct path_verification_work_data, worker);
1076 device = data->device; 1102 device = data->device;
1077 1103
1104 /* delay path verification until device was resumed */
1105 if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1106 schedule_work(work);
1107 return;
1108 }
1109
1078 opm = 0; 1110 opm = 0;
1079 npm = 0; 1111 npm = 0;
1080 ppm = 0; 1112 ppm = 0;
@@ -2021,9 +2053,13 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2021 /* first of all check for state change pending interrupt */ 2053 /* first of all check for state change pending interrupt */
2022 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 2054 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
2023 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 2055 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
2024 /* for alias only and not in offline processing*/ 2056 /*
2057 * for alias only, not in offline processing
2058 * and only if not suspended
2059 */
2025 if (!device->block && private->lcu && 2060 if (!device->block && private->lcu &&
2026 !test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2061 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2062 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
2027 /* 2063 /*
2028 * the state change could be caused by an alias 2064 * the state change could be caused by an alias
2029 * reassignment remove device from alias handling 2065 * reassignment remove device from alias handling
@@ -2350,7 +2386,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2350 new_track = 1; 2386 new_track = 1;
2351 end_idaw = 0; 2387 end_idaw = 0;
2352 len_to_track_end = 0; 2388 len_to_track_end = 0;
2353 idaw_dst = 0; 2389 idaw_dst = NULL;
2354 idaw_len = 0; 2390 idaw_len = 0;
2355 rq_for_each_segment(bv, req, iter) { 2391 rq_for_each_segment(bv, req, iter) {
2356 dst = page_address(bv->bv_page) + bv->bv_offset; 2392 dst = page_address(bv->bv_page) + bv->bv_offset;
@@ -2412,7 +2448,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2412 if (end_idaw) { 2448 if (end_idaw) {
2413 idaws = idal_create_words(idaws, idaw_dst, 2449 idaws = idal_create_words(idaws, idaw_dst,
2414 idaw_len); 2450 idaw_len);
2415 idaw_dst = 0; 2451 idaw_dst = NULL;
2416 idaw_len = 0; 2452 idaw_len = 0;
2417 end_idaw = 0; 2453 end_idaw = 0;
2418 } 2454 }
@@ -3998,6 +4034,7 @@ static struct ccw_driver dasd_eckd_driver = {
3998 .thaw = dasd_generic_restore_device, 4034 .thaw = dasd_generic_restore_device,
3999 .restore = dasd_generic_restore_device, 4035 .restore = dasd_generic_restore_device,
4000 .uc_handler = dasd_generic_uc_handler, 4036 .uc_handler = dasd_generic_uc_handler,
4037 .int_class = IOINT_DAS,
4001}; 4038};
4002 4039
4003/* 4040/*
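A note on read_conf_cb() above: the four bytes written into the RCD buffer on error are the EBCDIC encoding of "V1.0", as the comment in the hunk states. For reference, a stand-alone check of that byte sequence:

    #include <stdio.h>

    int main(void)
    {
        /* EBCDIC: 0xE5 = 'V', 0xF1 = '1', 0x4B = '.', 0xF0 = '0' */
        unsigned char rcd[4] = { 0xE5, 0xF1, 0x4B, 0xF0 };

        printf("%02X %02X %02X %02X -> \"V1.0\" in EBCDIC\n",
               rcd[0], rcd[1], rcd[2], rcd[3]);
        return 0;
    }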
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 4b71b1164868..a62a75358eb9 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -79,6 +79,7 @@ static struct ccw_driver dasd_fba_driver = {
79 .freeze = dasd_generic_pm_freeze, 79 .freeze = dasd_generic_pm_freeze,
80 .thaw = dasd_generic_restore_device, 80 .thaw = dasd_generic_restore_device,
81 .restore = dasd_generic_restore_device, 81 .restore = dasd_generic_restore_device,
82 .int_class = IOINT_DAS,
82}; 83};
83 84
84static void 85static void
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1dd12bd85a69..afe8c33422ed 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -516,6 +516,7 @@ struct dasd_block {
516 */ 516 */
517#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */ 517#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
518#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */ 518#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
519#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
519 520
520 521
521void dasd_put_device_wake(struct dasd_device *); 522void dasd_put_device_wake(struct dasd_device *);
@@ -643,6 +644,7 @@ struct dasd_ccw_req *
643dasd_smalloc_request(int , int, int, struct dasd_device *); 644dasd_smalloc_request(int , int, int, struct dasd_device *);
644void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); 645void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
645void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); 646void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
647void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
646 648
647static inline int 649static inline int
648dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device) 650dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 694464c65fcd..934458ad55e5 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,7 +9,6 @@
9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> 9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
10 */ 10 */
11 11
12#include <linux/kernel_stat.h>
13#include <linux/module.h> 12#include <linux/module.h>
14#include <linux/types.h> 13#include <linux/types.h>
15#include <linux/kdev_t.h> 14#include <linux/kdev_t.h>
@@ -362,7 +361,6 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
362 int cstat, dstat; 361 int cstat, dstat;
363 int count; 362 int count;
364 363
365 kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
366 raw = dev_get_drvdata(&cdev->dev); 364 raw = dev_get_drvdata(&cdev->dev);
367 req = (struct raw3215_req *) intparm; 365 req = (struct raw3215_req *) intparm;
368 cstat = irb->scsw.cmd.cstat; 366 cstat = irb->scsw.cmd.cstat;
@@ -776,6 +774,7 @@ static struct ccw_driver raw3215_ccw_driver = {
776 .freeze = &raw3215_pm_stop, 774 .freeze = &raw3215_pm_stop,
777 .thaw = &raw3215_pm_start, 775 .thaw = &raw3215_pm_start,
778 .restore = &raw3215_pm_start, 776 .restore = &raw3215_pm_start,
777 .int_class = IOINT_C15,
779}; 778};
780 779
781#ifdef CONFIG_TN3215_CONSOLE 780#ifdef CONFIG_TN3215_CONSOLE
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 810ac38631c3..e5cb9248a442 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@
7 * Copyright IBM Corp. 2003, 2009 7 * Copyright IBM Corp. 2003, 2009
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
11#include <linux/module.h> 10#include <linux/module.h>
12#include <linux/err.h> 11#include <linux/err.h>
13#include <linux/init.h> 12#include <linux/init.h>
@@ -330,7 +329,6 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
330 struct raw3270_request *rq; 329 struct raw3270_request *rq;
331 int rc; 330 int rc;
332 331
333 kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
334 rp = dev_get_drvdata(&cdev->dev); 332 rp = dev_get_drvdata(&cdev->dev);
335 if (!rp) 333 if (!rp)
336 return; 334 return;
@@ -1398,6 +1396,7 @@ static struct ccw_driver raw3270_ccw_driver = {
1398 .freeze = &raw3270_pm_stop, 1396 .freeze = &raw3270_pm_stop,
1399 .thaw = &raw3270_pm_start, 1397 .thaw = &raw3270_pm_start,
1400 .restore = &raw3270_pm_start, 1398 .restore = &raw3270_pm_start,
1399 .int_class = IOINT_C70,
1401}; 1400};
1402 1401
1403static int 1402static int
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 837e010299a8..0b54a91f8dcd 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -61,8 +61,8 @@ static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
61 rc = sclp_service_call(cmd, sccb); 61 rc = sclp_service_call(cmd, sccb);
62 if (rc) 62 if (rc)
63 goto out; 63 goto out;
64 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | 64 __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
65 PSW_MASK_WAIT | PSW_DEFAULT_KEY); 65 PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
66 local_irq_disable(); 66 local_irq_disable();
67out: 67out:
68 /* Contents of the sccb might have changed. */ 68 /* Contents of the sccb might have changed. */
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index a90a02c28d6a..87fc0ac11e67 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -30,7 +30,8 @@ static void do_machine_quiesce(void)
30 psw_t quiesce_psw; 30 psw_t quiesce_psw;
31 31
32 smp_send_stop(); 32 smp_send_stop();
33 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; 33 quiesce_psw.mask =
34 PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
34 quiesce_psw.addr = 0xfff; 35 quiesce_psw.addr = 0xfff;
35 __load_psw(quiesce_psw); 36 __load_psw(quiesce_psw);
36} 37}
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 9eff2df70ddb..934ef33eb9a4 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1330,6 +1330,7 @@ static struct ccw_driver tape_34xx_driver = {
1330 .set_online = tape_34xx_online, 1330 .set_online = tape_34xx_online,
1331 .set_offline = tape_generic_offline, 1331 .set_offline = tape_generic_offline,
1332 .freeze = tape_generic_pm_suspend, 1332 .freeze = tape_generic_pm_suspend,
1333 .int_class = IOINT_TAP,
1333}; 1334};
1334 1335
1335static int 1336static int
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index a7d570728882..49c6aab7ad78 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1762,6 +1762,7 @@ static struct ccw_driver tape_3590_driver = {
1762 .set_offline = tape_generic_offline, 1762 .set_offline = tape_generic_offline,
1763 .set_online = tape_3590_online, 1763 .set_online = tape_3590_online,
1764 .freeze = tape_generic_pm_suspend, 1764 .freeze = tape_generic_pm_suspend,
1765 .int_class = IOINT_TAP,
1765}; 1766};
1766 1767
1767/* 1768/*
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 7978a0adeaf3..b3a3e8e8656e 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -14,7 +14,6 @@
14#define KMSG_COMPONENT "tape" 14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 16
17#include <linux/kernel_stat.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/init.h> // for kernel parameters 18#include <linux/init.h> // for kernel parameters
20#include <linux/kmod.h> // for requesting modules 19#include <linux/kmod.h> // for requesting modules
@@ -1115,7 +1114,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1115 struct tape_request *request; 1114 struct tape_request *request;
1116 int rc; 1115 int rc;
1117 1116
1118 kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
1119 device = dev_get_drvdata(&cdev->dev); 1117 device = dev_get_drvdata(&cdev->dev);
1120 if (device == NULL) { 1118 if (device == NULL) {
1121 return; 1119 return;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index f6b00c3df425..d291a54acfad 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -11,7 +11,6 @@
11#define KMSG_COMPONENT "vmur" 11#define KMSG_COMPONENT "vmur"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 13
14#include <linux/kernel_stat.h>
15#include <linux/cdev.h> 14#include <linux/cdev.h>
16#include <linux/slab.h> 15#include <linux/slab.h>
17 16
@@ -74,6 +73,7 @@ static struct ccw_driver ur_driver = {
74 .set_online = ur_set_online, 73 .set_online = ur_set_online,
75 .set_offline = ur_set_offline, 74 .set_offline = ur_set_offline,
76 .freeze = ur_pm_suspend, 75 .freeze = ur_pm_suspend,
76 .int_class = IOINT_VMR,
77}; 77};
78 78
79static DEFINE_MUTEX(vmur_mutex); 79static DEFINE_MUTEX(vmur_mutex);
@@ -305,7 +305,6 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
305{ 305{
306 struct urdev *urd; 306 struct urdev *urd;
307 307
308 kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
309 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", 308 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
310 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, 309 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
311 irb->scsw.cmd.count); 310 irb->scsw.cmd.count);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3b94044027c2..43068fbd0baa 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -16,6 +16,7 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
18#include <linux/debugfs.h> 18#include <linux/debugfs.h>
19#include <linux/module.h>
19#include <asm/asm-offsets.h> 20#include <asm/asm-offsets.h>
20#include <asm/ipl.h> 21#include <asm/ipl.h>
21#include <asm/sclp.h> 22#include <asm/sclp.h>
@@ -142,22 +143,6 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
142 return memcpy_hsa(dest, src, count, TO_KERNEL); 143 return memcpy_hsa(dest, src, count, TO_KERNEL);
143} 144}
144 145
145static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
146{
147 static char buf[4096];
148 int offs = 0, size;
149
150 while (offs < count) {
151 size = min(sizeof(buf), count - offs);
152 if (memcpy_real(buf, (void *) src + offs, size))
153 return -EFAULT;
154 if (copy_to_user(dest + offs, buf, size))
155 return -EFAULT;
156 offs += size;
157 }
158 return 0;
159}
160
161static int __init init_cpu_info(enum arch_id arch) 146static int __init init_cpu_info(enum arch_id arch)
162{ 147{
163 struct save_area *sa; 148 struct save_area *sa;
@@ -346,8 +331,8 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
346 331
347 /* Copy from real mem */ 332 /* Copy from real mem */
348 size = count - mem_offs - hdr_count; 333 size = count - mem_offs - hdr_count;
349 rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs, 334 rc = copy_to_user_real(buf + hdr_count + mem_offs,
350 size); 335 (void *) mem_start + mem_offs, size);
351 if (rc) 336 if (rc)
352 goto fail; 337 goto fail;
353 338
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5c567414c4bb..4f1989d27b1f 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -29,31 +29,20 @@
29 29
30/* a device matches a driver if all its slave devices match the same 30/* a device matches a driver if all its slave devices match the same
31 * entry of the driver */ 31 * entry of the driver */
32static int 32static int ccwgroup_bus_match(struct device *dev, struct device_driver * drv)
33ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
34{ 33{
35 struct ccwgroup_device *gdev; 34 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
36 struct ccwgroup_driver *gdrv; 35 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv);
37
38 gdev = to_ccwgroupdev(dev);
39 gdrv = to_ccwgroupdrv(drv);
40 36
41 if (gdev->creator_id == gdrv->driver_id) 37 if (gdev->creator_id == gdrv->driver_id)
42 return 1; 38 return 1;
43 39
44 return 0; 40 return 0;
45} 41}
46static int
47ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
48{
49 /* TODO */
50 return 0;
51}
52 42
53static struct bus_type ccwgroup_bus_type; 43static struct bus_type ccwgroup_bus_type;
54 44
55static void 45static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
56__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
57{ 46{
58 int i; 47 int i;
59 char str[8]; 48 char str[8];
@@ -63,7 +52,6 @@ __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
63 sysfs_remove_link(&gdev->dev.kobj, str); 52 sysfs_remove_link(&gdev->dev.kobj, str);
64 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); 53 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
65 } 54 }
66
67} 55}
68 56
69/* 57/*
@@ -87,6 +75,87 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
87 } 75 }
88} 76}
89 77
78static int ccwgroup_set_online(struct ccwgroup_device *gdev)
79{
80 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
81 int ret = 0;
82
83 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
84 return -EAGAIN;
85 if (gdev->state == CCWGROUP_ONLINE)
86 goto out;
87 if (gdrv->set_online)
88 ret = gdrv->set_online(gdev);
89 if (ret)
90 goto out;
91
92 gdev->state = CCWGROUP_ONLINE;
93out:
94 atomic_set(&gdev->onoff, 0);
95 return ret;
96}
97
98static int ccwgroup_set_offline(struct ccwgroup_device *gdev)
99{
100 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
101 int ret = 0;
102
103 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
104 return -EAGAIN;
105 if (gdev->state == CCWGROUP_OFFLINE)
106 goto out;
107 if (gdrv->set_offline)
108 ret = gdrv->set_offline(gdev);
109 if (ret)
110 goto out;
111
112 gdev->state = CCWGROUP_OFFLINE;
113out:
114 atomic_set(&gdev->onoff, 0);
115 return ret;
116}
117
118static ssize_t ccwgroup_online_store(struct device *dev,
119 struct device_attribute *attr,
120 const char *buf, size_t count)
121{
122 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
123 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
124 unsigned long value;
125 int ret;
126
127 if (!dev->driver)
128 return -EINVAL;
129 if (!try_module_get(gdrv->driver.owner))
130 return -EINVAL;
131
132 ret = strict_strtoul(buf, 0, &value);
133 if (ret)
134 goto out;
135
136 if (value == 1)
137 ret = ccwgroup_set_online(gdev);
138 else if (value == 0)
139 ret = ccwgroup_set_offline(gdev);
140 else
141 ret = -EINVAL;
142out:
143 module_put(gdrv->driver.owner);
144 return (ret == 0) ? count : ret;
145}
146
147static ssize_t ccwgroup_online_show(struct device *dev,
148 struct device_attribute *attr,
149 char *buf)
150{
151 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
152 int online;
153
154 online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
155
156 return scnprintf(buf, PAGE_SIZE, "%d\n", online);
157}
158
90/* 159/*
91 * Provide an 'ungroup' attribute so the user can remove group devices no 160 * Provide an 'ungroup' attribute so the user can remove group devices no
92 * longer needed or accidentally created. Saves memory :) 161 * longer needed or accidentally created. Saves memory :)
@@ -104,14 +173,13 @@ static void ccwgroup_ungroup_callback(struct device *dev)
104 mutex_unlock(&gdev->reg_mutex); 173 mutex_unlock(&gdev->reg_mutex);
105} 174}
106 175
107static ssize_t 176static ssize_t ccwgroup_ungroup_store(struct device *dev,
108ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 177 struct device_attribute *attr,
178 const char *buf, size_t count)
109{ 179{
110 struct ccwgroup_device *gdev; 180 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
111 int rc; 181 int rc;
112 182
113 gdev = to_ccwgroupdev(dev);
114
115 /* Prevent concurrent online/offline processing and ungrouping. */ 183 /* Prevent concurrent online/offline processing and ungrouping. */
116 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) 184 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
117 return -EAGAIN; 185 return -EAGAIN;
@@ -132,24 +200,35 @@ out:
132 } 200 }
133 return count; 201 return count;
134} 202}
135
136static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store); 203static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
204static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
137 205
138static void 206static struct attribute *ccwgroup_attrs[] = {
139ccwgroup_release (struct device *dev) 207 &dev_attr_online.attr,
208 &dev_attr_ungroup.attr,
209 NULL,
210};
211static struct attribute_group ccwgroup_attr_group = {
212 .attrs = ccwgroup_attrs,
213};
214static const struct attribute_group *ccwgroup_attr_groups[] = {
215 &ccwgroup_attr_group,
216 NULL,
217};
218
219static void ccwgroup_release(struct device *dev)
140{ 220{
141 kfree(to_ccwgroupdev(dev)); 221 kfree(to_ccwgroupdev(dev));
142} 222}
143 223
144static int 224static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
145__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
146{ 225{
147 char str[8]; 226 char str[8];
148 int i, rc; 227 int i, rc;
149 228
150 for (i = 0; i < gdev->count; i++) { 229 for (i = 0; i < gdev->count; i++) {
151 rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj, 230 rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
152 "group_device"); 231 &gdev->dev.kobj, "group_device");
153 if (rc) { 232 if (rc) {
154 for (--i; i >= 0; i--) 233 for (--i; i >= 0; i--)
155 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, 234 sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
@@ -159,8 +238,8 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
159 } 238 }
160 for (i = 0; i < gdev->count; i++) { 239 for (i = 0; i < gdev->count; i++) {
161 sprintf(str, "cdev%d", i); 240 sprintf(str, "cdev%d", i);
162 rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj, 241 rc = sysfs_create_link(&gdev->dev.kobj,
163 str); 242 &gdev->cdev[i]->dev.kobj, str);
164 if (rc) { 243 if (rc) {
165 for (--i; i >= 0; i--) { 244 for (--i; i >= 0; i--) {
166 sprintf(str, "cdev%d", i); 245 sprintf(str, "cdev%d", i);
@@ -293,26 +372,17 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
293 } 372 }
294 373
295 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); 374 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
296 375 gdev->dev.groups = ccwgroup_attr_groups;
297 rc = device_add(&gdev->dev); 376 rc = device_add(&gdev->dev);
298 if (rc) 377 if (rc)
299 goto error; 378 goto error;
300 get_device(&gdev->dev); 379 rc = __ccwgroup_create_symlinks(gdev);
301 rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
302
303 if (rc) { 380 if (rc) {
304 device_unregister(&gdev->dev); 381 device_del(&gdev->dev);
305 goto error; 382 goto error;
306 } 383 }
307 384 mutex_unlock(&gdev->reg_mutex);
308 rc = __ccwgroup_create_symlinks(gdev); 385 return 0;
309 if (!rc) {
310 mutex_unlock(&gdev->reg_mutex);
311 put_device(&gdev->dev);
312 return 0;
313 }
314 device_remove_file(&gdev->dev, &dev_attr_ungroup);
315 device_unregister(&gdev->dev);
316error: 386error:
317 for (i = 0; i < num_devices; i++) 387 for (i = 0; i < num_devices; i++)
318 if (gdev->cdev[i]) { 388 if (gdev->cdev[i]) {
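Editor's note on the ccwgroup registration change above: pointing dev.groups at the attribute groups before device_add() lets the driver core create (and later remove) the sysfs files itself, so the files exist before the add uevent is sent and the old device_create_file()/device_remove_file() calls and their error handling can go away. A minimal sketch of the pattern with an invented helper name:

    #include <linux/device.h>

    /* Hypothetical helper, not from the patch. */
    static int example_register(struct device *dev,
                                const struct attribute_group **groups)
    {
        dev->groups = groups;   /* created and removed by the driver core */
        return device_add(dev);
    }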
@@ -330,7 +400,15 @@ error:
330EXPORT_SYMBOL(ccwgroup_create_from_string); 400EXPORT_SYMBOL(ccwgroup_create_from_string);
331 401
332static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, 402static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
333 void *data); 403 void *data)
404{
405 struct device *dev = data;
406
407 if (action == BUS_NOTIFY_UNBIND_DRIVER)
408 device_schedule_callback(dev, ccwgroup_ungroup_callback);
409
410 return NOTIFY_OK;
411}
334 412
335static struct notifier_block ccwgroup_nb = { 413static struct notifier_block ccwgroup_nb = {
336 .notifier_call = ccwgroup_notifier 414 .notifier_call = ccwgroup_notifier
@@ -362,138 +440,21 @@ module_exit(cleanup_ccwgroup);
362 440
363/************************** driver stuff ******************************/ 441/************************** driver stuff ******************************/
364 442
365static int 443static int ccwgroup_probe(struct device *dev)
366ccwgroup_set_online(struct ccwgroup_device *gdev)
367{ 444{
368 struct ccwgroup_driver *gdrv; 445 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
369 int ret; 446 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
370
371 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
372 return -EAGAIN;
373 if (gdev->state == CCWGROUP_ONLINE) {
374 ret = 0;
375 goto out;
376 }
377 if (!gdev->dev.driver) {
378 ret = -EINVAL;
379 goto out;
380 }
381 gdrv = to_ccwgroupdrv (gdev->dev.driver);
382 if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0))
383 goto out;
384
385 gdev->state = CCWGROUP_ONLINE;
386 out:
387 atomic_set(&gdev->onoff, 0);
388 return ret;
389}
390
391static int
392ccwgroup_set_offline(struct ccwgroup_device *gdev)
393{
394 struct ccwgroup_driver *gdrv;
395 int ret;
396
397 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
398 return -EAGAIN;
399 if (gdev->state == CCWGROUP_OFFLINE) {
400 ret = 0;
401 goto out;
402 }
403 if (!gdev->dev.driver) {
404 ret = -EINVAL;
405 goto out;
406 }
407 gdrv = to_ccwgroupdrv (gdev->dev.driver);
408 if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0))
409 goto out;
410
411 gdev->state = CCWGROUP_OFFLINE;
412 out:
413 atomic_set(&gdev->onoff, 0);
414 return ret;
415}
416
417static ssize_t
418ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
419{
420 struct ccwgroup_device *gdev;
421 struct ccwgroup_driver *gdrv;
422 unsigned long value;
423 int ret;
424
425 if (!dev->driver)
426 return -ENODEV;
427
428 gdev = to_ccwgroupdev(dev);
429 gdrv = to_ccwgroupdrv(dev->driver);
430
431 if (!try_module_get(gdrv->driver.owner))
432 return -EINVAL;
433
434 ret = strict_strtoul(buf, 0, &value);
435 if (ret)
436 goto out;
437
438 if (value == 1)
439 ret = ccwgroup_set_online(gdev);
440 else if (value == 0)
441 ret = ccwgroup_set_offline(gdev);
442 else
443 ret = -EINVAL;
444out:
445 module_put(gdrv->driver.owner);
446 return (ret == 0) ? count : ret;
447}
448
449static ssize_t
450ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
451{
452 int online;
453
454 online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
455
456 return sprintf(buf, online ? "1\n" : "0\n");
457}
458
459static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
460
461static int
462ccwgroup_probe (struct device *dev)
463{
464 struct ccwgroup_device *gdev;
465 struct ccwgroup_driver *gdrv;
466
467 int ret;
468
469 gdev = to_ccwgroupdev(dev);
470 gdrv = to_ccwgroupdrv(dev->driver);
471
472 if ((ret = device_create_file(dev, &dev_attr_online)))
473 return ret;
474
475 ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
476 if (ret)
477 device_remove_file(dev, &dev_attr_online);
478 447
479 return ret; 448 return gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
480} 449}
481 450
482static int 451static int ccwgroup_remove(struct device *dev)
483ccwgroup_remove (struct device *dev)
484{ 452{
485 struct ccwgroup_device *gdev; 453 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
486 struct ccwgroup_driver *gdrv; 454 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
487
488 device_remove_file(dev, &dev_attr_online);
489 device_remove_file(dev, &dev_attr_ungroup);
490 455
491 if (!dev->driver) 456 if (!dev->driver)
492 return 0; 457 return 0;
493
494 gdev = to_ccwgroupdev(dev);
495 gdrv = to_ccwgroupdrv(dev->driver);
496
497 if (gdrv->remove) 458 if (gdrv->remove)
498 gdrv->remove(gdev); 459 gdrv->remove(gdev);
499 460
@@ -502,15 +463,11 @@ ccwgroup_remove (struct device *dev)
502 463
503static void ccwgroup_shutdown(struct device *dev) 464static void ccwgroup_shutdown(struct device *dev)
504{ 465{
505 struct ccwgroup_device *gdev; 466 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
506 struct ccwgroup_driver *gdrv; 467 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
507 468
508 if (!dev->driver) 469 if (!dev->driver)
509 return; 470 return;
510
511 gdev = to_ccwgroupdev(dev);
512 gdrv = to_ccwgroupdrv(dev->driver);
513
514 if (gdrv->shutdown) 471 if (gdrv->shutdown)
515 gdrv->shutdown(gdev); 472 gdrv->shutdown(gdev);
516} 473}
@@ -586,26 +543,12 @@ static const struct dev_pm_ops ccwgroup_pm_ops = {
586static struct bus_type ccwgroup_bus_type = { 543static struct bus_type ccwgroup_bus_type = {
587 .name = "ccwgroup", 544 .name = "ccwgroup",
588 .match = ccwgroup_bus_match, 545 .match = ccwgroup_bus_match,
589 .uevent = ccwgroup_uevent,
590 .probe = ccwgroup_probe, 546 .probe = ccwgroup_probe,
591 .remove = ccwgroup_remove, 547 .remove = ccwgroup_remove,
592 .shutdown = ccwgroup_shutdown, 548 .shutdown = ccwgroup_shutdown,
593 .pm = &ccwgroup_pm_ops, 549 .pm = &ccwgroup_pm_ops,
594}; 550};
595 551
596
597static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
598 void *data)
599{
600 struct device *dev = data;
601
602 if (action == BUS_NOTIFY_UNBIND_DRIVER)
603 device_schedule_callback(dev, ccwgroup_ungroup_callback);
604
605 return NOTIFY_OK;
606}
607
608
609/** 552/**
610 * ccwgroup_driver_register() - register a ccw group driver 553 * ccwgroup_driver_register() - register a ccw group driver
611 * @cdriver: driver to be registered 554 * @cdriver: driver to be registered
@@ -619,9 +562,9 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
619 562
620 return driver_register(&cdriver->driver); 563 return driver_register(&cdriver->driver);
621} 564}
565EXPORT_SYMBOL(ccwgroup_driver_register);
622 566
623static int 567static int __ccwgroup_match_all(struct device *dev, void *data)
624__ccwgroup_match_all(struct device *dev, void *data)
625{ 568{
626 return 1; 569 return 1;
627} 570}
@@ -652,6 +595,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
652 put_driver(&cdriver->driver); 595 put_driver(&cdriver->driver);
653 driver_unregister(&cdriver->driver); 596 driver_unregister(&cdriver->driver);
654} 597}
598EXPORT_SYMBOL(ccwgroup_driver_unregister);
655 599
656/** 600/**
657 * ccwgroup_probe_ccwdev() - probe function for slave devices 601 * ccwgroup_probe_ccwdev() - probe function for slave devices
@@ -666,6 +610,7 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
666{ 610{
667 return 0; 611 return 0;
668} 612}
613EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
669 614
670/** 615/**
671 * ccwgroup_remove_ccwdev() - remove function for slave devices 616 * ccwgroup_remove_ccwdev() - remove function for slave devices
@@ -702,9 +647,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
702 /* Release ccwgroup device reference for local processing. */ 647 /* Release ccwgroup device reference for local processing. */
703 put_device(&gdev->dev); 648 put_device(&gdev->dev);
704} 649}
705
706MODULE_LICENSE("GPL");
707EXPORT_SYMBOL(ccwgroup_driver_register);
708EXPORT_SYMBOL(ccwgroup_driver_unregister);
709EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
710EXPORT_SYMBOL(ccwgroup_remove_ccwdev); 650EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
651MODULE_LICENSE("GPL");
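The ccwgroup changes above drop the post-registration device_create_file()/device_remove_file() calls and instead point gdev->dev.groups at a static attribute_group array before device_add(), so the sysfs attributes appear and disappear together with the device and the error path shrinks to a plain device_del(). A minimal sketch of that driver-core pattern follows; the attribute name, the show helper and demo_register() are illustrative only and not part of the patch.

/* Sketch of the "static attribute groups" pattern used above; the
 * attribute, the show helper and the register function are made up. */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "0\n");
}
static DEVICE_ATTR(demo, 0444, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,
};
static const struct attribute_group demo_attr_group = {
	.attrs = demo_attrs,
};
static const struct attribute_group *demo_attr_groups[] = {
	&demo_attr_group,
	NULL,
};

static int demo_register(struct device *dev)
{
	/* Attributes are created atomically with the device and removed
	 * automatically on device_del(): no device_create_file() calls
	 * and no manual attribute cleanup in the error path. */
	dev->groups = demo_attr_groups;
	return device_add(dev);
}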
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index d15f8b4d78bd..5156264d0c74 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -1,10 +1,13 @@
1/* 1/*
2 * Handling of internal CCW device requests. 2 * Handling of internal CCW device requests.
3 * 3 *
4 * Copyright IBM Corp. 2009 4 * Copyright IBM Corp. 2009, 2011
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */ 6 */
7 7
8#define KMSG_COMPONENT "cio"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
8#include <linux/types.h> 11#include <linux/types.h>
9#include <linux/err.h> 12#include <linux/err.h>
10#include <asm/ccwdev.h> 13#include <asm/ccwdev.h>
@@ -323,7 +326,21 @@ void ccw_request_timeout(struct ccw_device *cdev)
323{ 326{
324 struct subchannel *sch = to_subchannel(cdev->dev.parent); 327 struct subchannel *sch = to_subchannel(cdev->dev.parent);
325 struct ccw_request *req = &cdev->private->req; 328 struct ccw_request *req = &cdev->private->req;
326 int rc; 329 int rc = -ENODEV, chp;
330
331 if (cio_update_schib(sch))
332 goto err;
333
334 for (chp = 0; chp < 8; chp++) {
335 if ((0x80 >> chp) & sch->schib.pmcw.lpum)
336 pr_warning("%s: No interrupt was received within %lus "
337 "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
338 dev_name(&cdev->dev), req->timeout / HZ,
339 scsw_cstat(&sch->schib.scsw),
340 scsw_dstat(&sch->schib.scsw),
341 sch->schid.cssid,
342 sch->schib.pmcw.chpid[chp]);
343 }
327 344
328 if (!ccwreq_next_path(cdev)) { 345 if (!ccwreq_next_path(cdev)) {
329 /* set the final return code for this request */ 346 /* set the final return code for this request */
@@ -342,7 +359,7 @@ err:
342 * ccw_request_notoper - notoper handler for I/O request procedure 359 * ccw_request_notoper - notoper handler for I/O request procedure
343 * @cdev: ccw device 360 * @cdev: ccw device
344 * 361 *
345 * Handle timeout during I/O request procedure. 362 * Handle notoper during I/O request procedure.
346 */ 363 */
347void ccw_request_notoper(struct ccw_device *cdev) 364void ccw_request_notoper(struct ccw_device *cdev)
348{ 365{
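In the new ccw_request_timeout() warning above, sch->schib.pmcw.lpum is an eight-bit "last path used" mask in which bit 0x80 stands for CHPID slot 0, so the loop reports only the channel path the timed-out request actually went out on. A small stand-alone illustration of that bit test, with made-up mask and CHPID values:

/* Stand-alone illustration of the lpum bit test used above:
 * bit (0x80 >> chp) selects CHPID slot chp of an 8-entry path mask. */
#include <stdio.h>

int main(void)
{
	unsigned char lpum = 0x40;          /* example: only path 1 was used */
	unsigned char chpid[8] = { 0x10, 0x11, 0x12, 0x13,
				   0x14, 0x15, 0x16, 0x17 };
	int chp;

	for (chp = 0; chp < 8; chp++)
		if ((0x80 >> chp) & lpum)
			printf("timeout reported for CHPID %02x\n", chpid[chp]);
	return 0;
}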
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index e950f1ad4dd1..0c87b0fc7714 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for s390 chsc subchannels 2 * Driver for s390 chsc subchannels
3 * 3 *
4 * Copyright IBM Corp. 2008, 2009 4 * Copyright IBM Corp. 2008, 2011
5 * 5 *
6 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 6 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
7 * 7 *
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/uaccess.h> 13#include <linux/uaccess.h>
14#include <linux/miscdevice.h> 14#include <linux/miscdevice.h>
15#include <linux/kernel_stat.h>
15 16
16#include <asm/compat.h> 17#include <asm/compat.h>
17#include <asm/cio.h> 18#include <asm/cio.h>
@@ -56,6 +57,8 @@ static void chsc_subchannel_irq(struct subchannel *sch)
56 57
57 CHSC_LOG(4, "irb"); 58 CHSC_LOG(4, "irb");
58 CHSC_LOG_HEX(4, irb, sizeof(*irb)); 59 CHSC_LOG_HEX(4, irb, sizeof(*irb));
60 kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
61
59 /* Copy irb to provided request and set done. */ 62 /* Copy irb to provided request and set done. */
60 if (!request) { 63 if (!request) {
61 CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", 64 CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index eb3140ee821e..dc67c397449e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -622,6 +622,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
622 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 622 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
623 if (!sch) { 623 if (!sch) {
624 /* Clear pending interrupt condition. */ 624 /* Clear pending interrupt condition. */
625 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
625 tsch(tpi_info->schid, irb); 626 tsch(tpi_info->schid, irb);
626 continue; 627 continue;
627 } 628 }
@@ -634,7 +635,10 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
634 /* Call interrupt handler if there is one. */ 635 /* Call interrupt handler if there is one. */
635 if (sch->driver && sch->driver->irq) 636 if (sch->driver && sch->driver->irq)
636 sch->driver->irq(sch); 637 sch->driver->irq(sch);
637 } 638 else
639 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
640 } else
641 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
638 spin_unlock(sch->lock); 642 spin_unlock(sch->lock);
639 /* 643 /*
640 * Are more interrupts pending? 644 * Are more interrupts pending?
@@ -667,18 +671,23 @@ static int cio_tpi(void)
667 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 671 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
668 if (tpi(NULL) != 1) 672 if (tpi(NULL) != 1)
669 return 0; 673 return 0;
674 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
670 if (tpi_info->adapter_IO) { 675 if (tpi_info->adapter_IO) {
671 do_adapter_IO(tpi_info->isc); 676 do_adapter_IO(tpi_info->isc);
672 return 1; 677 return 1;
673 } 678 }
674 irb = (struct irb *)&S390_lowcore.irb; 679 irb = (struct irb *)&S390_lowcore.irb;
675 /* Store interrupt response block to lowcore. */ 680 /* Store interrupt response block to lowcore. */
676 if (tsch(tpi_info->schid, irb) != 0) 681 if (tsch(tpi_info->schid, irb) != 0) {
677 /* Not status pending or not operational. */ 682 /* Not status pending or not operational. */
683 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
678 return 1; 684 return 1;
685 }
679 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 686 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
680 if (!sch) 687 if (!sch) {
688 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
681 return 1; 689 return 1;
690 }
682 irq_context = in_interrupt(); 691 irq_context = in_interrupt();
683 if (!irq_context) 692 if (!irq_context)
684 local_bh_disable(); 693 local_bh_disable();
@@ -687,6 +696,8 @@ static int cio_tpi(void)
687 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); 696 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
688 if (sch->driver && sch->driver->irq) 697 if (sch->driver && sch->driver->irq)
689 sch->driver->irq(sch); 698 sch->driver->irq(sch);
699 else
700 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
690 spin_unlock(sch->lock); 701 spin_unlock(sch->lock);
691 irq_exit(); 702 irq_exit();
692 if (!irq_context) 703 if (!irq_context)
@@ -1058,7 +1069,7 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
1058{ 1069{
1059 struct subchannel_id schid; 1070 struct subchannel_id schid;
1060 1071
1061 s390_reset_system(); 1072 s390_reset_system(NULL, NULL);
1062 if (reipl_find_schid(devid, &schid) != 0) 1073 if (reipl_find_schid(devid, &schid) != 0)
1063 panic("IPL Device not found\n"); 1074 panic("IPL Device not found\n");
1064 do_reipl_asm(*((__u32*)&schid)); 1075 do_reipl_asm(*((__u32*)&schid));
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 80ebdddf7747..33bb4d891e16 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -133,6 +133,8 @@ struct channel_subsystem {
133 133
134extern struct channel_subsystem *channel_subsystems[]; 134extern struct channel_subsystem *channel_subsystems[];
135 135
136void channel_subsystem_reinit(void);
137
136/* Helper functions to build lists for the slow path. */ 138/* Helper functions to build lists for the slow path. */
137void css_schedule_eval(struct subchannel_id schid); 139void css_schedule_eval(struct subchannel_id schid);
138void css_schedule_eval_all(void); 140void css_schedule_eval_all(void);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 8e04c00cf0ad..d734f4a0ecac 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -21,6 +21,7 @@
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23#include <linux/timer.h> 23#include <linux/timer.h>
24#include <linux/kernel_stat.h>
24 25
25#include <asm/ccwdev.h> 26#include <asm/ccwdev.h>
26#include <asm/cio.h> 27#include <asm/cio.h>
@@ -747,6 +748,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
747 struct ccw_device *cdev) 748 struct ccw_device *cdev)
748{ 749{
749 cdev->private->cdev = cdev; 750 cdev->private->cdev = cdev;
751 cdev->private->int_class = IOINT_CIO;
750 atomic_set(&cdev->private->onoff, 0); 752 atomic_set(&cdev->private->onoff, 0);
751 cdev->dev.parent = &sch->dev; 753 cdev->dev.parent = &sch->dev;
752 cdev->dev.release = ccw_device_release; 754 cdev->dev.release = ccw_device_release;
@@ -1010,6 +1012,8 @@ static void io_subchannel_irq(struct subchannel *sch)
1010 CIO_TRACE_EVENT(6, dev_name(&sch->dev)); 1012 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1011 if (cdev) 1013 if (cdev)
1012 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1014 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1015 else
1016 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
1013} 1017}
1014 1018
1015void io_subchannel_init_config(struct subchannel *sch) 1019void io_subchannel_init_config(struct subchannel *sch)
@@ -1621,6 +1625,7 @@ ccw_device_probe_console(void)
1621 memset(&console_private, 0, sizeof(struct ccw_device_private)); 1625 memset(&console_private, 0, sizeof(struct ccw_device_private));
1622 console_cdev.private = &console_private; 1626 console_cdev.private = &console_private;
1623 console_private.cdev = &console_cdev; 1627 console_private.cdev = &console_cdev;
1628 console_private.int_class = IOINT_CIO;
1624 ret = ccw_device_console_enable(&console_cdev, sch); 1629 ret = ccw_device_console_enable(&console_cdev, sch);
1625 if (ret) { 1630 if (ret) {
1626 cio_release_console(); 1631 cio_release_console();
@@ -1702,11 +1707,18 @@ ccw_device_probe (struct device *dev)
1702 int ret; 1707 int ret;
1703 1708
1704 cdev->drv = cdrv; /* to let the driver call _set_online */ 1709 cdev->drv = cdrv; /* to let the driver call _set_online */
1710 /* Note: we interpret class 0 in this context as an uninitialized
1711 * field since it translates to a non-I/O interrupt class. */
1712 if (cdrv->int_class != 0)
1713 cdev->private->int_class = cdrv->int_class;
1714 else
1715 cdev->private->int_class = IOINT_CIO;
1705 1716
1706 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; 1717 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1707 1718
1708 if (ret) { 1719 if (ret) {
1709 cdev->drv = NULL; 1720 cdev->drv = NULL;
1721 cdev->private->int_class = IOINT_CIO;
1710 return ret; 1722 return ret;
1711 } 1723 }
1712 1724
@@ -1740,6 +1752,7 @@ ccw_device_remove (struct device *dev)
1740 } 1752 }
1741 ccw_device_set_timeout(cdev, 0); 1753 ccw_device_set_timeout(cdev, 0);
1742 cdev->drv = NULL; 1754 cdev->drv = NULL;
1755 cdev->private->int_class = IOINT_CIO;
1743 return 0; 1756 return 0;
1744} 1757}
1745 1758
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 0b7245c72d5e..179824b3082f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -5,6 +5,7 @@
5#include <linux/atomic.h> 5#include <linux/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7#include <linux/notifier.h> 7#include <linux/notifier.h>
8#include <linux/kernel_stat.h>
8#include "io_sch.h" 9#include "io_sch.h"
9 10
10/* 11/*
@@ -56,7 +57,17 @@ extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
56static inline void 57static inline void
57dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event) 58dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
58{ 59{
59 dev_jumptable[cdev->private->state][dev_event](cdev, dev_event); 60 int state = cdev->private->state;
61
62 if (dev_event == DEV_EVENT_INTERRUPT) {
63 if (state == DEV_STATE_ONLINE)
64 kstat_cpu(smp_processor_id()).
65 irqs[cdev->private->int_class]++;
66 else if (state != DEV_STATE_CMFCHANGE &&
67 state != DEV_STATE_CMFUPDATE)
68 kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
69 }
70 dev_jumptable[state][dev_event](cdev, dev_event);
60} 71}
61 72
62/* 73/*
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index ba31ad88f4f7..2ebb492a5c17 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -4,6 +4,7 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/schid.h> 5#include <asm/schid.h>
6#include <asm/ccwdev.h> 6#include <asm/ccwdev.h>
7#include <asm/irq.h>
7#include "css.h" 8#include "css.h"
8#include "orb.h" 9#include "orb.h"
9 10
@@ -157,6 +158,7 @@ struct ccw_device_private {
157 struct list_head cmb_list; /* list of measured devices */ 158 struct list_head cmb_list; /* list of measured devices */
158 u64 cmb_start_time; /* clock value of cmb reset */ 159 u64 cmb_start_time; /* clock value of cmb reset */
159 void *cmb_wait; /* deferred cmb enable/disable */ 160 void *cmb_wait; /* deferred cmb enable/disable */
161 enum interruption_class int_class;
160}; 162};
161 163
162static inline int rsch(struct subchannel_id schid) 164static inline int rsch(struct subchannel_id schid)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 3dd86441da3d..b962ffbc0803 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -18,14 +18,6 @@
18#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ 18#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
19#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ 19#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
20 20
21/*
22 * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
23 * till next initiative to give transmitted skbs back to the stack is too long.
24 * Therefore polling is started in case of multicast queue is filled more
25 * than 50 percent.
26 */
27#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */
28
29enum qdio_irq_states { 21enum qdio_irq_states {
30 QDIO_IRQ_STATE_INACTIVE, 22 QDIO_IRQ_STATE_INACTIVE,
31 QDIO_IRQ_STATE_ESTABLISHED, 23 QDIO_IRQ_STATE_ESTABLISHED,
@@ -290,6 +282,9 @@ struct qdio_q {
290 /* error condition during a data transfer */ 282 /* error condition during a data transfer */
291 unsigned int qdio_error; 283 unsigned int qdio_error;
292 284
285 /* last scan of the queue */
286 u64 timestamp;
287
293 struct tasklet_struct tasklet; 288 struct tasklet_struct tasklet;
294 struct qdio_queue_perf_stat q_stats; 289 struct qdio_queue_perf_stat q_stats;
295 290
@@ -423,31 +418,7 @@ static inline int multicast_outbound(struct qdio_q *q)
423#define queue_irqs_disabled(q) \ 418#define queue_irqs_disabled(q) \
424 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0) 419 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
425 420
426#define TIQDIO_SHARED_IND 63 421extern u64 last_ai_time;
427
428/* device state change indicators */
429struct indicator_t {
430 u32 ind; /* u32 because of compare-and-swap performance */
431 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
432};
433
434extern struct indicator_t *q_indicators;
435
436static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq)
437{
438 return irq->nr_input_qs > 1;
439}
440
441static inline int references_shared_dsci(struct qdio_irq *irq)
442{
443 return irq->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
444}
445
446static inline int shared_ind(struct qdio_q *q)
447{
448 struct qdio_irq *i = q->irq_ptr;
449 return references_shared_dsci(i) || has_multiple_inq_on_dsci(i);
450}
451 422
452/* prototypes for thin interrupt */ 423/* prototypes for thin interrupt */
453void qdio_setup_thinint(struct qdio_irq *irq_ptr); 424void qdio_setup_thinint(struct qdio_irq *irq_ptr);
@@ -460,7 +431,8 @@ int tiqdio_allocate_memory(void);
460void tiqdio_free_memory(void); 431void tiqdio_free_memory(void);
461int tiqdio_register_thinints(void); 432int tiqdio_register_thinints(void);
462void tiqdio_unregister_thinints(void); 433void tiqdio_unregister_thinints(void);
463 434void clear_nonshared_ind(struct qdio_irq *);
435int test_nonshared_ind(struct qdio_irq *);
464 436
465/* prototypes for setup */ 437/* prototypes for setup */
466void qdio_inbound_processing(unsigned long data); 438void qdio_inbound_processing(unsigned long data);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index aaf7f935bfd3..ed68245f9741 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -54,15 +54,17 @@ static int qstat_show(struct seq_file *m, void *v)
54 if (!q) 54 if (!q)
55 return 0; 55 return 0;
56 56
57 seq_printf(m, "DSCI: %d nr_used: %d\n", 57 seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); 58 q->timestamp, last_ai_time);
59 seq_printf(m, "ftc: %d last_move: %d\n", 59 seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
60 atomic_read(&q->nr_buf_used),
60 q->first_to_check, q->last_move); 61 q->first_to_check, q->last_move);
61 if (q->is_input_q) { 62 if (q->is_input_q) {
62 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 63 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
63 q->u.in.polling, q->u.in.ack_start, 64 q->u.in.polling, q->u.in.ack_start,
64 q->u.in.ack_count); 65 q->u.in.ack_count);
65 seq_printf(m, "IRQs disabled: %u\n", 66 seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
67 *(u32 *)q->irq_ptr->dsci,
66 test_bit(QDIO_QUEUE_IRQS_DISABLED, 68 test_bit(QDIO_QUEUE_IRQS_DISABLED,
67 &q->u.in.queue_irq_state)); 69 &q->u.in.queue_irq_state));
68 } 70 }
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 6547ff469410..3ef8d071c64a 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -15,7 +15,6 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/kernel_stat.h>
19#include <linux/atomic.h> 18#include <linux/atomic.h>
20#include <asm/debug.h> 19#include <asm/debug.h>
21#include <asm/qdio.h> 20#include <asm/qdio.h>
@@ -105,9 +104,12 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
105 /* all done or next buffer state different */ 104 /* all done or next buffer state different */
106 if (ccq == 0 || ccq == 32) 105 if (ccq == 0 || ccq == 32)
107 return 0; 106 return 0;
108 /* not all buffers processed */ 107 /* no buffer processed */
109 if (ccq == 96 || ccq == 97) 108 if (ccq == 97)
110 return 1; 109 return 1;
110 /* not all buffers processed */
111 if (ccq == 96)
112 return 2;
111 /* notify devices immediately */ 113 /* notify devices immediately */
112 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); 114 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
113 return -EIO; 115 return -EIO;
@@ -127,10 +129,8 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
127static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 129static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
128 int start, int count, int auto_ack) 130 int start, int count, int auto_ack)
129{ 131{
132 int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
130 unsigned int ccq = 0; 133 unsigned int ccq = 0;
131 int tmp_count = count, tmp_start = start;
132 int nr = q->nr;
133 int rc;
134 134
135 BUG_ON(!q->irq_ptr->sch_token); 135 BUG_ON(!q->irq_ptr->sch_token);
136 qperf_inc(q, eqbs); 136 qperf_inc(q, eqbs);
@@ -141,30 +141,34 @@ again:
141 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, 141 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
142 auto_ack); 142 auto_ack);
143 rc = qdio_check_ccq(q, ccq); 143 rc = qdio_check_ccq(q, ccq);
144 144 if (!rc)
145 /* At least one buffer was processed, return and extract the remaining 145 return count - tmp_count;
146 * buffers later.
147 */
148 if ((ccq == 96) && (count != tmp_count)) {
149 qperf_inc(q, eqbs_partial);
150 return (count - tmp_count);
151 }
152 146
153 if (rc == 1) { 147 if (rc == 1) {
154 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); 148 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
155 goto again; 149 goto again;
156 } 150 }
157 151
158 if (rc < 0) { 152 if (rc == 2) {
159 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); 153 BUG_ON(tmp_count == count);
160 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 154 qperf_inc(q, eqbs_partial);
161 q->handler(q->irq_ptr->cdev, 155 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
162 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 156 tmp_count);
163 q->nr, q->first_to_kick, count, 157 /*
164 q->irq_ptr->int_parm); 158 * Retry once, if that fails bail out and process the
165 return 0; 159 * extracted buffers before trying again.
160 */
161 if (!retried++)
162 goto again;
163 else
164 return count - tmp_count;
166 } 165 }
167 return count - tmp_count; 166
167 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
168 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
169 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
170 0, -1, -1, q->irq_ptr->int_parm);
171 return 0;
168} 172}
169 173
170/** 174/**
@@ -197,22 +201,22 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
197again: 201again:
198 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); 202 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
199 rc = qdio_check_ccq(q, ccq); 203 rc = qdio_check_ccq(q, ccq);
200 if (rc == 1) { 204 if (!rc) {
205 WARN_ON(tmp_count);
206 return count - tmp_count;
207 }
208
209 if (rc == 1 || rc == 2) {
201 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); 210 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
202 qperf_inc(q, sqbs_partial); 211 qperf_inc(q, sqbs_partial);
203 goto again; 212 goto again;
204 } 213 }
205 if (rc < 0) { 214
206 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); 215 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
207 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 216 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
208 q->handler(q->irq_ptr->cdev, 217 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
209 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 218 0, -1, -1, q->irq_ptr->int_parm);
210 q->nr, q->first_to_kick, count, 219 return 0;
211 q->irq_ptr->int_parm);
212 return 0;
213 }
214 WARN_ON(tmp_count);
215 return count - tmp_count;
216} 220}
217 221
218/* returns number of examined buffers and their common state in *state */ 222/* returns number of examined buffers and their common state in *state */
@@ -277,7 +281,7 @@ static inline int set_buf_state(struct qdio_q *q, int bufnr,
277} 281}
278 282
279/* set slsb states to initial state */ 283/* set slsb states to initial state */
280void qdio_init_buf_states(struct qdio_irq *irq_ptr) 284static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
281{ 285{
282 struct qdio_q *q; 286 struct qdio_q *q;
283 int i; 287 int i;
@@ -446,7 +450,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
446 qperf_inc(q, target_full); 450 qperf_inc(q, target_full);
447 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 451 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
448 q->first_to_check); 452 q->first_to_check);
449 return; 453 goto set;
450 } 454 }
451 455
452 DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); 456 DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
@@ -456,6 +460,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
456 q->sbal[q->first_to_check]->element[14].sflags, 460 q->sbal[q->first_to_check]->element[14].sflags,
457 q->sbal[q->first_to_check]->element[15].sflags); 461 q->sbal[q->first_to_check]->element[15].sflags);
458 462
463set:
459 /* 464 /*
460 * Interrupts may be avoided as long as the error is present 465 * Interrupts may be avoided as long as the error is present
461 * so change the buffer state immediately to avoid starvation. 466 * so change the buffer state immediately to avoid starvation.
@@ -513,6 +518,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
513 int count, stop; 518 int count, stop;
514 unsigned char state = 0; 519 unsigned char state = 0;
515 520
521 q->timestamp = get_clock_fast();
522
516 /* 523 /*
517 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 524 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
518 * would return 0. 525 * would return 0.
@@ -782,6 +789,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
782 int count, stop; 789 int count, stop;
783 unsigned char state = 0; 790 unsigned char state = 0;
784 791
792 q->timestamp = get_clock_fast();
793
785 if (need_siga_sync(q)) 794 if (need_siga_sync(q))
786 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 795 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
787 !pci_out_supported(q)) || 796 !pci_out_supported(q)) ||
@@ -912,21 +921,13 @@ static void __qdio_outbound_processing(struct qdio_q *q)
912 if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) 921 if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
913 goto sched; 922 goto sched;
914 923
915 /* bail out for HiperSockets unicast queues */
916 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
917 return;
918
919 if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
920 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
921 goto sched;
922
923 if (q->u.out.pci_out_enabled) 924 if (q->u.out.pci_out_enabled)
924 return; 925 return;
925 926
926 /* 927 /*
927 * Now we know that queue type is either qeth without pci enabled 928 * Now we know that queue type is either qeth without pci enabled
928 * or HiperSockets multicast. Make sure buffer switch from PRIMED to 929 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
929 * EMPTY is noticed and outbound_handler is called after some time. 930 * is noticed and outbound_handler is called after some time.
930 */ 931 */
931 if (qdio_outbound_q_done(q)) 932 if (qdio_outbound_q_done(q))
932 del_timer(&q->u.out.timer); 933 del_timer(&q->u.out.timer);
@@ -1128,7 +1129,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1128 return; 1129 return;
1129 } 1130 }
1130 1131
1131 kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
1132 if (irq_ptr->perf_stat_enabled) 1132 if (irq_ptr->perf_stat_enabled)
1133 irq_ptr->perf_stat.qdio_int++; 1133 irq_ptr->perf_stat.qdio_int++;
1134 1134
@@ -1719,9 +1719,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
1719 1719
1720 WARN_ON(queue_irqs_enabled(q)); 1720 WARN_ON(queue_irqs_enabled(q));
1721 1721
1722 if (!shared_ind(q)) 1722 clear_nonshared_ind(irq_ptr);
1723 xchg(q->irq_ptr->dsci, 0);
1724
1725 qdio_stop_polling(q); 1723 qdio_stop_polling(q);
1726 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); 1724 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1727 1725
@@ -1729,7 +1727,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
1729 * We need to check again to not lose initiative after 1727 * We need to check again to not lose initiative after
1730 * resetting the ACK state. 1728 * resetting the ACK state.
1731 */ 1729 */
1732 if (!shared_ind(q) && *q->irq_ptr->dsci) 1730 if (test_nonshared_ind(irq_ptr))
1733 goto rescan; 1731 goto rescan;
1734 if (!qdio_inbound_q_done(q)) 1732 if (!qdio_inbound_q_done(q))
1735 goto rescan; 1733 goto rescan;
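The EQBS/SQBS rework in the qdio_main.c diff above relies on qdio_check_ccq() now separating "retry" (CCQ 97) from "partially processed" (CCQ 96). Restated as a small stand-alone helper for reference, using only the condition codes visible in the diff:

/* Condition-code mapping behind the EQBS/SQBS retry logic above. */
#include <errno.h>
#include <stdio.h>

static int check_ccq(unsigned int ccq)
{
	if (ccq == 0 || ccq == 32)
		return 0;	/* all done, or next buffer state differs */
	if (ccq == 97)
		return 1;	/* no buffer processed: retry the instruction */
	if (ccq == 96)
		return 2;	/* partial: retry once, then return what we got */
	return -EIO;		/* anything else is treated as an error */
}

int main(void)
{
	unsigned int codes[] = { 0, 32, 97, 96, 1 };
	unsigned int i;

	for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("ccq %u -> %d\n", codes[i], check_ccq(codes[i]));
	return 0;
}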
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index a3e3949d7b69..011eadea3ee4 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -26,17 +26,24 @@
26 */ 26 */
27#define TIQDIO_NR_NONSHARED_IND 63 27#define TIQDIO_NR_NONSHARED_IND 63
28#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) 28#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
29#define TIQDIO_SHARED_IND 63
30
31/* device state change indicators */
32struct indicator_t {
33 u32 ind; /* u32 because of compare-and-swap performance */
34 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
35};
29 36
30/* list of thin interrupt input queues */ 37/* list of thin interrupt input queues */
31static LIST_HEAD(tiq_list); 38static LIST_HEAD(tiq_list);
32DEFINE_MUTEX(tiq_list_lock); 39static DEFINE_MUTEX(tiq_list_lock);
33 40
34/* adapter local summary indicator */ 41/* adapter local summary indicator */
35static u8 *tiqdio_alsi; 42static u8 *tiqdio_alsi;
36 43
37struct indicator_t *q_indicators; 44static struct indicator_t *q_indicators;
38 45
39static u64 last_ai_time; 46u64 last_ai_time;
40 47
41/* returns addr for the device state change indicator */ 48/* returns addr for the device state change indicator */
42static u32 *get_indicator(void) 49static u32 *get_indicator(void)
@@ -90,6 +97,43 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
90 synchronize_rcu(); 97 synchronize_rcu();
91} 98}
92 99
100static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
101{
102 return irq_ptr->nr_input_qs > 1;
103}
104
105static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
106{
107 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
108}
109
110static inline int shared_ind(struct qdio_irq *irq_ptr)
111{
112 return references_shared_dsci(irq_ptr) ||
113 has_multiple_inq_on_dsci(irq_ptr);
114}
115
116void clear_nonshared_ind(struct qdio_irq *irq_ptr)
117{
118 if (!is_thinint_irq(irq_ptr))
119 return;
120 if (shared_ind(irq_ptr))
121 return;
122 xchg(irq_ptr->dsci, 0);
123}
124
125int test_nonshared_ind(struct qdio_irq *irq_ptr)
126{
127 if (!is_thinint_irq(irq_ptr))
128 return 0;
129 if (shared_ind(irq_ptr))
130 return 0;
131 if (*irq_ptr->dsci)
132 return 1;
133 else
134 return 0;
135}
136
93static inline u32 clear_shared_ind(void) 137static inline u32 clear_shared_ind(void)
94{ 138{
95 if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) 139 if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
@@ -119,7 +163,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
119 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, 163 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
120 q->irq_ptr->int_parm); 164 q->irq_ptr->int_parm);
121 } else { 165 } else {
122 if (!shared_ind(q)) 166 if (!shared_ind(q->irq_ptr))
123 xchg(q->irq_ptr->dsci, 0); 167 xchg(q->irq_ptr->dsci, 0);
124 168
125 /* 169 /*
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index aec60d55b10d..3c2c923d5c0a 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -33,7 +33,7 @@
33 * The pointer to our (page) of device descriptions. 33 * The pointer to our (page) of device descriptions.
34 */ 34 */
35static void *kvm_devices; 35static void *kvm_devices;
36struct work_struct hotplug_work; 36static struct work_struct hotplug_work;
37 37
38struct kvm_device { 38struct kvm_device {
39 struct virtio_device vdev; 39 struct virtio_device vdev;
@@ -334,10 +334,10 @@ static void scan_devices(void)
334 */ 334 */
335static int match_desc(struct device *dev, void *data) 335static int match_desc(struct device *dev, void *data)
336{ 336{
337 if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data) 337 struct virtio_device *vdev = dev_to_virtio(dev);
338 return 1; 338 struct kvm_device *kdev = to_kvmdev(vdev);
339 339
340 return 0; 340 return kdev->desc == data;
341} 341}
342 342
343/* 343/*
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index f1fa2483ae6b..b41fae37d3af 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -63,7 +63,6 @@
63 63
64#define KMSG_COMPONENT "claw" 64#define KMSG_COMPONENT "claw"
65 65
66#include <linux/kernel_stat.h>
67#include <asm/ccwdev.h> 66#include <asm/ccwdev.h>
68#include <asm/ccwgroup.h> 67#include <asm/ccwgroup.h>
69#include <asm/debug.h> 68#include <asm/debug.h>
@@ -291,6 +290,7 @@ static struct ccw_driver claw_ccw_driver = {
291 .ids = claw_ids, 290 .ids = claw_ids,
292 .probe = ccwgroup_probe_ccwdev, 291 .probe = ccwgroup_probe_ccwdev,
293 .remove = ccwgroup_remove_ccwdev, 292 .remove = ccwgroup_remove_ccwdev,
293 .int_class = IOINT_CLW,
294}; 294};
295 295
296static ssize_t 296static ssize_t
@@ -645,7 +645,6 @@ claw_irq_handler(struct ccw_device *cdev,
645 struct claw_env *p_env; 645 struct claw_env *p_env;
646 struct chbk *p_ch_r=NULL; 646 struct chbk *p_ch_r=NULL;
647 647
648 kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
649 CLAW_DBF_TEXT(4, trace, "clawirq"); 648 CLAW_DBF_TEXT(4, trace, "clawirq");
650 /* Bypass all 'unsolicited interrupts' */ 649 /* Bypass all 'unsolicited interrupts' */
651 privptr = dev_get_drvdata(&cdev->dev); 650 privptr = dev_get_drvdata(&cdev->dev);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 426787efc492..5cb93a8e3403 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -24,7 +24,6 @@
24#define KMSG_COMPONENT "ctcm" 24#define KMSG_COMPONENT "ctcm"
25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26 26
27#include <linux/kernel_stat.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h> 28#include <linux/init.h>
30#include <linux/kernel.h> 29#include <linux/kernel.h>
@@ -1203,7 +1202,6 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1203 int cstat; 1202 int cstat;
1204 int dstat; 1203 int dstat;
1205 1204
1206 kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
1207 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, 1205 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1208 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev)); 1206 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
1209 1207
@@ -1769,6 +1767,7 @@ static struct ccw_driver ctcm_ccw_driver = {
1769 .ids = ctcm_ids, 1767 .ids = ctcm_ids,
1770 .probe = ccwgroup_probe_ccwdev, 1768 .probe = ccwgroup_probe_ccwdev,
1771 .remove = ccwgroup_remove_ccwdev, 1769 .remove = ccwgroup_remove_ccwdev,
1770 .int_class = IOINT_CTC,
1772}; 1771};
1773 1772
1774static struct ccwgroup_driver ctcm_group_driver = { 1773static struct ccwgroup_driver ctcm_group_driver = {
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8305319b2a84..650aec1839e9 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t ctcm_proto_store(struct device *dev,
159 return count; 159 return count;
160} 160}
161 161
162const char *ctcm_type[] = { 162static const char *ctcm_type[] = {
163 "not a channel", 163 "not a channel",
164 "CTC/A", 164 "CTC/A",
165 "FICON channel", 165 "FICON channel",
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index fb246b944b16..c28713da1ec5 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,7 +26,6 @@
26#define KMSG_COMPONENT "lcs" 26#define KMSG_COMPONENT "lcs"
27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 28
29#include <linux/kernel_stat.h>
30#include <linux/module.h> 29#include <linux/module.h>
31#include <linux/if.h> 30#include <linux/if.h>
32#include <linux/netdevice.h> 31#include <linux/netdevice.h>
@@ -1399,7 +1398,6 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1399 int rc, index; 1398 int rc, index;
1400 int cstat, dstat; 1399 int cstat, dstat;
1401 1400
1402 kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
1403 if (lcs_check_irb_error(cdev, irb)) 1401 if (lcs_check_irb_error(cdev, irb))
1404 return; 1402 return;
1405 1403
@@ -1972,7 +1970,7 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1972 1970
1973static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); 1971static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1974 1972
1975const char *lcs_type[] = { 1973static const char *lcs_type[] = {
1976 "not a channel", 1974 "not a channel",
1977 "2216 parallel", 1975 "2216 parallel",
1978 "2216 channel", 1976 "2216 channel",
@@ -2399,6 +2397,7 @@ static struct ccw_driver lcs_ccw_driver = {
2399 .ids = lcs_ids, 2397 .ids = lcs_ids,
2400 .probe = ccwgroup_probe_ccwdev, 2398 .probe = ccwgroup_probe_ccwdev,
2401 .remove = ccwgroup_remove_ccwdev, 2399 .remove = ccwgroup_remove_ccwdev,
2400 .int_class = IOINT_LCS,
2402}; 2401};
2403 2402
2404/** 2403/**
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ce735204d317..e4c1176ee25b 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1415,7 +1415,7 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
1415 return 0; 1415 return 0;
1416} 1416}
1417 1417
1418int qeth_l3_set_rx_csum(struct qeth_card *card, int on) 1418static int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
1419{ 1419{
1420 int rc = 0; 1420 int rc = 0;
1421 1421
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 74054074e876..5c4abce94ad1 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -10,6 +10,7 @@
10#define ELFCORE_ADDR_ERR (-2ULL) 10#define ELFCORE_ADDR_ERR (-2ULL)
11 11
12extern unsigned long long elfcorehdr_addr; 12extern unsigned long long elfcorehdr_addr;
13extern unsigned long long elfcorehdr_size;
13 14
14extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 15extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
15 unsigned long, int); 16 unsigned long, int);
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 110821cb6ea5..31f0508d7da7 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -395,6 +395,7 @@ typedef struct elf64_shdr {
395#define NT_S390_CTRS 0x304 /* s390 control registers */ 395#define NT_S390_CTRS 0x304 /* s390 control registers */
396#define NT_S390_PREFIX 0x305 /* s390 prefix register */ 396#define NT_S390_PREFIX 0x305 /* s390 prefix register */
397#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */ 397#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
398#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
398#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ 399#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
399 400
400 401
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c2478a342cd7..fe45136b32cc 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -33,6 +33,14 @@
33#error KEXEC_ARCH not defined 33#error KEXEC_ARCH not defined
34#endif 34#endif
35 35
36#ifndef KEXEC_CRASH_CONTROL_MEMORY_LIMIT
37#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT KEXEC_CONTROL_MEMORY_LIMIT
38#endif
39
40#ifndef KEXEC_CRASH_MEM_ALIGN
41#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
42#endif
43
36#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) 44#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
37#define KEXEC_CORE_NOTE_NAME "CORE" 45#define KEXEC_CORE_NOTE_NAME "CORE"
38#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4) 46#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
@@ -129,6 +137,8 @@ extern void crash_kexec(struct pt_regs *);
129int kexec_should_crash(struct task_struct *); 137int kexec_should_crash(struct task_struct *);
130void crash_save_cpu(struct pt_regs *regs, int cpu); 138void crash_save_cpu(struct pt_regs *regs, int cpu);
131void crash_save_vmcoreinfo(void); 139void crash_save_vmcoreinfo(void);
140void crash_map_reserved_pages(void);
141void crash_unmap_reserved_pages(void);
132void arch_crash_save_vmcoreinfo(void); 142void arch_crash_save_vmcoreinfo(void);
133void vmcoreinfo_append_str(const char *fmt, ...) 143void vmcoreinfo_append_str(const char *fmt, ...)
134 __attribute__ ((format (printf, 1, 2))); 144 __attribute__ ((format (printf, 1, 2)));
diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c
index 5f85690285d4..69ebf3380bac 100644
--- a/kernel/crash_dump.c
+++ b/kernel/crash_dump.c
@@ -20,8 +20,15 @@ unsigned long saved_max_pfn;
20unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; 20unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
21 21
22/* 22/*
23 * stores the size of elf header of crash image
24 */
25unsigned long long elfcorehdr_size;
26
27/*
23 * elfcorehdr= specifies the location of elf core header stored by the crashed 28 * elfcorehdr= specifies the location of elf core header stored by the crashed
24 * kernel. This option will be passed by kexec loader to the capture kernel. 29 * kernel. This option will be passed by kexec loader to the capture kernel.
30 *
31 * Syntax: elfcorehdr=[size[KMG]@]offset[KMG]
25 */ 32 */
26static int __init setup_elfcorehdr(char *arg) 33static int __init setup_elfcorehdr(char *arg)
27{ 34{
@@ -29,6 +36,10 @@ static int __init setup_elfcorehdr(char *arg)
29 if (!arg) 36 if (!arg)
30 return -EINVAL; 37 return -EINVAL;
31 elfcorehdr_addr = memparse(arg, &end); 38 elfcorehdr_addr = memparse(arg, &end);
39 if (*end == '@') {
40 elfcorehdr_size = elfcorehdr_addr;
41 elfcorehdr_addr = memparse(end + 1, &end);
42 }
32 return end > arg ? 0 : -EINVAL; 43 return end > arg ? 0 : -EINVAL;
33} 44}
34early_param("elfcorehdr", setup_elfcorehdr); 45early_param("elfcorehdr", setup_elfcorehdr);
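The crash_dump.c hunk above extends elfcorehdr= to the form size[KMG]@offset[KMG]: memparse() reads the first number, and if the character after it is '@' that number was the size and a second memparse() call yields the header address. A user-space sketch of the same parsing, where my_memparse() is a simplified stand-in for the kernel's memparse() and the example string is made up:

/* User-space sketch of the elfcorehdr=[size[KMG]@]offset[KMG] parsing
 * added above; my_memparse() mimics the kernel's memparse() helper. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long my_memparse(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': v <<= 10;	/* fall through */
	case 'M': v <<= 10;	/* fall through */
	case 'K': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	const char *arg = "64K@0x2000000";	/* example value only */
	char *end;
	unsigned long long size = 0, addr;

	addr = my_memparse(arg, &end);
	if (*end == '@') {			/* first number was the size */
		size = addr;
		addr = my_memparse(end + 1, &end);
	}
	printf("elfcorehdr: addr=0x%llx size=0x%llx\n", addr, size);
	return 0;
}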
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 296fbc84d659..dc7bc0829286 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -498,7 +498,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
498 while (hole_end <= crashk_res.end) { 498 while (hole_end <= crashk_res.end) {
499 unsigned long i; 499 unsigned long i;
500 500
501 if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) 501 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
502 break; 502 break;
503 if (hole_end > crashk_res.end) 503 if (hole_end > crashk_res.end)
504 break; 504 break;
@@ -999,6 +999,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
999 kimage_free(xchg(&kexec_crash_image, NULL)); 999 kimage_free(xchg(&kexec_crash_image, NULL));
1000 result = kimage_crash_alloc(&image, entry, 1000 result = kimage_crash_alloc(&image, entry,
1001 nr_segments, segments); 1001 nr_segments, segments);
1002 crash_map_reserved_pages();
1002 } 1003 }
1003 if (result) 1004 if (result)
1004 goto out; 1005 goto out;
@@ -1015,6 +1016,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
1015 goto out; 1016 goto out;
1016 } 1017 }
1017 kimage_terminate(image); 1018 kimage_terminate(image);
1019 if (flags & KEXEC_ON_CRASH)
1020 crash_unmap_reserved_pages();
1018 } 1021 }
1019 /* Install the new kernel, and Uninstall the old */ 1022 /* Install the new kernel, and Uninstall the old */
1020 image = xchg(dest_image, image); 1023 image = xchg(dest_image, image);
@@ -1026,6 +1029,18 @@ out:
1026 return result; 1029 return result;
1027} 1030}
1028 1031
1032/*
1033 * Add and remove page tables for crashkernel memory
1034 *
1035 * Provide an empty default implementation here -- architecture
1036 * code may override this
1037 */
1038void __weak crash_map_reserved_pages(void)
1039{}
1040
1041void __weak crash_unmap_reserved_pages(void)
1042{}
1043
1029#ifdef CONFIG_COMPAT 1044#ifdef CONFIG_COMPAT
1030asmlinkage long compat_sys_kexec_load(unsigned long entry, 1045asmlinkage long compat_sys_kexec_load(unsigned long entry,
1031 unsigned long nr_segments, 1046 unsigned long nr_segments,
@@ -1134,14 +1149,16 @@ int crash_shrink_memory(unsigned long new_size)
1134 goto unlock; 1149 goto unlock;
1135 } 1150 }
1136 1151
1137 start = roundup(start, PAGE_SIZE); 1152 start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1138 end = roundup(start + new_size, PAGE_SIZE); 1153 end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1139 1154
1155 crash_map_reserved_pages();
1140 crash_free_reserved_phys_range(end, crashk_res.end); 1156 crash_free_reserved_phys_range(end, crashk_res.end);
1141 1157
1142 if ((start == end) && (crashk_res.parent != NULL)) 1158 if ((start == end) && (crashk_res.parent != NULL))
1143 release_resource(&crashk_res); 1159 release_resource(&crashk_res);
1144 crashk_res.end = end - 1; 1160 crashk_res.end = end - 1;
1161 crash_unmap_reserved_pages();
1145 1162
1146unlock: 1163unlock:
1147 mutex_unlock(&kexec_mutex); 1164 mutex_unlock(&kexec_mutex);
@@ -1380,24 +1397,23 @@ int __init parse_crashkernel(char *cmdline,
1380} 1397}
1381 1398
1382 1399
1383 1400static void update_vmcoreinfo_note(void)
1384void crash_save_vmcoreinfo(void)
1385{ 1401{
1386 u32 *buf; 1402 u32 *buf = vmcoreinfo_note;
1387 1403
1388 if (!vmcoreinfo_size) 1404 if (!vmcoreinfo_size)
1389 return; 1405 return;
1390
1391 vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1392
1393 buf = (u32 *)vmcoreinfo_note;
1394
1395 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, 1406 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1396 vmcoreinfo_size); 1407 vmcoreinfo_size);
1397
1398 final_note(buf); 1408 final_note(buf);
1399} 1409}
1400 1410
1411void crash_save_vmcoreinfo(void)
1412{
1413 vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1414 update_vmcoreinfo_note();
1415}
1416
1401void vmcoreinfo_append_str(const char *fmt, ...) 1417void vmcoreinfo_append_str(const char *fmt, ...)
1402{ 1418{
1403 va_list args; 1419 va_list args;
@@ -1483,6 +1499,7 @@ static int __init crash_save_vmcoreinfo_init(void)
1483 VMCOREINFO_NUMBER(PG_swapcache); 1499 VMCOREINFO_NUMBER(PG_swapcache);
1484 1500
1485 arch_crash_save_vmcoreinfo(); 1501 arch_crash_save_vmcoreinfo();
1502 update_vmcoreinfo_note();
1486 1503
1487 return 0; 1504 return 0;
1488} 1505}
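crash_map_reserved_pages() and crash_unmap_reserved_pages() are added as empty __weak defaults, so only an architecture that keeps the crashkernel region unmapped (s390 kdump in this series) has to supply real bodies. A stand-alone GCC example of the weak-symbol override behind that; the function name here is illustrative:

/* GCC weak-symbol pattern behind the empty crash_map_reserved_pages()
 * defaults above: a strong definition, if present, wins at link time. */
#include <stdio.h>

/* generic code: empty default that architecture code may override */
void __attribute__((weak)) map_reserved_pages(void)
{
}

/* "arch" code: uncommenting this strong definition replaces the weak one
void map_reserved_pages(void)
{
	printf("arch-specific mapping of crashkernel memory\n");
}
*/

int main(void)
{
	map_reserved_pages();	/* calls whichever definition was linked in */
	return 0;
}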
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2d2ecdcc8cdb..2fe2bc2a57ea 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -151,14 +151,6 @@ extern int pwrsw_enabled;
151extern int unaligned_enabled; 151extern int unaligned_enabled;
152#endif 152#endif
153 153
154#ifdef CONFIG_S390
155#ifdef CONFIG_MATHEMU
156extern int sysctl_ieee_emulation_warnings;
157#endif
158extern int sysctl_userprocess_debug;
159extern int spin_retry;
160#endif
161
162#ifdef CONFIG_IA64 154#ifdef CONFIG_IA64
163extern int no_unaligned_warning; 155extern int no_unaligned_warning;
164extern int unaligned_dump_stack; 156extern int unaligned_dump_stack;