author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-09 22:01:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-09 22:01:47 -0500
commit		67dd2f5a669f48e48ea1013fb80522adca8287f4 (patch)
tree		eee4e7f15df90f899211cde0a669d661085de05d /arch/s390
parent		5327b9b83a9c45a3fcbcda224a2b02d9eea9f6bb (diff)
parent		42d61b9b415686d81eaa022b846737548876e51d (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (72 commits)
  [S390] 3215/3270 console: remove wrong comment
  [S390] dasd: remove BKL from extended error reporting code
  [S390] vmlogrdr: remove BKL
  [S390] vmur: remove BKL
  [S390] zcrypt: remove BKL
  [S390] 3270: remove BKL
  [S390] vmwatchdog: remove lock_kernel() from open() function
  [S390] monwriter: remove lock_kernel() from open() function
  [S390] monreader: remove lock_kernel() from open() function
  [S390] s390: remove unused nfsd #includes
  [S390] ftrace: build ftrace.o when CONFIG_FTRACE_SYSCALLS is set for s390
  [S390] etr/stp: put correct per cpu variable
  [S390] tty3270: move keyboard compat ioctls
  [S390] sclp: improve servicability setting
  [S390] s390: use change recording override for kernel mapping
  [S390] MAINTAINERS: Add s390 drivers block
  [S390] use generic sockios.h header file
  [S390] use generic termbits.h header file
  [S390] smp: remove unused typedef and defines
  [S390] cmm: free pages on hibernate.
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/Kconfig			|  15
-rw-r--r--	arch/s390/defconfig			|   1
-rw-r--r--	arch/s390/include/asm/atomic.h		|   8
-rw-r--r--	arch/s390/include/asm/ccwdev.h		|   4
-rw-r--r--	arch/s390/include/asm/mmu_context.h	|   4
-rw-r--r--	arch/s390/include/asm/pgalloc.h		|   3
-rw-r--r--	arch/s390/include/asm/pgtable.h		|   4
-rw-r--r--	arch/s390/include/asm/setup.h		|  17
-rw-r--r--	arch/s390/include/asm/smp.h		|  54
-rw-r--r--	arch/s390/include/asm/sockios.h		|  21
-rw-r--r--	arch/s390/include/asm/termbits.h	| 206
-rw-r--r--	arch/s390/include/asm/todclk.h		|  23
-rw-r--r--	arch/s390/include/asm/uaccess.h		|   2
-rw-r--r--	arch/s390/kernel/Makefile		|   1
-rw-r--r--	arch/s390/kernel/compat_linux.c		|   6
-rw-r--r--	arch/s390/kernel/compat_linux.h		|   4
-rw-r--r--	arch/s390/kernel/head64.S		|   3
-rw-r--r--	arch/s390/kernel/setup.c		|  36
-rw-r--r--	arch/s390/kernel/time.c			|   4
-rw-r--r--	arch/s390/kernel/vdso.c			|   9
-rw-r--r--	arch/s390/kvm/Kconfig			|   1
-rw-r--r--	arch/s390/lib/uaccess_mvcos.c		|   4
-rw-r--r--	arch/s390/lib/uaccess_pt.c		| 147
-rw-r--r--	arch/s390/mm/cmm.c			|  61
-rw-r--r--	arch/s390/mm/fault.c			| 378
-rw-r--r--	arch/s390/mm/pgtable.c			|   2
-rw-r--r--	arch/s390/mm/vmem.c			|  11
27 files changed, 371 insertions(+), 658 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 16c673096a22..c80235206c01 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -220,23 +220,8 @@ config AUDIT_ARCH
 	bool
 	default y
 
-config S390_SWITCH_AMODE
-	bool "Switch kernel/user addressing modes"
-	help
-	  This option allows to switch the addressing modes of kernel and user
-	  space. The kernel parameter switch_amode=on will enable this feature,
-	  default is disabled. Enabling this (via kernel parameter) on machines
-	  earlier than IBM System z9-109 EC/BC will reduce system performance.
-
-	  Note that this option will also be selected by selecting the execute
-	  protection option below. Enabling the execute protection via the
-	  noexec kernel parameter will also switch the addressing modes,
-	  independent of the switch_amode kernel parameter.
-
-
 config S390_EXEC_PROTECT
 	bool "Data execute protection"
-	select S390_SWITCH_AMODE
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ab4464486b7a..f4e53c6708dc 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -185,7 +185,6 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
 CONFIG_AUDIT_ARCH=y
-CONFIG_S390_SWITCH_AMODE=y
 CONFIG_S390_EXEC_PROTECT=y
 
 #
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index ae7c8f9f94a5..2a113d6a7dfd 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -21,7 +21,7 @@
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CS_LOOP(ptr, op_val, op_string) ({		\
-	typeof(ptr->counter) old_val, new_val;		\
+	int old_val, new_val;				\
 	asm volatile(					\
 		"	l	%0,%2\n"		\
 		"0:	lr	%1,%0\n"		\
@@ -38,7 +38,7 @@
 #else /* __GNUC__ */
 
 #define __CS_LOOP(ptr, op_val, op_string) ({		\
-	typeof(ptr->counter) old_val, new_val;		\
+	int old_val, new_val;				\
 	asm volatile(					\
 		"	l	%0,0(%3)\n"		\
 		"0:	lr	%1,%0\n"		\
@@ -143,7 +143,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CSG_LOOP(ptr, op_val, op_string) ({		\
-	typeof(ptr->counter) old_val, new_val;		\
+	long long old_val, new_val;			\
 	asm volatile(					\
 		"	lg	%0,%2\n"		\
 		"0:	lgr	%1,%0\n"		\
@@ -160,7 +160,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #else /* __GNUC__ */
 
 #define __CSG_LOOP(ptr, op_val, op_string) ({		\
-	typeof(ptr->counter) old_val, new_val;		\
+	long long old_val, new_val;			\
 	asm volatile(					\
 		"	lg	%0,0(%3)\n"		\
 		"0:	lgr	%1,%0\n"		\
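Note on the change above: the loop variables are narrowed from typeof(ptr->counter) to plain int/long long, matching atomic_t and atomic64_t. For readers unfamiliar with the idiom, __CS_LOOP is a compare-and-swap retry loop around the s390 cs/csg instructions; a hedged sketch of its semantics in portable C (using a GCC builtin, not the kernel's inline assembly):

/* Sketch only: C equivalent of __CS_LOOP(ptr, val, "ar"). On failure,
 * __atomic_compare_exchange_n reloads old_val from memory, which is
 * exactly what the cs instruction does with its first operand. */
static inline int cs_loop_add(int *counter, int op_val)
{
	int old_val, new_val;

	old_val = *counter;			/* l   %0,%2      */
	do {
		new_val = old_val + op_val;	/* lr  %1,%0 ; ar */
	} while (!__atomic_compare_exchange_n(counter, &old_val, new_val,
					      0, __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return new_val;				/* cs retried until it sticks */
}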
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 2a5419551176..f4bd346a52d3 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -142,6 +142,8 @@ struct ccw1;
 extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
 extern int ccw_device_set_options(struct ccw_device *, unsigned long);
 extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
+int ccw_device_is_pathgroup(struct ccw_device *cdev);
+int ccw_device_is_multipath(struct ccw_device *cdev);
 
 /* Allow for i/o completion notification after primary interrupt status. */
 #define CCWDEV_EARLY_NOTIFICATION	0x0001
@@ -151,6 +153,8 @@ extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
 #define CCWDEV_DO_PATHGROUP		0x0004
 /* Allow forced onlining of boxed devices. */
 #define CCWDEV_ALLOW_FORCE		0x0008
+/* Try to use multipath mode. */
+#define CCWDEV_DO_MULTIPATH		0x0010
 
 extern int ccw_device_start(struct ccw_device *, struct ccw1 *,
 			    unsigned long, __u8, unsigned long);
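A hedged usage sketch of the new flag and predicates, based only on the declarations above; the driver and its callback are hypothetical:

/* Illustrative only: a CCW driver requesting path grouping plus the new
 * multipath mode, then checking what the common I/O layer negotiated. */
static int mydrv_set_online(struct ccw_device *cdev)
{
	int rc;

	rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
					  CCWDEV_DO_MULTIPATH);
	if (rc)
		return rc;
	if (ccw_device_is_pathgroup(cdev) && ccw_device_is_multipath(cdev))
		dev_info(&cdev->dev, "running in multipath mode\n");
	return 0;
}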
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index fc7edd6f41b6..976e273988c2 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -36,7 +36,7 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.has_pgste = 1;
 		mm->context.alloc_pgste = 1;
 	} else {
-		mm->context.noexec = s390_noexec;
+		mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
 		mm->context.has_pgste = 0;
 		mm->context.alloc_pgste = 0;
 	}
@@ -58,7 +58,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (switch_amode) {
+	if (user_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
 		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
 		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index ddad5903341c..68940d0bad91 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -143,7 +143,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.crst_list);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
+	return (pgd_t *)
+		crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 60a7b1a1702f..e2fa79cf0614 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -169,12 +169,13 @@ extern unsigned long VMALLOC_START;
  * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048)
  *
  * A 64 bit pagetable entry of S390 has following format:
- * |                           PFRA                       |0IP0|  OS  |
+ * |                           PFRA                       |0IPC|  OS  |
  * 0000000000111111111122222222223333333333444444444455555555556666
  * 0123456789012345678901234567890123456789012345678901234567890123
  *
  * I Page-Invalid Bit:    Page is not available for address-translation
  * P Page-Protection Bit: Store access not possible for page
+ * C Change-bit override: HW is not required to set change bit
  *
  * A 64 bit segmenttable entry of S390 has following format:
  * |        P-table origin                              |      TT
@@ -218,6 +219,7 @@ extern unsigned long VMALLOC_START;
  */
 
 /* Hardware bits in the page table entry */
+#define _PAGE_CO	0x100	/* HW Change-bit override */
 #define _PAGE_RO	0x200	/* HW read-only bit  */
 #define _PAGE_INVALID	0x400	/* HW invalid bit */
 
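The new C bit backs the "[S390] s390: use change recording override for kernel mapping" commit from the shortlog: with _PAGE_CO set, hardware need not record changes for a page, which is safe for mappings whose dirty state the kernel never queries. A hedged sketch of composing such an entry (the helper name is hypothetical):

/* Sketch only: the PFRA occupies the high bits of a 64-bit s390 PTE,
 * so pfn << PAGE_SHIFT plus the flag bits yields the entry value. */
static unsigned long mk_kernel_pte(unsigned long pfn, int writable)
{
	unsigned long pte = pfn << PAGE_SHIFT;

	pte |= _PAGE_CO;	/* HW may skip setting the change bit */
	if (!writable)
		pte |= _PAGE_RO;
	return pte;
}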
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index e37478e87286..52a779c337e8 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -49,17 +49,12 @@ extern unsigned long memory_end;
 
 void detect_memory_layout(struct mem_chunk chunk[]);
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-extern unsigned int switch_amode;
-#else
-#define switch_amode	(0)
-#endif
-
-#ifdef CONFIG_S390_EXEC_PROTECT
-extern unsigned int s390_noexec;
-#else
-#define s390_noexec	(0)
-#endif
+#define PRIMARY_SPACE_MODE	0
+#define ACCESS_REGISTER_MODE	1
+#define SECONDARY_SPACE_MODE	2
+#define HOME_SPACE_MODE		3
+
+extern unsigned int user_mode;
 
 /*
  * Machine features detected in head.S
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a868b272c257..2ab1141eeb50 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -1,57 +1,22 @@
 /*
- * include/asm-s390/smp.h
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
- *	      Heiko Carstens (heiko.carstens@de.ibm.com)
+ * Copyright IBM Corp. 1999,2009
+ * Author(s): Denis Joseph Barrow,
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/bitops.h>
+#ifdef CONFIG_SMP
 
-#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
-
-#include <asm/lowcore.h>
-#include <asm/sigp.h>
-#include <asm/ptrace.h>
 #include <asm/system.h>
-
-/*
-  s390 specific smp.c headers
- */
-typedef struct
-{
-	int intresting;
-	sigp_ccode ccode;
-	__u32 status;
-	__u16 cpu;
-} sigp_info;
+#include <asm/sigp.h>
 
 extern void machine_restart_smp(char *);
 extern void machine_halt_smp(void);
 extern void machine_power_off_smp(void);
 
-#define NO_PROC_ID		0xFF		/* No processor magic marker */
-
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY	20		/* Schedule penalty */
-
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
-#define cpu_logical_map(cpu)	(cpu)
 
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
@@ -64,7 +29,9 @@ extern int smp_cpu_polarization[];
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-#endif
+extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
@@ -72,5 +39,4 @@ extern int smp_rescan_cpus(void);
 static inline int smp_rescan_cpus(void) { return 0; }
 #endif
 
-extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
-#endif
+#endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/sockios.h b/arch/s390/include/asm/sockios.h
index f4fc16c7da59..6f60eee73242 100644
--- a/arch/s390/include/asm/sockios.h
+++ b/arch/s390/include/asm/sockios.h
@@ -1,21 +1,6 @@
-/*
- *  include/asm-s390/sockios.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/sockios.h"
- */
+#ifndef _ASM_S390_SOCKIOS_H
+#define _ASM_S390_SOCKIOS_H
 
-#ifndef __ARCH_S390_SOCKIOS__
-#define __ARCH_S390_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN	0x8901
-#define SIOCSPGRP	0x8902
-#define FIOGETOWN	0x8903
-#define SIOCGPGRP	0x8904
-#define SIOCATMARK	0x8905
-#define SIOCGSTAMP	0x8906	/* Get stamp (timeval) */
-#define SIOCGSTAMPNS	0x8907	/* Get stamp (timespec) */
+#include <asm-generic/sockios.h>
 
 #endif
diff --git a/arch/s390/include/asm/termbits.h b/arch/s390/include/asm/termbits.h
index 58731853d529..71bf6ac6a2b9 100644
--- a/arch/s390/include/asm/termbits.h
+++ b/arch/s390/include/asm/termbits.h
@@ -1,206 +1,6 @@
-/*
- *  include/asm-s390/termbits.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/termbits.h"
- */
+#ifndef _ASM_S390_TERMBITS_H
+#define _ASM_S390_TERMBITS_H
 
-#ifndef __ARCH_S390_TERMBITS_H__
-#define __ARCH_S390_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char	cc_t;
-typedef unsigned int	speed_t;
-typedef unsigned int	tcflag_t;
-
-#define NCCS 19
-struct termios {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-};
-
-struct termios2 {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-	speed_t c_ispeed;		/* input speed */
-	speed_t c_ospeed;		/* output speed */
-};
-
-struct ktermios {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-	speed_t c_ispeed;		/* input speed */
-	speed_t c_ospeed;		/* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-#define IGNBRK	0000001
-#define BRKINT	0000002
-#define IGNPAR	0000004
-#define PARMRK	0000010
-#define INPCK	0000020
-#define ISTRIP	0000040
-#define INLCR	0000100
-#define IGNCR	0000200
-#define ICRNL	0000400
-#define IUCLC	0001000
-#define IXON	0002000
-#define IXANY	0004000
-#define IXOFF	0010000
-#define IMAXBEL	0020000
-#define IUTF8	0040000
-
-/* c_oflag bits */
-#define OPOST	0000001
-#define OLCUC	0000002
-#define ONLCR	0000004
-#define OCRNL	0000010
-#define ONOCR	0000020
-#define ONLRET	0000040
-#define OFILL	0000100
-#define OFDEL	0000200
-#define NLDLY	0000400
-#define   NL0	0000000
-#define   NL1	0000400
-#define CRDLY	0003000
-#define   CR0	0000000
-#define   CR1	0001000
-#define   CR2	0002000
-#define   CR3	0003000
-#define TABDLY	0014000
-#define   TAB0	0000000
-#define   TAB1	0004000
-#define   TAB2	0010000
-#define   TAB3	0014000
-#define   XTABS	0014000
-#define BSDLY	0020000
-#define   BS0	0000000
-#define   BS1	0020000
-#define VTDLY	0040000
-#define   VT0	0000000
-#define   VT1	0040000
-#define FFDLY	0100000
-#define   FF0	0000000
-#define   FF1	0100000
-
-/* c_cflag bit meaning */
-#define CBAUD	0010017
-#define  B0	0000000		/* hang up */
-#define  B50	0000001
-#define  B75	0000002
-#define  B110	0000003
-#define  B134	0000004
-#define  B150	0000005
-#define  B200	0000006
-#define  B300	0000007
-#define  B600	0000010
-#define  B1200	0000011
-#define  B1800	0000012
-#define  B2400	0000013
-#define  B4800	0000014
-#define  B9600	0000015
-#define  B19200	0000016
-#define  B38400	0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE	0000060
-#define   CS5	0000000
-#define   CS6	0000020
-#define   CS7	0000040
-#define   CS8	0000060
-#define CSTOPB	0000100
-#define CREAD	0000200
-#define PARENB	0000400
-#define PARODD	0001000
-#define HUPCL	0002000
-#define CLOCAL	0004000
-#define CBAUDEX 0010000
-#define    BOTHER 0010000
-#define    B57600 0010001
-#define   B115200 0010002
-#define   B230400 0010003
-#define   B460800 0010004
-#define   B500000 0010005
-#define   B576000 0010006
-#define   B921600 0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
-#define CIBAUD	  002003600000	/* input baud rate */
-#define CMSPAR	  010000000000	/* mark or space (stick) parity */
-#define CRTSCTS	  020000000000	/* flow control */
-
-#define IBSHIFT	  16		/* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG	0000001
-#define ICANON	0000002
-#define XCASE	0000004
-#define ECHO	0000010
-#define ECHOE	0000020
-#define ECHOK	0000040
-#define ECHONL	0000100
-#define NOFLSH	0000200
-#define TOSTOP	0000400
-#define ECHOCTL	0001000
-#define ECHOPRT	0002000
-#define ECHOKE	0004000
-#define FLUSHO	0010000
-#define PENDIN	0040000
-#define IEXTEN	0100000
-
-/* tcflow() and TCXONC use these */
-#define	TCOOFF		0
-#define	TCOON		1
-#define	TCIOFF		2
-#define	TCION		3
-
-/* tcflush() and TCFLSH use these */
-#define	TCIFLUSH	0
-#define	TCOFLUSH	1
-#define	TCIOFLUSH	2
-
-/* tcsetattr uses these */
-#define	TCSANOW		0
-#define	TCSADRAIN	1
-#define	TCSAFLUSH	2
+#include <asm-generic/termbits.h>
 
 #endif
diff --git a/arch/s390/include/asm/todclk.h b/arch/s390/include/asm/todclk.h
deleted file mode 100644
index c7f62055488a..000000000000
--- a/arch/s390/include/asm/todclk.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * File...........: linux/include/asm/todclk.h
- * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
- * Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
- *
- * History of changes (starts July 2000)
- */
-
-#ifndef __ASM_TODCLK_H
-#define __ASM_TODCLK_H
-
-#ifdef __KERNEL__
-
-#define TOD_uSEC (0x1000ULL)
-#define TOD_mSEC (1000 * TOD_uSEC)
-#define TOD_SEC (1000 * TOD_mSEC)
-#define TOD_MIN (60 * TOD_SEC)
-#define TOD_HOUR (60 * TOD_MIN)
-
-#endif
-
-#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8377e91533d2..cbf0a8745bf4 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -93,6 +93,8 @@ extern struct uaccess_ops uaccess_mvcos;
 extern struct uaccess_ops uaccess_mvcos_switch;
 extern struct uaccess_ops uaccess_pt;
 
+extern int __handle_fault(unsigned long, unsigned long, int);
+
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
 	size = uaccess.copy_to_user_small(size, ptr, x);
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c7be8e10b87e..683f6381cc59 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index fda1a8123f9b..25c31d681402 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -31,14 +31,8 @@
 #include <linux/shm.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
-#include <linux/nfs_fs.h>
 #include <linux/quota.h>
 #include <linux/module.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
 #include <linux/poll.h>
 #include <linux/personality.h>
 #include <linux/stat.h>
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 45e9092b3aad..cb97afc85c94 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -4,10 +4,6 @@
 #include <linux/compat.h>
 #include <linux/socket.h>
 #include <linux/syscalls.h>
-#include <linux/nfs_fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/export.h>
 
 /* Macro that masks the high order bit of an 32 bit pointer and converts it*/
 /* to a 64 bit pointer */
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6a250808092b..d984a2a380c3 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -83,6 +83,8 @@ startup_continue:
 	slr	%r0,%r0			# set cpuid to zero
 	sigp	%r1,%r0,0x12		# switch to esame mode
 	sam64				# switch to 64 bit mode
+	llgfr	%r13,%r13		# clear high-order half of base reg
+	lmh	%r0,%r15,.Lzero64-.LPG1(%r13)	# clear high-order half
 	lctlg	%c0,%c15,.Lctl-.LPG1(%r13)	# load control registers
 	lg	%r12,.Lparmaddr-.LPG1(%r13)	# pointer to parameter area
 					# move IPL device to lowcore
@@ -127,6 +129,7 @@ startup_continue:
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad	0x80000000 + 0x20000 - 8	# 2GB + 128K - 8
 .Lnop:	.long	0x07000700
+.Lzero64:.fill	16,4,0x0
 #ifdef CONFIG_ZFCPDUMP
 .Lcurrent_cpu:
 	.long	0x0
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 061479ff029f..0663287fa1b3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,9 +305,8 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-unsigned int switch_amode = 0;
-EXPORT_SYMBOL_GPL(switch_amode);
+unsigned int user_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(user_mode);
 
 static int set_amode_and_uaccess(unsigned long user_amode,
 				 unsigned long user32_amode)
@@ -340,23 +339,29 @@ static int set_amode_and_uaccess(unsigned long user_amode,
  */
 static int __init early_parse_switch_amode(char *p)
 {
-	switch_amode = 1;
+	if (user_mode != SECONDARY_SPACE_MODE)
+		user_mode = PRIMARY_SPACE_MODE;
 	return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);
 
-#else /* CONFIG_S390_SWITCH_AMODE */
-static inline int set_amode_and_uaccess(unsigned long user_amode,
-					unsigned long user32_amode)
+static int __init early_parse_user_mode(char *p)
 {
+	if (p && strcmp(p, "primary") == 0)
+		user_mode = PRIMARY_SPACE_MODE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+	else if (p && strcmp(p, "secondary") == 0)
+		user_mode = SECONDARY_SPACE_MODE;
+#endif
+	else if (!p || strcmp(p, "home") == 0)
+		user_mode = HOME_SPACE_MODE;
+	else
+		return 1;
 	return 0;
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
+early_param("user_mode", early_parse_user_mode);
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-unsigned int s390_noexec = 0;
-EXPORT_SYMBOL_GPL(s390_noexec);
-
 /*
  * Enable execute protection?
  */
@@ -364,8 +369,7 @@ static int __init early_parse_noexec(char *p)
 {
 	if (!strncmp(p, "off", 3))
 		return 0;
-	switch_amode = 1;
-	s390_noexec = 1;
+	user_mode = SECONDARY_SPACE_MODE;
 	return 0;
 }
 early_param("noexec", early_parse_noexec);
@@ -373,7 +377,7 @@ early_param("noexec", early_parse_noexec);
 
 static void setup_addressing_mode(void)
 {
-	if (s390_noexec) {
+	if (user_mode == SECONDARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
 					  PSW32_ASC_SECONDARY))
 			pr_info("Execute protection active, "
@@ -381,7 +385,7 @@ static void setup_addressing_mode(void)
 		else
 			pr_info("Execute protection active, "
 				"mvcos not available\n");
-	} else if (switch_amode) {
+	} else if (user_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
@@ -411,7 +415,7 @@ setup_lowcore(void)
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-	if (switch_amode)
+	if (user_mode != HOME_SPACE_MODE)
 		lc->restart_psw.mask |= PSW_ASC_HOME;
 	lc->external_new_psw.mask = psw_kernel_bits;
 	lc->external_new_psw.addr =
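Net effect of the parser above: switch_amode= and noexec= survive as compatibility aliases, while a new user_mode= early parameter selects the addressing mode directly. Illustrative command lines, matching the strcmp cases shown:

	user_mode=home       # default: user space runs in the home address space
	user_mode=primary    # split kernel/user addressing, like switch_amode=on
	user_mode=secondary  # execute protection, like noexec=on
	                     # (only accepted with CONFIG_S390_EXEC_PROTECT)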
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 68e1ecf5ebab..65065ac48ed3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -335,7 +335,7 @@ int get_sync_clock(unsigned long long *clock)
 	sw0 = atomic_read(sw_ptr);
 	*clock = get_clock();
 	sw1 = atomic_read(sw_ptr);
-	put_cpu_var(clock_sync_sync);
+	put_cpu_var(clock_sync_word);
 	if (sw0 == sw1 && (sw0 & 0x80000000U))
 		/* Success: time is in sync. */
 		return 0;
@@ -385,7 +385,7 @@ static inline int check_sync_clock(void)
 
 	sw_ptr = &get_cpu_var(clock_sync_word);
 	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
-	put_cpu_var(clock_sync_sync);
+	put_cpu_var(clock_sync_word);
 	return rc;
 }
 
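This is the "[S390] etr/stp: put correct per cpu variable" fix from the shortlog: both call sites acquired clock_sync_word but released clock_sync_sync. The invariant, as a hedged standalone sketch (put_cpu_var() re-enables the preemption that get_cpu_var() disabled, so the two calls must name the same per-CPU variable even when a mismatch happens to compile):

static DEFINE_PER_CPU(atomic_t, clock_sync_word);

static int clock_is_in_sync(void)
{
	atomic_t *sw_ptr;
	int rc;

	sw_ptr = &get_cpu_var(clock_sync_word);	/* disables preemption */
	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
	put_cpu_var(clock_sync_word);		/* must pair with the get */
	return rc;
}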
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index adfb32aa6d59..5f99e66c51c3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -86,7 +86,8 @@ static void vdso_init_data(struct vdso_data *vd)
 	unsigned int facility_list;
 
 	facility_list = stfl();
-	vd->ectg_available = switch_amode && (facility_list & 1);
+	vd->ectg_available =
+		user_mode != HOME_SPACE_MODE && (facility_list & 1);
 }
 
 #ifdef CONFIG_64BIT
@@ -114,7 +115,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return 0;
 
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -160,7 +161,7 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 
 	psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -184,7 +185,7 @@ static void __vdso_init_cr5(void *dummy)
 
 static void vdso_init_cr5(void)
 {
-	if (switch_amode && vdso_enabled)
+	if (user_mode != HOME_SPACE_MODE && vdso_enabled)
 		on_each_cpu(__vdso_init_cr5, NULL, 1);
 }
 #endif /* CONFIG_64BIT */
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index bf164fc21864..6ee55ae84ce2 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
-	select S390_SWITCH_AMODE
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 58da3f461214..60455f104ea3 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -162,7 +162,6 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
 	return size;
 }
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 {
 	char buf[256];
@@ -200,7 +199,6 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
 	} while ((len_str == len) && (done < count));
 	return done;
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
 
 struct uaccess_ops uaccess_mvcos = {
 	.copy_from_user = copy_from_user_mvcos_check,
@@ -215,7 +213,6 @@ struct uaccess_ops uaccess_mvcos = {
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 };
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 struct uaccess_ops uaccess_mvcos_switch = {
 	.copy_from_user = copy_from_user_mvcos,
 	.copy_from_user_small = copy_from_user_mvcos,
@@ -228,4 +225,3 @@ struct uaccess_ops uaccess_mvcos_switch = {
 	.futex_atomic_op = futex_atomic_op_pt,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
 };
-#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index cb5d59eab0ee..404f2de296dc 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -23,86 +23,21 @@ static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return NULL;
+		return (pte_t *) 0x3a;
 
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return NULL;
+		return (pte_t *) 0x3b;
 
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		return NULL;
+		return (pte_t *) 0x10;
 
 	return pte_offset_map(pmd, addr);
 }
 
-static int __handle_fault(struct mm_struct *mm, unsigned long address,
-			  int write_access)
-{
-	struct vm_area_struct *vma;
-	int ret = -EFAULT;
-	int fault;
-
-	if (in_atomic())
-		return ret;
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
-	if (unlikely(!vma))
-		goto out;
-	if (unlikely(vma->vm_start > address)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			goto out;
-		if (expand_stack(vma, address))
-			goto out;
-	}
-
-	if (!write_access) {
-		/* page not present, check vm flags */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto out;
-	} else {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto out;
-	}
-
-survive:
-	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			goto out_sigbus;
-		BUG();
-	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
-	ret = 0;
-out:
-	up_read(&mm->mmap_sem);
-	return ret;
-
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", current->comm);
-	return ret;
-
-out_sigbus:
-	up_read(&mm->mmap_sem);
-	current->thread.prot_addr = address;
-	current->thread.trap_no = 0x11;
-	force_sig(SIGBUS, current);
-	return ret;
-}
-
-static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
-			     size_t n, int write_user)
+static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+					     size_t n, int write_user)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset, pfn, done, size;
@@ -114,12 +49,17 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte) ||
-		    (write_user && !pte_write(*pte)))
+		if ((unsigned long) pte < 0x1000)
 			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
+			goto fault;
+		} else if (write_user && !pte_write(*pte)) {
+			pte = (pte_t *) 0x04;
+			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
-
 		offset = uaddr & (PAGE_SIZE - 1);
 		size = min(n - done, PAGE_SIZE - offset);
 		if (write_user) {
@@ -137,7 +77,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
 		return n - done;
 	goto retry;
 }
@@ -146,30 +86,31 @@ fault:
  * Do DAT for user address by page table walk, return kernel address.
  * This function needs to be called with current->mm->page_table_lock held.
  */
-static unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long pfn, ret;
+	unsigned long pfn;
 	pte_t *pte;
 	int rc;
 
-	ret = 0;
 retry:
 	pte = follow_table(mm, uaddr);
-	if (!pte || !pte_present(*pte))
+	if ((unsigned long) pte < 0x1000)
 		goto fault;
+	if (!pte_present(*pte)) {
+		pte = (pte_t *) 0x11;
+		goto fault;
+	}
 
 	pfn = pte_pfn(*pte);
-	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
-out:
-	return ret;
+	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
 fault:
 	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(mm, uaddr, 0);
+	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
 	spin_lock(&mm->page_table_lock);
-	if (rc)
-		goto out;
-	goto retry;
+	if (!rc)
+		goto retry;
+	return 0;
 }
 
 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
@@ -234,8 +175,12 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte))
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
 			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
 		offset = uaddr & (PAGE_SIZE-1);
@@ -249,9 +194,8 @@ retry:
 	return done + 1;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, 0)) {
+	if (__handle_fault(uaddr, (unsigned long) pte, 0))
 		return 0;
-	}
 	goto retry;
 }
 
@@ -284,7 +228,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
-		      uaddr, done, size;
+		      uaddr, done, size, error_code;
 	unsigned long uaddr_from = (unsigned long) from;
 	unsigned long uaddr_to = (unsigned long) to;
 	pte_t *pte_from, *pte_to;
@@ -298,17 +242,28 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
+		write_user = 0;
+		uaddr = uaddr_from;
 		pte_from = follow_table(mm, uaddr_from);
-		if (!pte_from || !pte_present(*pte_from)) {
-			uaddr = uaddr_from;
-			write_user = 0;
+		error_code = (unsigned long) pte_from;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_from)) {
+			error_code = 0x11;
 			goto fault;
 		}
 
+		write_user = 1;
+		uaddr = uaddr_to;
 		pte_to = follow_table(mm, uaddr_to);
-		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
-			uaddr = uaddr_to;
-			write_user = 1;
+		error_code = (unsigned long) pte_to;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_to)) {
+			error_code = 0x11;
+			goto fault;
+		} else if (!pte_write(*pte_to)) {
+			error_code = 0x04;
 			goto fault;
 		}
 
@@ -329,7 +284,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, error_code, write_user))
 		return n - done;
 	goto retry;
 }
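The magic values returned by follow_table() above deserve a note: a pointer below 0x1000 can never be a valid pte address, so the walk reuses that range to hand a DAT program-interruption code back to the (now shared) __handle_fault() in mm/fault.c -- 0x04 protection, 0x10 segment translation, 0x11 page translation, 0x3a/0x3b region translation. A hedged sketch of the decoding side (the helper is hypothetical):

/* Illustrative only: how callers separate a real pte pointer from an
 * encoded exception code after the follow_table() change above. */
static inline int pte_or_error(pte_t *pte, unsigned long *error_code)
{
	if ((unsigned long) pte < 0x1000) {	/* not a pointer: DAT code */
		*error_code = (unsigned long) pte;
		return -EFAULT;			/* caller takes the fault path */
	}
	return 0;				/* genuine page-table entry */
}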
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index ff58779bf7e9..76a3637b88e0 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -18,6 +18,7 @@
 #include <linux/swap.h>
 #include <linux/kthread.h>
 #include <linux/oom.h>
+#include <linux/suspend.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -44,6 +45,7 @@ static volatile long cmm_pages_target;
 static volatile long cmm_timed_pages_target;
 static long cmm_timeout_pages;
 static long cmm_timeout_seconds;
+static int cmm_suspended;
 
 static struct cmm_page_array *cmm_page_list;
 static struct cmm_page_array *cmm_timed_page_list;
@@ -147,9 +149,9 @@ cmm_thread(void *dummy)
 
 	while (1) {
 		rc = wait_event_interruptible(cmm_thread_wait,
-			(cmm_pages != cmm_pages_target ||
-			 cmm_timed_pages != cmm_timed_pages_target ||
-			 kthread_should_stop()));
+			(!cmm_suspended && (cmm_pages != cmm_pages_target ||
+			 cmm_timed_pages != cmm_timed_pages_target)) ||
+			 kthread_should_stop());
 		if (kthread_should_stop() || rc == -ERESTARTSYS) {
 			cmm_pages_target = cmm_pages;
 			cmm_timed_pages_target = cmm_timed_pages;
@@ -410,6 +412,38 @@ cmm_smsg_target(char *from, char *msg)
 
 static struct ctl_table_header *cmm_sysctl_header;
 
+static int cmm_suspend(void)
+{
+	cmm_suspended = 1;
+	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
+	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
+	return 0;
+}
+
+static int cmm_resume(void)
+{
+	cmm_suspended = 0;
+	cmm_kick_thread();
+	return 0;
+}
+
+static int cmm_power_event(struct notifier_block *this,
+			   unsigned long event, void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+		return cmm_resume();
+	case PM_HIBERNATION_PREPARE:
+		return cmm_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block cmm_power_notifier = {
+	.notifier_call = cmm_power_event,
+};
+
 static int
 cmm_init (void)
 {
@@ -418,7 +452,7 @@ cmm_init (void)
 #ifdef CONFIG_CMM_PROC
 	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
 	if (!cmm_sysctl_header)
-		goto out;
+		goto out_sysctl;
 #endif
 #ifdef CONFIG_CMM_IUCV
 	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
@@ -428,17 +462,21 @@ cmm_init (void)
 	rc = register_oom_notifier(&cmm_oom_nb);
 	if (rc < 0)
 		goto out_oom_notify;
+	rc = register_pm_notifier(&cmm_power_notifier);
+	if (rc)
+		goto out_pm;
 	init_waitqueue_head(&cmm_thread_wait);
 	init_timer(&cmm_timer);
 	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
 	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
-	if (!rc)
-		goto out;
-	/*
-	 * kthread_create failed. undo all the stuff from above again.
-	 */
-	unregister_oom_notifier(&cmm_oom_nb);
+	if (rc)
+		goto out_kthread;
+	return 0;
 
+out_kthread:
+	unregister_pm_notifier(&cmm_power_notifier);
+out_pm:
+	unregister_oom_notifier(&cmm_oom_nb);
 out_oom_notify:
 #ifdef CONFIG_CMM_IUCV
 	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
@@ -446,8 +484,8 @@ out_smsg:
 #endif
 #ifdef CONFIG_CMM_PROC
 	unregister_sysctl_table(cmm_sysctl_header);
+out_sysctl:
 #endif
-out:
 	return rc;
 }
 
@@ -455,6 +493,7 @@ static void
 cmm_exit(void)
 {
 	kthread_stop(cmm_thread_ptr);
+	unregister_pm_notifier(&cmm_power_notifier);
 	unregister_oom_notifier(&cmm_oom_nb);
 	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
 	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
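This is the "[S390] cmm: free pages on hibernate." commit: pages the cooperative memory manager has handed back to the hypervisor must be released before the hibernation image is written and re-balanced afterwards. The generic shape of such a PM notifier, as a hedged standalone sketch (names are illustrative):

#include <linux/suspend.h>
#include <linux/notifier.h>

/* Sketch only: the register_pm_notifier() pattern used above. */
static int my_pm_event(struct notifier_block *nb,
		       unsigned long event, void *ptr)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:	/* before the image is written */
		/* drop state that must not be snapshotted */
		return NOTIFY_OK;
	case PM_POST_HIBERNATION:	/* after resume, or failed hibernate */
		/* rebuild it */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_pm_nb = {
	.notifier_call = my_pm_event,
};
/* register_pm_notifier(&my_pm_nb) on init, unregister_pm_notifier() on exit */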
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6d507462967a..fc102e70d9c2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,16 +34,15 @@
34#include <asm/pgtable.h> 34#include <asm/pgtable.h>
35#include <asm/s390_ext.h> 35#include <asm/s390_ext.h>
36#include <asm/mmu_context.h> 36#include <asm/mmu_context.h>
37#include <asm/compat.h>
37#include "../kernel/entry.h" 38#include "../kernel/entry.h"
38 39
39#ifndef CONFIG_64BIT 40#ifndef CONFIG_64BIT
40#define __FAIL_ADDR_MASK 0x7ffff000 41#define __FAIL_ADDR_MASK 0x7ffff000
41#define __FIXUP_MASK 0x7fffffff
42#define __SUBCODE_MASK 0x0200 42#define __SUBCODE_MASK 0x0200
43#define __PF_RES_FIELD 0ULL 43#define __PF_RES_FIELD 0ULL
44#else /* CONFIG_64BIT */ 44#else /* CONFIG_64BIT */
45#define __FAIL_ADDR_MASK -4096L 45#define __FAIL_ADDR_MASK -4096L
46#define __FIXUP_MASK ~0L
47#define __SUBCODE_MASK 0x0600 46#define __SUBCODE_MASK 0x0600
48#define __PF_RES_FIELD 0x8000000000000000ULL 47#define __PF_RES_FIELD 0x8000000000000000ULL
49#endif /* CONFIG_64BIT */ 48#endif /* CONFIG_64BIT */
@@ -52,11 +51,15 @@
52extern int sysctl_userprocess_debug; 51extern int sysctl_userprocess_debug;
53#endif 52#endif
54 53
55#ifdef CONFIG_KPROBES 54#define VM_FAULT_BADCONTEXT 0x010000
56static inline int notify_page_fault(struct pt_regs *regs, long err) 55#define VM_FAULT_BADMAP 0x020000
56#define VM_FAULT_BADACCESS 0x040000
57
58static inline int notify_page_fault(struct pt_regs *regs)
57{ 59{
58 int ret = 0; 60 int ret = 0;
59 61
62#ifdef CONFIG_KPROBES
60 /* kprobe_running() needs smp_processor_id() */ 63 /* kprobe_running() needs smp_processor_id() */
61 if (!user_mode(regs)) { 64 if (!user_mode(regs)) {
62 preempt_disable(); 65 preempt_disable();
@@ -64,15 +67,9 @@ static inline int notify_page_fault(struct pt_regs *regs, long err)
64 ret = 1; 67 ret = 1;
65 preempt_enable(); 68 preempt_enable();
66 } 69 }
67 70#endif
68 return ret; 71 return ret;
69} 72}
70#else
71static inline int notify_page_fault(struct pt_regs *regs, long err)
72{
73 return 0;
74}
75#endif
76 73
77 74
78/* 75/*
@@ -100,57 +97,50 @@ void bust_spinlocks(int yes)
100 97
101/* 98/*
102 * Returns the address space associated with the fault. 99 * Returns the address space associated with the fault.
103 * Returns 0 for kernel space, 1 for user space and 100 * Returns 0 for kernel space and 1 for user space.
104 * 2 for code execution in user space with noexec=on.
105 */ 101 */
106static inline int check_space(struct task_struct *tsk) 102static inline int user_space_fault(unsigned long trans_exc_code)
107{ 103{
108 /* 104 /*
109 * The lowest two bits of S390_lowcore.trans_exc_code 105 * The lowest two bits of the translation exception
110 * indicate which paging table was used. 106 * identification indicate which paging table was used.
111 */ 107 */
112 int desc = S390_lowcore.trans_exc_code & 3; 108 trans_exc_code &= 3;
113 109 if (trans_exc_code == 2)
114 if (desc == 3) /* Home Segment Table Descriptor */ 110 /* Access via secondary space, set_fs setting decides */
115 return switch_amode == 0; 111 return current->thread.mm_segment.ar4;
116 if (desc == 2) /* Secondary Segment Table Descriptor */ 112 if (user_mode == HOME_SPACE_MODE)
117 return tsk->thread.mm_segment.ar4; 113 /* User space if the access has been done via home space. */
118#ifdef CONFIG_S390_SWITCH_AMODE 114 return trans_exc_code == 3;
119 if (unlikely(desc == 1)) { /* STD determined via access register */ 115 /*
120 /* %a0 always indicates primary space. */ 116 * If the user space is not the home space the kernel runs in home
121 if (S390_lowcore.exc_access_id != 0) { 117 * space. Access via secondary space has already been covered,
122 save_access_regs(tsk->thread.acrs); 118 * access via primary space or access register is from user space
123 /* 119 * and access via home space is from the kernel.
124 * An alet of 0 indicates primary space. 120 */
125 * An alet of 1 indicates secondary space. 121 return trans_exc_code != 3;
126 * Any other alet values generate an
127 * alen-translation exception.
128 */
129 if (tsk->thread.acrs[S390_lowcore.exc_access_id])
130 return tsk->thread.mm_segment.ar4;
131 }
132 }
133#endif
134 /* Primary Segment Table Descriptor */
135 return switch_amode << s390_noexec;
136} 122}
137 123
138/* 124/*
139 * Send SIGSEGV to task. This is an external routine 125 * Send SIGSEGV to task. This is an external routine
140 * to keep the stack usage of do_page_fault small. 126 * to keep the stack usage of do_page_fault small.
141 */ 127 */
142static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, 128static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
143 int si_code, unsigned long address) 129 int si_code, unsigned long trans_exc_code)
144{ 130{
145 struct siginfo si; 131 struct siginfo si;
132 unsigned long address;
146 133
134 address = trans_exc_code & __FAIL_ADDR_MASK;
135 current->thread.prot_addr = address;
136 current->thread.trap_no = int_code;
147#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 137#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
148#if defined(CONFIG_SYSCTL) 138#if defined(CONFIG_SYSCTL)
149 if (sysctl_userprocess_debug) 139 if (sysctl_userprocess_debug)
150#endif 140#endif
151 { 141 {
152 printk("User process fault: interruption code 0x%lX\n", 142 printk("User process fault: interruption code 0x%lX\n",
153 error_code); 143 int_code);
154 printk("failing address: %lX\n", address); 144 printk("failing address: %lX\n", address);
155 show_regs(regs); 145 show_regs(regs);
156 } 146 }
@@ -161,13 +151,14 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 	force_sig_info(SIGSEGV, &si, current);
 }
 
-static void do_no_context(struct pt_regs *regs, unsigned long error_code,
-			  unsigned long address)
+static noinline void do_no_context(struct pt_regs *regs, long int_code,
+				   unsigned long trans_exc_code)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long address;
 
 	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
 	if (fixup) {
 		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
 		return;
@@ -177,129 +168,149 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	if (check_space(current) == 0)
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	if (!user_space_fault(trans_exc_code))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
 		printk(KERN_ALERT "Unable to handle kernel paging request"
 		       " at virtual user address %p\n", (void *)address);
 
-	die("Oops", regs, error_code);
+	die("Oops", regs, int_code);
 	do_exit(SIGKILL);
 }
 
-static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+static noinline void do_low_address(struct pt_regs *regs, long int_code,
+				    unsigned long trans_exc_code)
 {
 	/* Low-address protection hit in kernel mode means
 	   NULL pointer write access in kernel mode. */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
 		/* Low-address protection hit in user mode 'cannot happen'. */
-		die ("Low-address protection", regs, error_code);
+		die ("Low-address protection", regs, int_code);
 		do_exit(SIGKILL);
 	}
 
-	do_no_context(regs, error_code, 0);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 
-static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
-		      unsigned long address)
+static noinline void do_sigbus(struct pt_regs *regs, long int_code,
+			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
 
-	up_read(&mm->mmap_sem);
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.prot_addr = address;
-	tsk->thread.trap_no = error_code;
+	tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
+	tsk->thread.trap_no = int_code;
 	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-		do_no_context(regs, error_code, address);
 }
223 210
224#ifdef CONFIG_S390_EXEC_PROTECT 211#ifdef CONFIG_S390_EXEC_PROTECT
225static int signal_return(struct mm_struct *mm, struct pt_regs *regs, 212static noinline int signal_return(struct pt_regs *regs, long int_code,
226 unsigned long address, unsigned long error_code) 213 unsigned long trans_exc_code)
227{ 214{
228 u16 instruction; 215 u16 instruction;
229 int rc; 216 int rc;
230#ifdef CONFIG_COMPAT
231 int compat;
232#endif
233 217
234 pagefault_disable();
235 rc = __get_user(instruction, (u16 __user *) regs->psw.addr); 218 rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
236 pagefault_enable();
237 if (rc)
238 return -EFAULT;
239 219
240 up_read(&mm->mmap_sem); 220 if (!rc && instruction == 0x0a77) {
241 clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 221 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
242#ifdef CONFIG_COMPAT 222 if (is_compat_task())
243 compat = is_compat_task(); 223 sys32_sigreturn();
244 if (compat && instruction == 0x0a77) 224 else
245 sys32_sigreturn(); 225 sys_sigreturn();
246 else if (compat && instruction == 0x0aad) 226 } else if (!rc && instruction == 0x0aad) {
247 sys32_rt_sigreturn(); 227 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
248 else 228 if (is_compat_task())
249#endif 229 sys32_rt_sigreturn();
250 if (instruction == 0x0a77) 230 else
251 sys_sigreturn(); 231 sys_rt_sigreturn();
252 else if (instruction == 0x0aad) 232 } else
253 sys_rt_sigreturn(); 233 do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
254 else {
255 current->thread.prot_addr = address;
256 current->thread.trap_no = error_code;
257 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
258 }
259 return 0; 234 return 0;
260} 235}
261#endif /* CONFIG_S390_EXEC_PROTECT */ 236#endif /* CONFIG_S390_EXEC_PROTECT */
262 237
+static noinline void do_fault_error(struct pt_regs *regs, long int_code,
+				    unsigned long trans_exc_code, int fault)
+{
+	int si_code;
+
+	switch (fault) {
+	case VM_FAULT_BADACCESS:
+#ifdef CONFIG_S390_EXEC_PROTECT
+		if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+		    (trans_exc_code & 3) == 0) {
+			signal_return(regs, int_code, trans_exc_code);
+			break;
+		}
+#endif /* CONFIG_S390_EXEC_PROTECT */
+	case VM_FAULT_BADMAP:
+		/* Bad memory access. Check if it is kernel or user space. */
+		if (regs->psw.mask & PSW_MASK_PSTATE) {
+			/* User mode accesses just cause a SIGSEGV */
+			si_code = (fault == VM_FAULT_BADMAP) ?
+				SEGV_MAPERR : SEGV_ACCERR;
+			do_sigsegv(regs, int_code, si_code, trans_exc_code);
+			return;
+		}
+	case VM_FAULT_BADCONTEXT:
+		do_no_context(regs, int_code, trans_exc_code);
+		break;
+	default: /* fault & VM_FAULT_ERROR */
+		if (fault & VM_FAULT_OOM)
+			pagefault_out_of_memory();
+		else if (fault & VM_FAULT_SIGBUS) {
+			do_sigbus(regs, int_code, trans_exc_code);
+			/* Kernel mode? Handle exceptions or die */
+			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+				do_no_context(regs, int_code, trans_exc_code);
+		} else
+			BUG();
+		break;
+	}
+}
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  *
- * error_code:
+ * interruption code (int_code):
  *   04       Protection           ->  Write-Protection  (suppression)
  *   10       Segment translation  ->  Not present       (nullification)
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int write)
+static inline int do_exception(struct pt_regs *regs, int access,
+			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int space;
-	int si_code;
 	int fault;
 
-	if (notify_page_fault(regs, error_code))
-		return;
+	if (notify_page_fault(regs))
+		return 0;
 
 	tsk = current;
 	mm = tsk->mm;
 
-	/* get the failing address and the affected space */
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(tsk);
-
 	/*
 	 * Verify that the fault happened in user space, that
 	 * we are not in an interrupt and that there is a
 	 * user context.
 	 */
-	if (unlikely(space == 0 || in_atomic() || !mm))
-		goto no_context;
+	fault = VM_FAULT_BADCONTEXT;
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+		goto out;
 
+	address = trans_exc_code & __FAIL_ADDR_MASK;
 	/*
 	 * When we get here, the fault happened in the current
 	 * task's user address space, so we can switch on the
@@ -309,42 +320,26 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	down_read(&mm->mmap_sem);
 
-	si_code = SEGV_MAPERR;
+	fault = VM_FAULT_BADMAP;
 	vma = find_vma(mm, address);
 	if (!vma)
-		goto bad_area;
-
-#ifdef CONFIG_S390_EXEC_PROTECT
-	if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
-		if (!signal_return(mm, regs, address, error_code))
-			/*
-			 * signal_return() has done an up_read(&mm->mmap_sem)
-			 * if it returns 0.
-			 */
-			return;
-#endif
+		goto out_up;
 
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
-	si_code = SEGV_ACCERR;
-	if (!write) {
-		/* page not present, check vm flags */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto bad_area;
-	} else {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-	}
+	if (unlikely(vma->vm_start > address)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out_up;
+		if (expand_stack(vma, address))
+			goto out_up;
+	}
 
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+	fault = VM_FAULT_BADACCESS;
+	if (unlikely(!(vma->vm_flags & access)))
+		goto out_up;
+
 	if (is_vm_hugetlb_page(vma))
 		address &= HPAGE_MASK;
 	/*
@@ -352,18 +347,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM) {
-			up_read(&mm->mmap_sem);
-			pagefault_out_of_memory();
-			return;
-		} else if (fault & VM_FAULT_SIGBUS) {
-			do_sigbus(regs, error_code, address);
-			return;
-		}
-		BUG();
-	}
+	fault = handle_mm_fault(mm, vma, address,
+				(access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(fault & VM_FAULT_ERROR))
+		goto out_up;
+
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
@@ -373,74 +361,69 @@ good_area:
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 			      regs, address);
 	}
-	up_read(&mm->mmap_sem);
 	/*
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
 	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
-	return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
+	fault = 0;
+out_up:
 	up_read(&mm->mmap_sem);
-
-	/* User mode accesses just cause a SIGSEGV */
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		tsk->thread.prot_addr = address;
-		tsk->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, si_code, address);
-		return;
-	}
-
-no_context:
-	do_no_context(regs, error_code, address);
+out:
+	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs,
-				       long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
 {
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int fault;
+
 	/* Protection exception is suppressing, decrement psw address. */
-	regs->psw.addr -= (error_code >> 16);
+	regs->psw.addr -= (int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
-	if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
-		do_low_address(regs, error_code);
+	if (unlikely(!(trans_exc_code & 4))) {
+		do_low_address(regs, int_code, trans_exc_code);
 		return;
 	}
-	do_exception(regs, 4, 1);
+	fault = do_exception(regs, VM_WRITE, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, 4, trans_exc_code, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 {
-	do_exception(regs, error_code & 0xff, 0);
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int access, fault;
+
+	access = VM_READ | VM_EXEC | VM_WRITE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+	    (trans_exc_code & 3) == 0)
+		access = VM_EXEC;
+#endif
+	fault = do_exception(regs, access, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, int_code & 255, trans_exc_code, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 {
-	struct mm_struct *mm;
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long address;
-	int space;
-
-	mm = current->mm;
-	address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
-	space = check_space(current);
 
-	if (unlikely(space == 0 || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
 	local_irq_enable();
 
 	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
+	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
 	up_read(&mm->mmap_sem);
 
 	if (vma) {
@@ -450,17 +433,38 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		current->thread.prot_addr = address;
-		current->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
 		return;
 	}
 
 no_context:
-	do_no_context(regs, error_code, address);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 #endif
 
+int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+{
+	struct pt_regs regs;
+	int access, fault;
+
+	regs.psw.mask = psw_kernel_bits;
+	if (!irqs_disabled())
+		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+	regs.psw.addr = (unsigned long) __builtin_return_address(0);
+	regs.psw.addr |= PSW_ADDR_AMODE;
+	uaddr &= PAGE_MASK;
+	access = write_user ? VM_WRITE : VM_READ;
+	fault = do_exception(&regs, access, uaddr | 2);
+	if (unlikely(fault)) {
+		if (fault & VM_FAULT_OOM) {
+			pagefault_out_of_memory();
+			fault = 0;
+		} else if (fault & VM_FAULT_SIGBUS)
+			do_sigbus(&regs, int_code, uaddr);
+	}
+	return fault ? -EFAULT : 0;
+}
+
 #ifdef CONFIG_PFAULT
 /*
  * 'pfault' pseudo page faults routines.
@@ -522,7 +526,7 @@ void pfault_fini(void)
522 : : "a" (&refbk), "m" (refbk) : "cc"); 526 : : "a" (&refbk), "m" (refbk) : "cc");
523} 527}
524 528
525static void pfault_interrupt(__u16 error_code) 529static void pfault_interrupt(__u16 int_code)
526{ 530{
527 struct task_struct *tsk; 531 struct task_struct *tsk;
528 __u16 subcode; 532 __u16 subcode;
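
The rework above inverts the control flow of the fault handler: do_exception() no longer branches to good_area/bad_area/no_context labels but returns 0 on success or a VM_FAULT_* code, which the exception entry points hand to do_fault_error() for signal delivery or the oops path. Likewise, the address-space decision is reduced to the low two bits of the translation-exception identification. Below is a minimal user-space sketch of that decode, with home_space_mode standing in for the kernel's user_mode == HOME_SPACE_MODE test and set_fs_user for current->thread.mm_segment.ar4; it is an illustration, not kernel code.

/* decode.c - standalone illustration of the user_space_fault() logic */
#include <stdio.h>

enum { PRIMARY = 0, ACCESS_REG = 1, SECONDARY = 2, HOME = 3 };

static int is_user_space_fault(int trans_exc_code_bits,
			       int home_space_mode, int set_fs_user)
{
	switch (trans_exc_code_bits & 3) {
	case SECONDARY:
		/* Access via secondary space: the set_fs() setting decides */
		return set_fs_user;
	case HOME:
		/* Home space is user space only in home-space mode */
		return home_space_mode;
	default:
		/* Primary space or access register: user space, unless
		 * user space itself lives in home space */
		return !home_space_mode;
	}
}

int main(void)
{
	printf("secondary + set_fs(USER_DS): %d\n",
	       is_user_space_fault(SECONDARY, 0, 1));
	printf("home access, split modes:    %d\n",
	       is_user_space_fault(HOME, 0, 0));
	return 0;
}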
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2757c5616a07..ad621e06ada3 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -269,7 +269,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;
 
 	/* Do we have switched amode? If not, we cannot do sie */
-	if (!switch_amode)
+	if (user_mode == HOME_SPACE_MODE)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
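
The s390_enable_sie() guard expresses the same requirement in the new terms: SIE needs split kernel/user address spaces, formerly the switch_amode flag, now user_mode != HOME_SPACE_MODE. A rough sketch of the precondition pattern, with user_mode, HOME_SPACE_MODE and sie_setup() as illustrative stand-ins rather than the kernel's symbols:

#include <errno.h>

enum { PRIMARY_SPACE_MODE, SECONDARY_SPACE_MODE, HOME_SPACE_MODE };
static int user_mode = HOME_SPACE_MODE;	/* stand-in for the kernel global */

static int sie_setup(void)
{
	/* No split address spaces, no SIE: fail before touching the mm */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;
	/* ... reallocate the mm with pgste-enabled page tables here ... */
	return 0;
}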
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 5f91a38d7592..300ab012b0fd 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -70,8 +70,12 @@ static pte_t __ref *vmem_pte_alloc(void)
 	pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
-		    PTRS_PER_PTE * sizeof(pte_t));
+	if (MACHINE_HAS_HPAGE)
+		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
+			    PTRS_PER_PTE * sizeof(pte_t));
+	else
+		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+			    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
 
@@ -112,7 +116,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
 		    (address + HPAGE_SIZE <= start + size) &&
 		    (address >= HPAGE_SIZE)) {
-			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
+					_SEGMENT_ENTRY_CO;
 			pmd_val(*pm_dir) = pte_val(pte);
 			address += HPAGE_SIZE - PAGE_SIZE;
 			continue;
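
Both vmem.c hunks OR the change-recording override bit into the kernel mapping when the machine offers it, with MACHINE_HAS_HPAGE doubling as the facility check: _PAGE_CO for freshly cleared page tables, _SEGMENT_ENTRY_CO for large-page segment entries. A freestanding sketch of the table-initialization pattern, with made-up bit values standing in for _PAGE_TYPE_EMPTY and _PAGE_CO:

#include <stddef.h>

#define PAGE_TYPE_EMPTY	0x400UL	/* stand-in: invalid/empty entry */
#define PAGE_CO		0x100UL	/* stand-in: change-recording override */
#define PTRS_PER_PTE	256

static void init_pte_table(unsigned long *table, int machine_has_co)
{
	unsigned long entry = PAGE_TYPE_EMPTY;
	size_t i;

	/* Skip change recording for kernel pages where the facility exists */
	if (machine_has_co)
		entry |= PAGE_CO;
	for (i = 0; i < PTRS_PER_PTE; i++)
		table[i] = entry;
}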