about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2013-09-24 03:14:56 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2013-10-24 11:16:57 -0400
commite258d719ff28ecc7a048eb8f78380e68c4b3a3f0 (patch)
tree1c7f496b97cef1111b230371940e603a2ca401a4
parent7d7c7b24e416afb2637be8447e03ca4457c100fd (diff)
s390/uaccess: always run the kernel in home space
Simplify the uaccess code by removing the user_mode=home option. The kernel will now always run in the home space mode.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/include/asm/mmu_context.h10
-rw-r--r--arch/s390/include/asm/processor.h8
-rw-r--r--arch/s390/include/asm/ptrace.h7
-rw-r--r--arch/s390/include/asm/setup.h7
-rw-r--r--arch/s390/include/asm/uaccess.h18
-rw-r--r--arch/s390/kernel/compat_signal.c8
-rw-r--r--arch/s390/kernel/ipl.c4
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/ptrace.c4
-rw-r--r--arch/s390/kernel/runtime_instr.c2
-rw-r--r--arch/s390/kernel/setup.c59
-rw-r--r--arch/s390/kernel/signal.c12
-rw-r--r--arch/s390/kernel/smp.c6
-rw-r--r--arch/s390/kernel/vdso.c9
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/uaccess_mvcos.c30
-rw-r--r--arch/s390/lib/uaccess_pt.c2
-rw-r--r--arch/s390/lib/uaccess_std.c305
-rw-r--r--arch/s390/mm/fault.c9
-rw-r--r--arch/s390/mm/pgtable.c4
21 files changed, 51 insertions, 459 deletions
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 9f973d8de90e..5d1f950704dc 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
40 pgd_t *pgd = mm->pgd; 40 pgd_t *pgd = mm->pgd;
41 41
42 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 42 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
43 if (s390_user_mode != HOME_SPACE_MODE) { 43 /* Load primary space page table origin. */
44 /* Load primary space page table origin. */ 44 asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
45 asm volatile(LCTL_OPCODE" 1,1,%0\n"
46 : : "m" (S390_lowcore.user_asce) );
47 } else
48 /* Load home space page table origin. */
49 asm volatile(LCTL_OPCODE" 13,13,%0"
50 : : "m" (S390_lowcore.user_asce) );
51 set_fs(current->thread.mm_segment); 45 set_fs(current->thread.mm_segment);
52} 46}
53 47
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 50256f05fbcf..3caaf6548ef5 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -134,14 +134,14 @@ struct stack_frame {
134 * Do necessary setup to start up a new thread. 134 * Do necessary setup to start up a new thread.
135 */ 135 */
136#define start_thread(regs, new_psw, new_stackp) do { \ 136#define start_thread(regs, new_psw, new_stackp) do { \
137 regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \ 137 regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
138 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 138 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
139 regs->gprs[15] = new_stackp; \ 139 regs->gprs[15] = new_stackp; \
140 execve_tail(); \ 140 execve_tail(); \
141} while (0) 141} while (0)
142 142
143#define start_thread31(regs, new_psw, new_stackp) do { \ 143#define start_thread31(regs, new_psw, new_stackp) do { \
144 regs->psw.mask = psw_user_bits | PSW_MASK_BA; \ 144 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
145 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 145 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
146 regs->gprs[15] = new_stackp; \ 146 regs->gprs[15] = new_stackp; \
147 __tlb_flush_mm(current->mm); \ 147 __tlb_flush_mm(current->mm); \
@@ -343,9 +343,9 @@ __set_psw_mask(unsigned long mask)
343} 343}
344 344
345#define local_mcck_enable() \ 345#define local_mcck_enable() \
346 __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK) 346 __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
347#define local_mcck_disable() \ 347#define local_mcck_disable() \
348 __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT) 348 __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
349 349
350/* 350/*
351 * Basic Machine Check/Program Check Handler. 351 * Basic Machine Check/Program Check Handler.
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 52b56533c57c..9c82cebddabd 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -10,8 +10,11 @@
10 10
11#ifndef __ASSEMBLY__ 11#ifndef __ASSEMBLY__
12 12
13extern long psw_kernel_bits; 13#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
14extern long psw_user_bits; 14 PSW_MASK_EA | PSW_MASK_BA)
15#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
16 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
17 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
15 18
16/* 19/*
17 * The pt_regs struct defines the way the registers are stored on 20 * The pt_regs struct defines the way the registers are stored on
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 59880dbaf360..df802ee14af6 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
48void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, 48void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
49 unsigned long size); 49 unsigned long size);
50 50
51#define PRIMARY_SPACE_MODE 0
52#define ACCESS_REGISTER_MODE 1
53#define SECONDARY_SPACE_MODE 2
54#define HOME_SPACE_MODE 3
55
56extern unsigned int s390_user_mode;
57
58/* 51/*
59 * Machine features detected in head.S 52 * Machine features detected in head.S
60 */ 53 */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4e666f..79330af9a5f8 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
94 94
95struct uaccess_ops { 95struct uaccess_ops {
96 size_t (*copy_from_user)(size_t, const void __user *, void *); 96 size_t (*copy_from_user)(size_t, const void __user *, void *);
97 size_t (*copy_from_user_small)(size_t, const void __user *, void *);
98 size_t (*copy_to_user)(size_t, void __user *, const void *); 97 size_t (*copy_to_user)(size_t, void __user *, const void *);
99 size_t (*copy_to_user_small)(size_t, void __user *, const void *);
100 size_t (*copy_in_user)(size_t, void __user *, const void __user *); 98 size_t (*copy_in_user)(size_t, void __user *, const void __user *);
101 size_t (*clear_user)(size_t, void __user *); 99 size_t (*clear_user)(size_t, void __user *);
102 size_t (*strnlen_user)(size_t, const char __user *); 100 size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
106}; 104};
107 105
108extern struct uaccess_ops uaccess; 106extern struct uaccess_ops uaccess;
109extern struct uaccess_ops uaccess_std;
110extern struct uaccess_ops uaccess_mvcos; 107extern struct uaccess_ops uaccess_mvcos;
111extern struct uaccess_ops uaccess_mvcos_switch;
112extern struct uaccess_ops uaccess_pt; 108extern struct uaccess_ops uaccess_pt;
113 109
114extern int __handle_fault(unsigned long, unsigned long, int); 110extern int __handle_fault(unsigned long, unsigned long, int);
115 111
116static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 112static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
117{ 113{
118 size = uaccess.copy_to_user_small(size, ptr, x); 114 size = uaccess.copy_to_user(size, ptr, x);
119 return size ? -EFAULT : size; 115 return size ? -EFAULT : size;
120} 116}
121 117
122static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) 118static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
123{ 119{
124 size = uaccess.copy_from_user_small(size, ptr, x); 120 size = uaccess.copy_from_user(size, ptr, x);
125 return size ? -EFAULT : size; 121 return size ? -EFAULT : size;
126} 122}
127 123
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
226static inline unsigned long __must_check 222static inline unsigned long __must_check
227__copy_to_user(void __user *to, const void *from, unsigned long n) 223__copy_to_user(void __user *to, const void *from, unsigned long n)
228{ 224{
229 if (__builtin_constant_p(n) && (n <= 256)) 225 return uaccess.copy_to_user(n, to, from);
230 return uaccess.copy_to_user_small(n, to, from);
231 else
232 return uaccess.copy_to_user(n, to, from);
233} 226}
234 227
235#define __copy_to_user_inatomic __copy_to_user 228#define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
275static inline unsigned long __must_check 268static inline unsigned long __must_check
276__copy_from_user(void *to, const void __user *from, unsigned long n) 269__copy_from_user(void *to, const void __user *from, unsigned long n)
277{ 270{
278 if (__builtin_constant_p(n) && (n <= 256)) 271 return uaccess.copy_from_user(n, from, to);
279 return uaccess.copy_from_user_small(n, from, to);
280 else
281 return uaccess.copy_from_user(n, from, to);
282} 272}
283 273
284extern void copy_from_user_overflow(void) 274extern void copy_from_user_overflow(void)
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index adaa9e9478d8..ee820079c06d 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -188,8 +188,8 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
188 (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | 188 (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
189 (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); 189 (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
190 /* Check for invalid user address space control. */ 190 /* Check for invalid user address space control. */
191 if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) 191 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
192 regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | 192 regs->psw.mask = PSW_ASC_PRIMARY |
193 (regs->psw.mask & ~PSW_MASK_ASC); 193 (regs->psw.mask & ~PSW_MASK_ASC);
194 regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); 194 regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
195 for (i = 0; i < NUM_GPRS; i++) 195 for (i = 0; i < NUM_GPRS; i++)
@@ -348,7 +348,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
348 regs->gprs[15] = (__force __u64) frame; 348 regs->gprs[15] = (__force __u64) frame;
349 /* Force 31 bit amode and default user address space control. */ 349 /* Force 31 bit amode and default user address space control. */
350 regs->psw.mask = PSW_MASK_BA | 350 regs->psw.mask = PSW_MASK_BA |
351 (psw_user_bits & PSW_MASK_ASC) | 351 (PSW_USER_BITS & PSW_MASK_ASC) |
352 (regs->psw.mask & ~PSW_MASK_ASC); 352 (regs->psw.mask & ~PSW_MASK_ASC);
353 regs->psw.addr = (__force __u64) ka->sa.sa_handler; 353 regs->psw.addr = (__force __u64) ka->sa.sa_handler;
354 354
@@ -415,7 +415,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
415 regs->gprs[15] = (__force __u64) frame; 415 regs->gprs[15] = (__force __u64) frame;
416 /* Force 31 bit amode and default user address space control. */ 416 /* Force 31 bit amode and default user address space control. */
417 regs->psw.mask = PSW_MASK_BA | 417 regs->psw.mask = PSW_MASK_BA |
418 (psw_user_bits & PSW_MASK_ASC) | 418 (PSW_USER_BITS & PSW_MASK_ASC) |
419 (regs->psw.mask & ~PSW_MASK_ASC); 419 (regs->psw.mask & ~PSW_MASK_ASC);
420 regs->psw.addr = (__u64 __force) ka->sa.sa_handler; 420 regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
421 421
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index feb719d3c851..633ca7504536 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2051,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data)
2051 __ctl_clear_bit(0,28); 2051 __ctl_clear_bit(0,28);
2052 2052
2053 /* Set new machine check handler */ 2053 /* Set new machine check handler */
2054 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2054 S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
2055 S390_lowcore.mcck_new_psw.addr = 2055 S390_lowcore.mcck_new_psw.addr =
2056 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; 2056 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
2057 2057
2058 /* Set new program check handler */ 2058 /* Set new program check handler */
2059 S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2059 S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
2060 S390_lowcore.program_new_psw.addr = 2060 S390_lowcore.program_new_psw.addr =
2061 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 2061 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
2062 2062
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index c5dbb335716d..e1cdd31acabb 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -139,7 +139,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
139 if (unlikely(p->flags & PF_KTHREAD)) { 139 if (unlikely(p->flags & PF_KTHREAD)) {
140 /* kernel thread */ 140 /* kernel thread */
141 memset(&frame->childregs, 0, sizeof(struct pt_regs)); 141 memset(&frame->childregs, 0, sizeof(struct pt_regs));
142 frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | 142 frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
143 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 143 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
144 frame->childregs.psw.addr = PSW_ADDR_AMODE | 144 frame->childregs.psw.addr = PSW_ADDR_AMODE |
145 (unsigned long) kernel_thread_starter; 145 (unsigned long) kernel_thread_starter;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9556905bd3ce..e6abd5bd31b0 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -200,7 +200,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
200 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); 200 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
201 if (addr == (addr_t) &dummy->regs.psw.mask) 201 if (addr == (addr_t) &dummy->regs.psw.mask)
202 /* Return a clean psw mask. */ 202 /* Return a clean psw mask. */
203 tmp = psw_user_bits | (tmp & PSW_MASK_USER); 203 tmp = PSW_USER_BITS | (tmp & PSW_MASK_USER);
204 204
205 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { 205 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
206 /* 206 /*
@@ -322,7 +322,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
322 * psw and gprs are stored on the stack 322 * psw and gprs are stored on the stack
323 */ 323 */
324 if (addr == (addr_t) &dummy->regs.psw.mask && 324 if (addr == (addr_t) &dummy->regs.psw.mask &&
325 ((data & ~PSW_MASK_USER) != psw_user_bits || 325 ((data & ~PSW_MASK_USER) != PSW_USER_BITS ||
326 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) 326 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
327 /* Invalid psw mask. */ 327 /* Invalid psw mask. */
328 return -EINVAL; 328 return -EINVAL;
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index e1c9d1c292fa..d817cce7e72d 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -40,8 +40,6 @@ static void disable_runtime_instr(void)
40static void init_runtime_instr_cb(struct runtime_instr_cb *cb) 40static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
41{ 41{
42 cb->buf_limit = 0xfff; 42 cb->buf_limit = 0xfff;
43 if (s390_user_mode == HOME_SPACE_MODE)
44 cb->home_space = 1;
45 cb->int_requested = 1; 43 cb->int_requested = 1;
46 cb->pstate = 1; 44 cb->pstate = 1;
47 cb->pstate_set_buf = 1; 45 cb->pstate_set_buf = 1;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index aeed8a61fa0d..ffe1c53264a7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -64,12 +64,6 @@
64#include <asm/sclp.h> 64#include <asm/sclp.h>
65#include "entry.h" 65#include "entry.h"
66 66
67long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
68 PSW_MASK_EA | PSW_MASK_BA;
69long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
70 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
71 PSW_MASK_PSTATE | PSW_ASC_HOME;
72
73/* 67/*
74 * User copy operations. 68 * User copy operations.
75 */ 69 */
@@ -300,43 +294,14 @@ static int __init parse_vmalloc(char *arg)
300} 294}
301early_param("vmalloc", parse_vmalloc); 295early_param("vmalloc", parse_vmalloc);
302 296
303unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
304EXPORT_SYMBOL_GPL(s390_user_mode);
305
306static void __init set_user_mode_primary(void)
307{
308 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
309 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
310#ifdef CONFIG_COMPAT
311 psw32_user_bits =
312 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
313#endif
314 uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
315}
316
317static int __init early_parse_user_mode(char *p) 297static int __init early_parse_user_mode(char *p)
318{ 298{
319 if (p && strcmp(p, "primary") == 0) 299 if (!p || strcmp(p, "primary") == 0)
320 s390_user_mode = PRIMARY_SPACE_MODE; 300 return 0;
321 else if (!p || strcmp(p, "home") == 0) 301 return 1;
322 s390_user_mode = HOME_SPACE_MODE;
323 else
324 return 1;
325 return 0;
326} 302}
327early_param("user_mode", early_parse_user_mode); 303early_param("user_mode", early_parse_user_mode);
328 304
329static void __init setup_addressing_mode(void)
330{
331 if (s390_user_mode != PRIMARY_SPACE_MODE)
332 return;
333 set_user_mode_primary();
334 if (MACHINE_HAS_MVCOS)
335 pr_info("Address spaces switched, mvcos available\n");
336 else
337 pr_info("Address spaces switched, mvcos not available\n");
338}
339
340void *restart_stack __attribute__((__section__(".data"))); 305void *restart_stack __attribute__((__section__(".data")));
341 306
342static void __init setup_lowcore(void) 307static void __init setup_lowcore(void)
@@ -348,24 +313,24 @@ static void __init setup_lowcore(void)
348 */ 313 */
349 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); 314 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
350 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 315 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
351 lc->restart_psw.mask = psw_kernel_bits; 316 lc->restart_psw.mask = PSW_KERNEL_BITS;
352 lc->restart_psw.addr = 317 lc->restart_psw.addr =
353 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 318 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
354 lc->external_new_psw.mask = psw_kernel_bits | 319 lc->external_new_psw.mask = PSW_KERNEL_BITS |
355 PSW_MASK_DAT | PSW_MASK_MCHECK; 320 PSW_MASK_DAT | PSW_MASK_MCHECK;
356 lc->external_new_psw.addr = 321 lc->external_new_psw.addr =
357 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 322 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
358 lc->svc_new_psw.mask = psw_kernel_bits | 323 lc->svc_new_psw.mask = PSW_KERNEL_BITS |
359 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 324 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
360 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 325 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
361 lc->program_new_psw.mask = psw_kernel_bits | 326 lc->program_new_psw.mask = PSW_KERNEL_BITS |
362 PSW_MASK_DAT | PSW_MASK_MCHECK; 327 PSW_MASK_DAT | PSW_MASK_MCHECK;
363 lc->program_new_psw.addr = 328 lc->program_new_psw.addr =
364 PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; 329 PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
365 lc->mcck_new_psw.mask = psw_kernel_bits; 330 lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
366 lc->mcck_new_psw.addr = 331 lc->mcck_new_psw.addr =
367 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 332 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
368 lc->io_new_psw.mask = psw_kernel_bits | 333 lc->io_new_psw.mask = PSW_KERNEL_BITS |
369 PSW_MASK_DAT | PSW_MASK_MCHECK; 334 PSW_MASK_DAT | PSW_MASK_MCHECK;
370 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 335 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
371 lc->clock_comparator = -1ULL; 336 lc->clock_comparator = -1ULL;
@@ -1043,10 +1008,7 @@ void __init setup_arch(char **cmdline_p)
1043 init_mm.end_data = (unsigned long) &_edata; 1008 init_mm.end_data = (unsigned long) &_edata;
1044 init_mm.brk = (unsigned long) &_end; 1009 init_mm.brk = (unsigned long) &_end;
1045 1010
1046 if (MACHINE_HAS_MVCOS) 1011 uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
1047 memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
1048 else
1049 memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
1050 1012
1051 parse_early_param(); 1013 parse_early_param();
1052 detect_memory_layout(memory_chunk, memory_end); 1014 detect_memory_layout(memory_chunk, memory_end);
@@ -1054,7 +1016,6 @@ void __init setup_arch(char **cmdline_p)
1054 setup_ipl(); 1016 setup_ipl();
1055 reserve_oldmem(); 1017 reserve_oldmem();
1056 setup_memory_end(); 1018 setup_memory_end();
1057 setup_addressing_mode();
1058 reserve_crashkernel(); 1019 reserve_crashkernel();
1059 setup_memory(); 1020 setup_memory();
1060 setup_resources(); 1021 setup_resources();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c45becf82e01..b4fa7c0223e5 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -57,7 +57,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
57 57
58 /* Copy a 'clean' PSW mask to the user to avoid leaking 58 /* Copy a 'clean' PSW mask to the user to avoid leaking
59 information about whether PER is currently on. */ 59 information about whether PER is currently on. */
60 user_sregs.regs.psw.mask = psw_user_bits | 60 user_sregs.regs.psw.mask = PSW_USER_BITS |
61 (regs->psw.mask & PSW_MASK_USER); 61 (regs->psw.mask & PSW_MASK_USER);
62 user_sregs.regs.psw.addr = regs->psw.addr; 62 user_sregs.regs.psw.addr = regs->psw.addr;
63 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 63 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
@@ -85,12 +85,12 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
85 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); 85 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
86 if (err) 86 if (err)
87 return err; 87 return err;
88 /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ 88 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
89 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 89 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
90 (user_sregs.regs.psw.mask & PSW_MASK_USER); 90 (user_sregs.regs.psw.mask & PSW_MASK_USER);
91 /* Check for invalid user address space control. */ 91 /* Check for invalid user address space control. */
92 if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) 92 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
93 regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | 93 regs->psw.mask = PSW_ASC_PRIMARY |
94 (regs->psw.mask & ~PSW_MASK_ASC); 94 (regs->psw.mask & ~PSW_MASK_ASC);
95 /* Check for invalid amode */ 95 /* Check for invalid amode */
96 if (regs->psw.mask & PSW_MASK_EA) 96 if (regs->psw.mask & PSW_MASK_EA)
@@ -224,7 +224,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
224 regs->gprs[15] = (unsigned long) frame; 224 regs->gprs[15] = (unsigned long) frame;
225 /* Force default amode and default user address space control. */ 225 /* Force default amode and default user address space control. */
226 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 226 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
227 (psw_user_bits & PSW_MASK_ASC) | 227 (PSW_USER_BITS & PSW_MASK_ASC) |
228 (regs->psw.mask & ~PSW_MASK_ASC); 228 (regs->psw.mask & ~PSW_MASK_ASC);
229 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 229 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
230 230
@@ -295,7 +295,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
295 regs->gprs[15] = (unsigned long) frame; 295 regs->gprs[15] = (unsigned long) frame;
296 /* Force default amode and default user address space control. */ 296 /* Force default amode and default user address space control. */
297 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 297 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
298 (psw_user_bits & PSW_MASK_ASC) | 298 (PSW_USER_BITS & PSW_MASK_ASC) |
299 (regs->psw.mask & ~PSW_MASK_ASC); 299 (regs->psw.mask & ~PSW_MASK_ASC);
300 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 300 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
301 301
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1a4313a1b60f..cca6cf6abacc 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -283,7 +283,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
283 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; 283 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
284 unsigned long source_cpu = stap(); 284 unsigned long source_cpu = stap();
285 285
286 __load_psw_mask(psw_kernel_bits); 286 __load_psw_mask(PSW_KERNEL_BITS);
287 if (pcpu->address == source_cpu) 287 if (pcpu->address == source_cpu)
288 func(data); /* should not return */ 288 func(data); /* should not return */
289 /* Stop target cpu (if func returns this stops the current cpu). */ 289 /* Stop target cpu (if func returns this stops the current cpu). */
@@ -395,7 +395,7 @@ void smp_send_stop(void)
395 int cpu; 395 int cpu;
396 396
397 /* Disable all interrupts/machine checks */ 397 /* Disable all interrupts/machine checks */
398 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 398 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
399 trace_hardirqs_off(); 399 trace_hardirqs_off();
400 400
401 debug_set_critical(); 401 debug_set_critical();
@@ -693,7 +693,7 @@ static void smp_start_secondary(void *cpuvoid)
693 S390_lowcore.restart_source = -1UL; 693 S390_lowcore.restart_source = -1UL;
694 restore_access_regs(S390_lowcore.access_regs_save_area); 694 restore_access_regs(S390_lowcore.access_regs_save_area);
695 __ctl_load(S390_lowcore.cregs_save_area, 0, 15); 695 __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
696 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 696 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
697 cpu_init(); 697 cpu_init();
698 preempt_disable(); 698 preempt_disable();
699 init_cpu_timer(); 699 init_cpu_timer();
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 05d75c413137..a84476f2a9bb 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,8 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
84 */ 84 */
85static void vdso_init_data(struct vdso_data *vd) 85static void vdso_init_data(struct vdso_data *vd)
86{ 86{
87 vd->ectg_available = 87 vd->ectg_available = test_facility(31);
88 s390_user_mode != HOME_SPACE_MODE && test_facility(31);
89} 88}
90 89
91#ifdef CONFIG_64BIT 90#ifdef CONFIG_64BIT
@@ -102,7 +101,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
102 101
103 lowcore->vdso_per_cpu_data = __LC_PASTE; 102 lowcore->vdso_per_cpu_data = __LC_PASTE;
104 103
105 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 104 if (!vdso_enabled)
106 return 0; 105 return 0;
107 106
108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); 107 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +146,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
147 unsigned long segment_table, page_table, page_frame; 146 unsigned long segment_table, page_table, page_frame;
148 u32 *psal, *aste; 147 u32 *psal, *aste;
149 148
150 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 149 if (!vdso_enabled)
151 return; 150 return;
152 151
153 psal = (u32 *)(addr_t) lowcore->paste[4]; 152 psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +164,7 @@ static void vdso_init_cr5(void)
165{ 164{
166 unsigned long cr5; 165 unsigned long cr5;
167 166
168 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 167 if (!vdso_enabled)
169 return; 168 return;
170 cr5 = offsetof(struct _lowcore, paste); 169 cr5 = offsetof(struct _lowcore, paste);
171 __ctl_load(cr5, 5, 5); 170 __ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index abcfab55f99b..e312c48a1c40 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -161,7 +161,7 @@ void __kprobes vtime_stop_cpu(void)
161 trace_hardirqs_on(); 161 trace_hardirqs_on();
162 162
163 /* Wait for external, I/O or machine check interrupt. */ 163 /* Wait for external, I/O or machine check interrupt. */
164 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | 164 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
165 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 165 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
166 idle->nohz_delay = 0; 166 idle->nohz_delay = 0;
167 167
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e506c5fb6f6a..b068729e50ac 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for s390-specific library files.. 2# Makefile for s390-specific library files..
3# 3#
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o find.o 5lib-y += delay.o string.o uaccess_pt.o find.o
6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
7obj-$(CONFIG_64BIT) += mem64.o 7obj-$(CONFIG_64BIT) += mem64.o
8lib-$(CONFIG_64BIT) += uaccess_mvcos.o 8lib-$(CONFIG_64BIT) += uaccess_mvcos.o
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 1829742bf479..4b7993bf69b9 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -65,13 +65,6 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
65 return size; 65 return size;
66} 66}
67 67
68static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
69{
70 if (size <= 256)
71 return copy_from_user_std(size, ptr, x);
72 return copy_from_user_mvcos(size, ptr, x);
73}
74
75static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) 68static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
76{ 69{
77 register unsigned long reg0 asm("0") = 0x810000UL; 70 register unsigned long reg0 asm("0") = 0x810000UL;
@@ -101,14 +94,6 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
101 return size; 94 return size;
102} 95}
103 96
104static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
105 const void *x)
106{
107 if (size <= 256)
108 return copy_to_user_std(size, ptr, x);
109 return copy_to_user_mvcos(size, ptr, x);
110}
111
112static size_t copy_in_user_mvcos(size_t size, void __user *to, 97static size_t copy_in_user_mvcos(size_t size, void __user *to,
113 const void __user *from) 98 const void __user *from)
114{ 99{
@@ -201,23 +186,8 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
201} 186}
202 187
203struct uaccess_ops uaccess_mvcos = { 188struct uaccess_ops uaccess_mvcos = {
204 .copy_from_user = copy_from_user_mvcos_check,
205 .copy_from_user_small = copy_from_user_std,
206 .copy_to_user = copy_to_user_mvcos_check,
207 .copy_to_user_small = copy_to_user_std,
208 .copy_in_user = copy_in_user_mvcos,
209 .clear_user = clear_user_mvcos,
210 .strnlen_user = strnlen_user_std,
211 .strncpy_from_user = strncpy_from_user_std,
212 .futex_atomic_op = futex_atomic_op_std,
213 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
214};
215
216struct uaccess_ops uaccess_mvcos_switch = {
217 .copy_from_user = copy_from_user_mvcos, 189 .copy_from_user = copy_from_user_mvcos,
218 .copy_from_user_small = copy_from_user_mvcos,
219 .copy_to_user = copy_to_user_mvcos, 190 .copy_to_user = copy_to_user_mvcos,
220 .copy_to_user_small = copy_to_user_mvcos,
221 .copy_in_user = copy_in_user_mvcos, 191 .copy_in_user = copy_in_user_mvcos,
222 .clear_user = clear_user_mvcos, 192 .clear_user = clear_user_mvcos,
223 .strnlen_user = strnlen_user_mvcos, 193 .strnlen_user = strnlen_user_mvcos,
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 1694d738b175..97e03caf7825 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -461,9 +461,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
461 461
462struct uaccess_ops uaccess_pt = { 462struct uaccess_ops uaccess_pt = {
463 .copy_from_user = copy_from_user_pt, 463 .copy_from_user = copy_from_user_pt,
464 .copy_from_user_small = copy_from_user_pt,
465 .copy_to_user = copy_to_user_pt, 464 .copy_to_user = copy_to_user_pt,
466 .copy_to_user_small = copy_to_user_pt,
467 .copy_in_user = copy_in_user_pt, 465 .copy_in_user = copy_in_user_pt,
468 .clear_user = clear_user_pt, 466 .clear_user = clear_user_pt,
469 .strnlen_user = strnlen_user_pt, 467 .strnlen_user = strnlen_user_pt,
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
deleted file mode 100644
index 4a75d475b06a..000000000000
--- a/arch/s390/lib/uaccess_std.c
+++ /dev/null
@@ -1,305 +0,0 @@
1/*
2 * Standard user space access functions based on mvcp/mvcs and doing
3 * interesting things in the secondary space mode.
4 *
5 * Copyright IBM Corp. 2006
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
8 */
9
10#include <linux/errno.h>
11#include <linux/mm.h>
12#include <linux/uaccess.h>
13#include <asm/futex.h>
14#include "uaccess.h"
15
16#ifndef CONFIG_64BIT
17#define AHI "ahi"
18#define ALR "alr"
19#define CLR "clr"
20#define LHI "lhi"
21#define SLR "slr"
22#else
23#define AHI "aghi"
24#define ALR "algr"
25#define CLR "clgr"
26#define LHI "lghi"
27#define SLR "slgr"
28#endif
29
30size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
31{
32 unsigned long tmp1, tmp2;
33
34 tmp1 = -256UL;
35 asm volatile(
36 "0: mvcp 0(%0,%2),0(%1),%3\n"
37 "10:jz 8f\n"
38 "1:"ALR" %0,%3\n"
39 " la %1,256(%1)\n"
40 " la %2,256(%2)\n"
41 "2: mvcp 0(%0,%2),0(%1),%3\n"
42 "11:jnz 1b\n"
43 " j 8f\n"
44 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
45 " "LHI" %3,-4096\n"
46 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
47 " "SLR" %4,%1\n"
48 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
49 " jnh 5f\n"
50 "4: mvcp 0(%4,%2),0(%1),%3\n"
51 "12:"SLR" %0,%4\n"
52 " "ALR" %2,%4\n"
53 "5:"LHI" %4,-1\n"
54 " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
55 " bras %3,7f\n" /* memset loop */
56 " xc 0(1,%2),0(%2)\n"
57 "6: xc 0(256,%2),0(%2)\n"
58 " la %2,256(%2)\n"
59 "7:"AHI" %4,-256\n"
60 " jnm 6b\n"
61 " ex %4,0(%3)\n"
62 " j 9f\n"
63 "8:"SLR" %0,%0\n"
64 "9: \n"
65 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
66 EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
67 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
68 : : "cc", "memory");
69 return size;
70}
71
72static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
73 void *x)
74{
75 if (size <= 1024)
76 return copy_from_user_std(size, ptr, x);
77 return copy_from_user_pt(size, ptr, x);
78}
79
80size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
81{
82 unsigned long tmp1, tmp2;
83
84 tmp1 = -256UL;
85 asm volatile(
86 "0: mvcs 0(%0,%1),0(%2),%3\n"
87 "7: jz 5f\n"
88 "1:"ALR" %0,%3\n"
89 " la %1,256(%1)\n"
90 " la %2,256(%2)\n"
91 "2: mvcs 0(%0,%1),0(%2),%3\n"
92 "8: jnz 1b\n"
93 " j 5f\n"
94 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
95 " "LHI" %3,-4096\n"
96 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
97 " "SLR" %4,%1\n"
98 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
99 " jnh 6f\n"
100 "4: mvcs 0(%4,%1),0(%2),%3\n"
101 "9:"SLR" %0,%4\n"
102 " j 6f\n"
103 "5:"SLR" %0,%0\n"
104 "6: \n"
105 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
106 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
107 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
108 : : "cc", "memory");
109 return size;
110}
111
112static size_t copy_to_user_std_check(size_t size, void __user *ptr,
113 const void *x)
114{
115 if (size <= 1024)
116 return copy_to_user_std(size, ptr, x);
117 return copy_to_user_pt(size, ptr, x);
118}
119
120static size_t copy_in_user_std(size_t size, void __user *to,
121 const void __user *from)
122{
123 unsigned long tmp1;
124
125 asm volatile(
126 " sacf 256\n"
127 " "AHI" %0,-1\n"
128 " jo 5f\n"
129 " bras %3,3f\n"
130 "0:"AHI" %0,257\n"
131 "1: mvc 0(1,%1),0(%2)\n"
132 " la %1,1(%1)\n"
133 " la %2,1(%2)\n"
134 " "AHI" %0,-1\n"
135 " jnz 1b\n"
136 " j 5f\n"
137 "2: mvc 0(256,%1),0(%2)\n"
138 " la %1,256(%1)\n"
139 " la %2,256(%2)\n"
140 "3:"AHI" %0,-256\n"
141 " jnm 2b\n"
142 "4: ex %0,1b-0b(%3)\n"
143 "5: "SLR" %0,%0\n"
144 "6: sacf 0\n"
145 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
146 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
147 : : "cc", "memory");
148 return size;
149}
150
151static size_t clear_user_std(size_t size, void __user *to)
152{
153 unsigned long tmp1, tmp2;
154
155 asm volatile(
156 " sacf 256\n"
157 " "AHI" %0,-1\n"
158 " jo 5f\n"
159 " bras %3,3f\n"
160 " xc 0(1,%1),0(%1)\n"
161 "0:"AHI" %0,257\n"
162 " la %2,255(%1)\n" /* %2 = ptr + 255 */
163 " srl %2,12\n"
164 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
165 " "SLR" %2,%1\n"
166 " "CLR" %0,%2\n" /* clear crosses next page boundary? */
167 " jnh 5f\n"
168 " "AHI" %2,-1\n"
169 "1: ex %2,0(%3)\n"
170 " "AHI" %2,1\n"
171 " "SLR" %0,%2\n"
172 " j 5f\n"
173 "2: xc 0(256,%1),0(%1)\n"
174 " la %1,256(%1)\n"
175 "3:"AHI" %0,-256\n"
176 " jnm 2b\n"
177 "4: ex %0,0(%3)\n"
178 "5: "SLR" %0,%0\n"
179 "6: sacf 0\n"
180 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
181 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
182 : : "cc", "memory");
183 return size;
184}
185
186size_t strnlen_user_std(size_t size, const char __user *src)
187{
188 register unsigned long reg0 asm("0") = 0UL;
189 unsigned long tmp1, tmp2;
190
191 if (unlikely(!size))
192 return 0;
193 asm volatile(
194 " la %2,0(%1)\n"
195 " la %3,0(%0,%1)\n"
196 " "SLR" %0,%0\n"
197 " sacf 256\n"
198 "0: srst %3,%2\n"
199 " jo 0b\n"
200 " la %0,1(%3)\n" /* strnlen_user results includes \0 */
201 " "SLR" %0,%1\n"
202 "1: sacf 0\n"
203 EX_TABLE(0b,1b)
204 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
205 : "d" (reg0) : "cc", "memory");
206 return size;
207}
208
209size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
210{
211 size_t done, len, offset, len_str;
212
213 if (unlikely(!count))
214 return 0;
215 done = 0;
216 do {
217 offset = (size_t)src & ~PAGE_MASK;
218 len = min(count - done, PAGE_SIZE - offset);
219 if (copy_from_user_std(len, src, dst))
220 return -EFAULT;
221 len_str = strnlen(dst, len);
222 done += len_str;
223 src += len_str;
224 dst += len_str;
225 } while ((len_str == len) && (done < count));
226 return done;
227}
228
229#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
230 asm volatile( \
231 " sacf 256\n" \
232 "0: l %1,0(%6)\n" \
233 "1:"insn \
234 "2: cs %1,%2,0(%6)\n" \
235 "3: jl 1b\n" \
236 " lhi %0,0\n" \
237 "4: sacf 0\n" \
238 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
239 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
240 "=m" (*uaddr) \
241 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
242 "m" (*uaddr) : "cc");
243
244int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
245{
246 int oldval = 0, newval, ret;
247
248 switch (op) {
249 case FUTEX_OP_SET:
250 __futex_atomic_op("lr %2,%5\n",
251 ret, oldval, newval, uaddr, oparg);
252 break;
253 case FUTEX_OP_ADD:
254 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
255 ret, oldval, newval, uaddr, oparg);
256 break;
257 case FUTEX_OP_OR:
258 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
259 ret, oldval, newval, uaddr, oparg);
260 break;
261 case FUTEX_OP_ANDN:
262 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
263 ret, oldval, newval, uaddr, oparg);
264 break;
265 case FUTEX_OP_XOR:
266 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
267 ret, oldval, newval, uaddr, oparg);
268 break;
269 default:
270 ret = -ENOSYS;
271 }
272 *old = oldval;
273 return ret;
274}
275
276int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
277 u32 oldval, u32 newval)
278{
279 int ret;
280
281 asm volatile(
282 " sacf 256\n"
283 "0: cs %1,%4,0(%5)\n"
284 "1: la %0,0\n"
285 "2: sacf 0\n"
286 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
287 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
288 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
289 : "cc", "memory" );
290 *uval = oldval;
291 return ret;
292}
293
294struct uaccess_ops uaccess_std = {
295 .copy_from_user = copy_from_user_std_check,
296 .copy_from_user_small = copy_from_user_std,
297 .copy_to_user = copy_to_user_std_check,
298 .copy_to_user_small = copy_to_user_std,
299 .copy_in_user = copy_in_user_std,
300 .clear_user = clear_user_std,
301 .strnlen_user = strnlen_user_std,
302 .strncpy_from_user = strncpy_from_user_std,
303 .futex_atomic_op = futex_atomic_op_std,
304 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
305};
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fc6679210d83..8f29762671cf 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -115,13 +115,8 @@ static inline int user_space_fault(unsigned long trans_exc_code)
115 if (trans_exc_code == 2) 115 if (trans_exc_code == 2)
116 /* Access via secondary space, set_fs setting decides */ 116 /* Access via secondary space, set_fs setting decides */
117 return current->thread.mm_segment.ar4; 117 return current->thread.mm_segment.ar4;
118 if (s390_user_mode == HOME_SPACE_MODE)
119 /* User space if the access has been done via home space. */
120 return trans_exc_code == 3;
121 /* 118 /*
122 * If the user space is not the home space the kernel runs in home 119 * Access via primary space or access register is from user space
123 * space. Access via secondary space has already been covered,
124 * access via primary space or access register is from user space
125 * and access via home space is from the kernel. 120 * and access via home space is from the kernel.
126 */ 121 */
127 return trans_exc_code != 3; 122 return trans_exc_code != 3;
@@ -471,7 +466,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
471 int access, fault; 466 int access, fault;
472 467
473 /* Emulate a uaccess fault from kernel mode. */ 468 /* Emulate a uaccess fault from kernel mode. */
474 regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; 469 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
475 if (!irqs_disabled()) 470 if (!irqs_disabled())
476 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; 471 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
477 regs.psw.addr = (unsigned long) __builtin_return_address(0); 472 regs.psw.addr = (unsigned long) __builtin_return_address(0);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index de8cbc30dcd1..94f37a9fb1e5 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1157,10 +1157,6 @@ int s390_enable_sie(void)
1157 struct mm_struct *mm = tsk->mm; 1157 struct mm_struct *mm = tsk->mm;
1158 struct mmu_gather tlb; 1158 struct mmu_gather tlb;
1159 1159
1160 /* Do we have switched amode? If no, we cannot do sie */
1161 if (s390_user_mode == HOME_SPACE_MODE)
1162 return -EINVAL;
1163
1164 /* Do we have pgstes? if yes, we are done */ 1160 /* Do we have pgstes? if yes, we are done */
1165 if (mm_has_pgste(tsk->mm)) 1161 if (mm_has_pgste(tsk->mm))
1166 return 0; 1162 return 0;