author     Gerald Schaefer <geraldsc@de.ibm.com>        2007-02-05 15:18:17 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2007-02-05 15:18:17 -0500
commit     c1821c2e9711adc3cd298a16b7237c92a2cee78d
tree       9155b089db35a37d95863125ea4c5f918bd7801b
parent     86aa9fc2456d8a662f299a70bdb70987209170f0
[S390] noexec protection
This provides a noexec protection on s390 hardware. Our hardware does not have any bits left in the pte for a hw noexec bit, so this is a different approach using shadow page tables and a special addressing mode that allows separate address spaces for code and data.

As a special feature of our "secondary-space" addressing mode, separate page tables can be specified for the translation of data addresses (storage operands) and instruction addresses. The shadow page table is used for the instruction addresses and the standard page table for the data addresses.

The shadow page table is linked to the standard page table by a pointer in page->lru.next of the struct page corresponding to the page that contains the standard page table (since page->private is not really private with the pte_lock and the page table pages are not in the LRU list).

Depending on the software bits of a pte, it is either inserted into both page tables or just into the standard (data) page table. Pages of a vma that does not have the VM_EXEC bit set get mapped only in the data address space. Any attempt to execute code on such a page will cause a page translation exception. The standard reaction to this is a SIGSEGV, with two exceptions: the two system call opcodes 0x0a77 (sys_sigreturn) and 0x0aad (sys_rt_sigreturn) are allowed. They are stored by the kernel to the signal stack frame. Unfortunately, the signal return mechanism cannot be modified to use an SA_RESTORER because the exception unwinding code depends on the system call opcode stored behind the signal stack frame.

This feature requires that user space is executed in secondary-space mode and the kernel in home-space mode, which means that the addressing modes need to be switched and that the noexec protection only works for user space. After switching the addressing modes, we cannot use the mvcp/mvcs instructions anymore to copy between kernel and user space. A new mvcos instruction has been added to the z9 EC/BC hardware which allows copying between arbitrary address spaces, but on older hardware the page tables need to be walked manually.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
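For illustration only, the fault-side policy described above boils down to a small opcode check. The sketch below is a standalone reduction of the signal_return() logic added in arch/s390/mm/fault.c further down; classify_exec_fault() is a hypothetical stand-in (the real code additionally walks the page table to read the opcode and delivers the sigreturn syscall or SIGSEGV itself):

    /* Sketch: how an execute fault on a non-VM_EXEC page is classified. */
    #include <stdint.h>
    #include <stdio.h>

    #define OP_SIGRETURN    0x0a77  /* svc 119, sys_sigreturn */
    #define OP_RT_SIGRETURN 0x0aad  /* svc 173, sys_rt_sigreturn */

    enum fault_action { DO_SIGRETURN, DO_RT_SIGRETURN, DO_SIGSEGV };

    static enum fault_action classify_exec_fault(const uint16_t *insn)
    {
            /* Only the two sigreturn opcodes that the kernel itself stores
             * on the signal stack frame may be executed from a page that
             * is not mapped in the instruction address space. */
            if (*insn == OP_SIGRETURN)
                    return DO_SIGRETURN;
            if (*insn == OP_RT_SIGRETURN)
                    return DO_RT_SIGRETURN;
            return DO_SIGSEGV;
    }

    int main(void)
    {
            uint16_t frame_insn = OP_SIGRETURN; /* as placed on the signal stack */
            printf("action=%d\n", classify_exec_fault(&frame_insn));
            return 0;
    }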
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                 |  25
-rw-r--r--  arch/s390/defconfig               |   2
-rw-r--r--  arch/s390/kernel/compat_linux.c   |   6
-rw-r--r--  arch/s390/kernel/compat_linux.h   |  31
-rw-r--r--  arch/s390/kernel/compat_signal.c  |   2
-rw-r--r--  arch/s390/kernel/ipl.c            |   4
-rw-r--r--  arch/s390/kernel/process.c        |   4
-rw-r--r--  arch/s390/kernel/ptrace.c         |  10
-rw-r--r--  arch/s390/kernel/setup.c          |  98
-rw-r--r--  arch/s390/kernel/signal.c         |   2
-rw-r--r--  arch/s390/kernel/smp.c            |   2
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c     |  53
-rw-r--r--  arch/s390/lib/uaccess_pt.c        | 320
-rw-r--r--  arch/s390/mm/fault.c              |  88
-rw-r--r--  arch/s390/mm/init.c               |   6
-rw-r--r--  arch/s390/mm/vmem.c               |  14
16 files changed, 606 insertions(+), 61 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 12272361c018..5c7e981c115b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -134,6 +134,31 @@ config AUDIT_ARCH
 	bool
 	default y
 
+config S390_SWITCH_AMODE
+	bool "Switch kernel/user addressing modes"
+	help
+	  This option allows to switch the addressing modes of kernel and user
+	  space. The kernel parameter switch_amode=on will enable this feature,
+	  default is disabled. Enabling this (via kernel parameter) on machines
+	  earlier than IBM System z9-109 EC/BC will reduce system performance.
+
+	  Note that this option will also be selected by selecting the execute
+	  protection option below. Enabling the execute protection via the
+	  noexec kernel parameter will also switch the addressing modes,
+	  independent of the switch_amode kernel parameter.
+
+
+config S390_EXEC_PROTECT
+	bool "Data execute protection"
+	select S390_SWITCH_AMODE
+	help
+	  This option allows to enable a buffer overflow protection for user
+	  space programs and it also selects the addressing mode option above.
+	  The kernel parameter noexec=on will enable this feature and also
+	  switch the addressing modes, default is disabled. Enabling this (via
+	  kernel parameter) on machines earlier than IBM System z9-109 EC/BC
+	  will reduce system performance.
+
 comment "Code generation options"
 
 choice
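For illustration, with both options compiled in, either feature is still off until selected per boot. A sketch of the relevant kernel command-line fragments, taken from the help text above and the early_param() handlers added in arch/s390/kernel/setup.c below (the rest of the parameter line is elided):

    ... switch_amode=on      (switch addressing modes only)
    ... noexec=on            (execute protection; implies switched amodes)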
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 46bb38515b0d..12eb97f9c1d1 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -108,6 +108,8 @@ CONFIG_DEFAULT_MIGRATION_COST=1000000
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
 CONFIG_AUDIT_ARCH=y
+CONFIG_S390_SWITCH_AMODE=y
+CONFIG_S390_EXEC_PROTECT=y
 
 #
 # Code generation options
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index cf84d697daed..666bb6daa148 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -69,6 +69,12 @@
 
 #include "compat_linux.h"
 
+long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+			PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
+long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
+			PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
+			PSW32_MASK_PSTATE);
 
 /* For this source file, we want overflow handling. */
 
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 1a18e29668ef..e89f8c0c42a0 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -115,37 +115,6 @@ typedef struct
 	__u32	addr;
 } _psw_t32 __attribute__ ((aligned(8)));
 
-#define PSW32_MASK_PER		0x40000000UL
-#define PSW32_MASK_DAT		0x04000000UL
-#define PSW32_MASK_IO		0x02000000UL
-#define PSW32_MASK_EXT		0x01000000UL
-#define PSW32_MASK_KEY		0x00F00000UL
-#define PSW32_MASK_MCHECK	0x00040000UL
-#define PSW32_MASK_WAIT		0x00020000UL
-#define PSW32_MASK_PSTATE	0x00010000UL
-#define PSW32_MASK_ASC		0x0000C000UL
-#define PSW32_MASK_CC		0x00003000UL
-#define PSW32_MASK_PM		0x00000f00UL
-
-#define PSW32_ADDR_AMODE31	0x80000000UL
-#define PSW32_ADDR_INSN		0x7FFFFFFFUL
-
-#define PSW32_BASE_BITS		0x00080000UL
-
-#define PSW32_ASC_PRIMARY	0x00000000UL
-#define PSW32_ASC_ACCREG	0x00004000UL
-#define PSW32_ASC_SECONDARY	0x00008000UL
-#define PSW32_ASC_HOME		0x0000C000UL
-
-#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
-			 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
-			 PSW32_MASK_PSTATE)
-
-#define PSW32_MASK_MERGE(CURRENT,NEW) \
-	(((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
-	 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
-
-
 typedef struct
 {
 	_psw_t32	psw;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 8d17b2ab6f21..887a9881d0d0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 	_s390_regs_common32 regs32;
 	int err, i;
 
-	regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS,
+	regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
 				(__u32)(regs->psw.mask >> 32));
 	regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
 	for (i = 0; i < NUM_GPRS; i++)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 9e9972e8a52b..2c91226e1d40 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1016,12 +1016,12 @@ void s390_reset_system(void)
 	__ctl_clear_bit(0,28);
 
 	/* Set new machine check handler */
-	S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
+	S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
 	S390_lowcore.mcck_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler;
 
 	/* Set new program check handler */
-	S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
+	S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
 	S390_lowcore.program_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler;
 
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6603fbb41d07..5acfac654f9d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -144,7 +144,7 @@ static void default_idle(void)
 
 	trace_hardirqs_on();
 	/* Wait for external, I/O or machine check interrupt. */
-	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT |
+	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
 			PSW_MASK_IO | PSW_MASK_EXT);
 }
 
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	struct pt_regs regs;
 
 	memset(&regs, 0, sizeof(regs));
-	regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+	regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
 	regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
 	regs.gprs[9] = (unsigned long) fn;
 	regs.gprs[10] = (unsigned long) arg;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 29fde70090fe..2a8f0872ea8b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -230,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
 	 */
 	if (addr == (addr_t) &dummy->regs.psw.mask &&
 #ifdef CONFIG_COMPAT
-	    data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
+	    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
 #endif
-	    data != PSW_MASK_MERGE(PSW_USER_BITS, data))
+	    data != PSW_MASK_MERGE(psw_user_bits, data))
 		/* Invalid psw mask. */
 		return -EINVAL;
 #ifndef CONFIG_64BIT
@@ -393,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
 	if (addr == (addr_t) &dummy32->regs.psw.mask) {
 		/* Fake a 31 bit psw mask. */
 		tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
-		tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
+		tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
 	} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
 		/* Fake a 31 bit psw address. */
 		tmp = (__u32) task_pt_regs(child)->psw.addr |
@@ -468,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
 	 */
 	if (addr == (addr_t) &dummy32->regs.psw.mask) {
 		/* Build a 64 bit psw mask from 31 bit mask. */
-		if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
+		if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
 			/* Invalid psw mask. */
 			return -EINVAL;
 		task_pt_regs(child)->psw.mask =
-			PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
+			PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
 	} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
 		/* Build a 64 bit psw address from 31 bit address. */
 		task_pt_regs(child)->psw.addr =
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 25bf7277d311..b1b9a931237d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -50,6 +50,13 @@
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/sections.h>
+#include <asm/compat.h>
+
+long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
+			PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
+long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+		      PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+		      PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
 
 /*
  * User copy operations.
@@ -383,6 +390,84 @@ static int __init early_parse_ipldelay(char *p)
 }
 early_param("ipldelay", early_parse_ipldelay);
 
+#ifdef CONFIG_S390_SWITCH_AMODE
+unsigned int switch_amode = 0;
+EXPORT_SYMBOL_GPL(switch_amode);
+
+static inline void set_amode_and_uaccess(unsigned long user_amode,
+					 unsigned long user32_amode)
+{
+	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
+			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+#ifdef CONFIG_COMPAT
+	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
+			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
+			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
+			  PSW32_MASK_PSTATE;
+#endif
+	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
+
+	if (MACHINE_HAS_MVCOS) {
+		printk("mvcos available.\n");
+		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
+	} else {
+		printk("mvcos not available.\n");
+		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
+	}
+}
+
+/*
+ * Switch kernel/user addressing modes?
+ */
+static int __init early_parse_switch_amode(char *p)
+{
+	switch_amode = 1;
+	return 0;
+}
+early_param("switch_amode", early_parse_switch_amode);
+
+#else /* CONFIG_S390_SWITCH_AMODE */
+static inline void set_amode_and_uaccess(unsigned long user_amode,
+					 unsigned long user32_amode)
+{
+}
+#endif /* CONFIG_S390_SWITCH_AMODE */
+
+#ifdef CONFIG_S390_EXEC_PROTECT
+unsigned int s390_noexec = 0;
+EXPORT_SYMBOL_GPL(s390_noexec);
+
+/*
+ * Enable execute protection?
+ */
+static int __init early_parse_noexec(char *p)
+{
+	if (!strncmp(p, "off", 3))
+		return 0;
+	switch_amode = 1;
+	s390_noexec = 1;
+	return 0;
+}
+early_param("noexec", early_parse_noexec);
+#endif /* CONFIG_S390_EXEC_PROTECT */
+
+static void setup_addressing_mode(void)
+{
+	if (s390_noexec) {
+		printk("S390 execute protection active, ");
+		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
+		return;
+	}
+	if (switch_amode) {
+		printk("S390 address spaces switched, ");
+		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
+	}
+}
+
 static void __init
 setup_lowcore(void)
 {
@@ -399,19 +484,21 @@ setup_lowcore(void)
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-	lc->external_new_psw.mask = PSW_KERNEL_BITS;
+	if (switch_amode)
+		lc->restart_psw.mask |= PSW_ASC_HOME;
+	lc->external_new_psw.mask = psw_kernel_bits;
 	lc->external_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
-	lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
 	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
-	lc->program_new_psw.mask = PSW_KERNEL_BITS;
+	lc->program_new_psw.mask = psw_kernel_bits;
 	lc->program_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
 	lc->mcck_new_psw.mask =
-		PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
+		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
 	lc->mcck_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
-	lc->io_new_psw.mask = PSW_KERNEL_BITS;
+	lc->io_new_psw.mask = psw_kernel_bits;
 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 	lc->ipl_device = S390_lowcore.ipl_device;
 	lc->jiffy_timer = -1LL;
@@ -645,6 +732,7 @@ setup_arch(char **cmdline_p)
 	parse_early_param();
 
 	setup_memory_end();
+	setup_addressing_mode();
 	setup_memory();
 	setup_resources();
 	setup_lowcore();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4c8a7954ef48..554f9cf7499c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 
 	/* Copy a 'clean' PSW mask to the user to avoid leaking
 	   information about whether PER is currently on. */
-	user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
+	user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
 	user_sregs.regs.psw.addr = regs->psw.addr;
 	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3cb7e1032072..cb155d9fd749 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -244,7 +244,7 @@ static inline void do_wait_for_stop(void)
 void smp_send_stop(void)
 {
 	/* Disable all interrupts/machine checks */
-	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 
 	/* write magic number to zero page (absolute 0) */
 	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 78c48f88f5f7..6d8772339d76 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -162,6 +162,44 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
 	return size;
 }
 
+static size_t strnlen_user_mvcos(size_t count, const char __user *src)
+{
+	char buf[256];
+	int rc;
+	size_t done, len, len_str;
+
+	done = 0;
+	do {
+		len = min(count - done, (size_t) 256);
+		rc = uaccess.copy_from_user(len, src + done, buf);
+		if (unlikely(rc == len))
+			return 0;
+		len -= rc;
+		len_str = strnlen(buf, len);
+		done += len_str;
+	} while ((len_str == len) && (done < count));
+	return done + 1;
+}
+
+static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
+				      char *dst)
+{
+	int rc;
+	size_t done, len, len_str;
+
+	done = 0;
+	do {
+		len = min(count - done, (size_t) 4096);
+		rc = uaccess.copy_from_user(len, src + done, dst);
+		if (unlikely(rc == len))
+			return -EFAULT;
+		len -= rc;
+		len_str = strnlen(dst, len);
+		done += len_str;
+	} while ((len_str == len) && (done < count));
+	return done;
+}
+
 struct uaccess_ops uaccess_mvcos = {
 	.copy_from_user = copy_from_user_mvcos_check,
 	.copy_from_user_small = copy_from_user_std,
@@ -174,3 +212,18 @@ struct uaccess_ops uaccess_mvcos = {
 	.futex_atomic_op = futex_atomic_op_std,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 };
+
+#ifdef CONFIG_S390_SWITCH_AMODE
+struct uaccess_ops uaccess_mvcos_switch = {
+	.copy_from_user = copy_from_user_mvcos,
+	.copy_from_user_small = copy_from_user_mvcos,
+	.copy_to_user = copy_to_user_mvcos,
+	.copy_to_user_small = copy_to_user_mvcos,
+	.copy_in_user = copy_in_user_mvcos,
+	.clear_user = clear_user_mvcos,
+	.strnlen_user = strnlen_user_mvcos,
+	.strncpy_from_user = strncpy_from_user_mvcos,
+	.futex_atomic_op = futex_atomic_op_pt,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
+};
+#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 24ead559c7bb..637192fa7c9a 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,7 +1,8 @@
 /*
  *  arch/s390/lib/uaccess_pt.c
  *
- *  User access functions based on page table walks.
+ *  User access functions based on page table walks for enhanced
+ *  system layout without hardware support.
  *
  *  Copyright IBM Corp. 2006
  *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
@@ -134,6 +135,49 @@ fault:
 	goto retry;
 }
 
+/*
+ * Do DAT for user address by page table walk, return kernel address.
+ * This function needs to be called with current->mm->page_table_lock held.
+ */
+static inline unsigned long __dat_user_addr(unsigned long uaddr)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long pfn, ret;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int rc;
+
+	ret = 0;
+retry:
+	pgd = pgd_offset(mm, uaddr);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto fault;
+
+	pmd = pmd_offset(pgd, uaddr);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto fault;
+
+	pte = pte_offset_map(pmd, uaddr);
+	if (!pte || !pte_present(*pte))
+		goto fault;
+
+	pfn = pte_pfn(*pte);
+	if (!pfn_valid(pfn))
+		goto out;
+
+	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+out:
+	return ret;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	rc = __handle_fault(mm, uaddr, 0);
+	spin_lock(&mm->page_table_lock);
+	if (rc)
+		goto out;
+	goto retry;
+}
+
 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
 {
 	size_t rc;
@@ -156,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 	}
 	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
 }
+
+static size_t clear_user_pt(size_t n, void __user *to)
+{
+	long done, size, ret;
+
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memset((void __kernel __force *) to, 0, n);
+		return 0;
+	}
+	done = 0;
+	do {
+		if (n - done > PAGE_SIZE)
+			size = PAGE_SIZE;
+		else
+			size = n - done;
+		ret = __user_copy_pt((unsigned long) to + done,
+				     &empty_zero_page, size, 1);
+		done += size;
+		if (ret)
+			return ret + n - done;
+	} while (done < n);
+	return 0;
+}
+
+static size_t strnlen_user_pt(size_t count, const char __user *src)
+{
+	char *addr;
+	unsigned long uaddr = (unsigned long) src;
+	struct mm_struct *mm = current->mm;
+	unsigned long offset, pfn, done, len;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	size_t len_str;
+
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return strnlen((const char __kernel __force *) src, count) + 1;
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pgd = pgd_offset(mm, uaddr);
+		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+			goto fault;
+
+		pmd = pmd_offset(pgd, uaddr);
+		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+			goto fault;
+
+		pte = pte_offset_map(pmd, uaddr);
+		if (!pte || !pte_present(*pte))
+			goto fault;
+
+		pfn = pte_pfn(*pte);
+		if (!pfn_valid(pfn)) {
+			done = -1;
+			goto out;
+		}
+
+		offset = uaddr & (PAGE_SIZE-1);
+		addr = (char *)(pfn << PAGE_SHIFT) + offset;
+		len = min(count - done, PAGE_SIZE - offset);
+		len_str = strnlen(addr, len);
+		done += len_str;
+		uaddr += len_str;
+	} while ((len_str == len) && (done < count));
+out:
+	spin_unlock(&mm->page_table_lock);
+	return done + 1;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(mm, uaddr, 0)) {
+		return 0;
+	}
+	goto retry;
+}
+
+static size_t strncpy_from_user_pt(size_t count, const char __user *src,
+				   char *dst)
+{
+	size_t n = strnlen_user_pt(count, src);
+
+	if (!n)
+		return -EFAULT;
+	if (n > count)
+		n = count;
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy(dst, (const char __kernel __force *) src, n);
+		if (dst[n-1] == '\0')
+			return n-1;
+		else
+			return n;
+	}
+	if (__user_copy_pt((unsigned long) src, dst, n, 0))
+		return -EFAULT;
+	if (dst[n-1] == '\0')
+		return n-1;
+	else
+		return n;
+}
+
+static size_t copy_in_user_pt(size_t n, void __user *to,
+			      const void __user *from)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
+		      uaddr, done, size;
+	unsigned long uaddr_from = (unsigned long) from;
+	unsigned long uaddr_to = (unsigned long) to;
+	pgd_t *pgd_from, *pgd_to;
+	pmd_t *pmd_from, *pmd_to;
+	pte_t *pte_from, *pte_to;
+	int write_user;
+
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pgd_from = pgd_offset(mm, uaddr_from);
+		if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
+			uaddr = uaddr_from;
+			write_user = 0;
+			goto fault;
+		}
+		pgd_to = pgd_offset(mm, uaddr_to);
+		if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
+			uaddr = uaddr_to;
+			write_user = 1;
+			goto fault;
+		}
+
+		pmd_from = pmd_offset(pgd_from, uaddr_from);
+		if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
+			uaddr = uaddr_from;
+			write_user = 0;
+			goto fault;
+		}
+		pmd_to = pmd_offset(pgd_to, uaddr_to);
+		if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
+			uaddr = uaddr_to;
+			write_user = 1;
+			goto fault;
+		}
+
+		pte_from = pte_offset_map(pmd_from, uaddr_from);
+		if (!pte_from || !pte_present(*pte_from)) {
+			uaddr = uaddr_from;
+			write_user = 0;
+			goto fault;
+		}
+		pte_to = pte_offset_map(pmd_to, uaddr_to);
+		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
+			uaddr = uaddr_to;
+			write_user = 1;
+			goto fault;
+		}
+
+		pfn_from = pte_pfn(*pte_from);
+		if (!pfn_valid(pfn_from))
+			goto out;
+		pfn_to = pte_pfn(*pte_to);
+		if (!pfn_valid(pfn_to))
+			goto out;
+
+		offset_from = uaddr_from & (PAGE_SIZE-1);
+		offset_to = uaddr_to & (PAGE_SIZE-1);
+		offset_max = max(offset_from, offset_to);
+		size = min(n - done, PAGE_SIZE - offset_max);
+
+		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
+		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+		done += size;
+		uaddr_from += size;
+		uaddr_to += size;
+	} while (done < n);
+out:
+	spin_unlock(&mm->page_table_lock);
+	return n - done;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(mm, uaddr, write_user))
+		return n - done;
+	goto retry;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile("0: l   %1,0(%6)\n"				\
+		     "1: " insn						\
+		     "2: cs  %1,%2,0(%6)\n"				\
+		     "3: jl  1b\n"					\
+		     "   lhi %0,0\n"					\
+		     "4:\n"						\
+		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
+		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
+		       "=m" (*uaddr)					\
+		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		       "m" (*uaddr) : "cc" );
+
+int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+{
+	int oldval = 0, newval, ret;
+
+	spin_lock(&current->mm->page_table_lock);
+	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
+	if (!uaddr) {
+		spin_unlock(&current->mm->page_table_lock);
+		return -EFAULT;
+	}
+	get_page(virt_to_page(uaddr));
+	spin_unlock(&current->mm->page_table_lock);
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+	put_page(virt_to_page(uaddr));
+	*old = oldval;
+	return ret;
+}
+
+int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+{
+	int ret;
+
+	spin_lock(&current->mm->page_table_lock);
+	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
+	if (!uaddr) {
+		spin_unlock(&current->mm->page_table_lock);
+		return -EFAULT;
+	}
+	get_page(virt_to_page(uaddr));
+	spin_unlock(&current->mm->page_table_lock);
+	asm volatile("   cs   %1,%4,0(%5)\n"
+		     "0: lr   %0,%1\n"
+		     "1:\n"
+		     EX_TABLE(0b,1b)
+		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		     : "cc", "memory" );
+	put_page(virt_to_page(uaddr));
+	return ret;
+}
+
+struct uaccess_ops uaccess_pt = {
+	.copy_from_user = copy_from_user_pt,
+	.copy_from_user_small = copy_from_user_pt,
+	.copy_to_user = copy_to_user_pt,
+	.copy_to_user_small = copy_to_user_pt,
+	.copy_in_user = copy_in_user_pt,
+	.clear_user = clear_user_pt,
+	.strnlen_user = strnlen_user_pt,
+	.strncpy_from_user = strncpy_from_user_pt,
+	.futex_atomic_op = futex_atomic_op_pt,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
+};
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3382e29f34a4..9ff143e87746 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -137,7 +137,9 @@ static int __check_access_register(struct pt_regs *regs, int error_code)
 
 /*
  * Check which address space the address belongs to.
- * Returns 1 for user space and 0 for kernel space.
+ * May return 1 or 2 for user space and 0 for kernel space.
+ * Returns 2 for user space in primary addressing mode with
+ * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
  */
 static inline int check_user_space(struct pt_regs *regs, int error_code)
 {
@@ -154,7 +156,7 @@ static inline int check_user_space(struct pt_regs *regs, int error_code)
 		return __check_access_register(regs, error_code);
 	if (descriptor == 2)
 		return current->thread.mm_segment.ar4;
-	return descriptor != 0;
+	return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
 }
 
 /*
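The rewritten return expression above is dense; as a standalone sketch (assuming the kernel's switch_amode/s390_noexec flags modeled as plain ints, and descriptor values 0 = primary space, 3 = home space, with 1 and 2 handled earlier in the function), the mapping it implements is:

    #include <stdio.h>

    /* Result: 0 = kernel space, 1 = user space,
     * 2 = user space and an execute-fault candidate (noexec). */
    static int user_space(int descriptor, int switch_amode, int s390_noexec)
    {
            return ((descriptor != 0) ^ switch_amode) << s390_noexec;
    }

    int main(void)
    {
            /* Default layout (see psw_user_bits/psw_kernel_bits in setup.c
             * above): user space runs in home space, the kernel in primary. */
            printf("%d\n", user_space(3, 0, 0)); /* home    -> 1 (user)   */
            printf("%d\n", user_space(0, 0, 0)); /* primary -> 0 (kernel) */
            /* switch_amode=on inverts the layout: user space moves to
             * primary/secondary space, the kernel to home space. */
            printf("%d\n", user_space(0, 1, 0)); /* primary -> 1 (user)   */
            printf("%d\n", user_space(3, 1, 0)); /* home    -> 0 (kernel) */
            /* noexec=on additionally shifts the user result to 2, which
             * do_exception() below uses to spot execute faults. */
            printf("%d\n", user_space(0, 1, 1)); /* primary -> 2          */
            return 0;
    }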
@@ -183,6 +185,77 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 	force_sig_info(SIGSEGV, &si, current);
 }
 
+#ifdef CONFIG_S390_EXEC_PROTECT
+extern long sys_sigreturn(struct pt_regs *regs);
+extern long sys_rt_sigreturn(struct pt_regs *regs);
+extern long sys32_sigreturn(struct pt_regs *regs);
+extern long sys32_rt_sigreturn(struct pt_regs *regs);
+
+static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
+				int rt)
+{
+	up_read(&mm->mmap_sem);
+	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+#ifdef CONFIG_COMPAT
+	if (test_tsk_thread_flag(current, TIF_31BIT)) {
+		if (rt)
+			sys32_rt_sigreturn(regs);
+		else
+			sys32_sigreturn(regs);
+		return;
+	}
+#endif /* CONFIG_COMPAT */
+	if (rt)
+		sys_rt_sigreturn(regs);
+	else
+		sys_sigreturn(regs);
+	return;
+}
+
+static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
+			 unsigned long address, unsigned long error_code)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	u16 *instruction;
+	unsigned long pfn, uaddr = regs->psw.addr;
+
+	spin_lock(&mm->page_table_lock);
+	pgd = pgd_offset(mm, uaddr);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto out_fault;
+	pmd = pmd_offset(pgd, uaddr);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto out_fault;
+	pte = pte_offset_map(pmd, uaddr);
+	if (!pte || !pte_present(*pte))
+		goto out_fault;
+	pfn = pte_pfn(*pte);
+	if (!pfn_valid(pfn))
+		goto out_fault;
+	spin_unlock(&mm->page_table_lock);
+
+	instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
+	if (*instruction == 0x0a77)
+		do_sigreturn(mm, regs, 0);
+	else if (*instruction == 0x0aad)
+		do_sigreturn(mm, regs, 1);
+	else {
+		printk("- XXX - do_exception: task = %s, primary, NO EXEC "
+		       "-> SIGSEGV\n", current->comm);
+		up_read(&mm->mmap_sem);
+		current->thread.prot_addr = address;
+		current->thread.trap_no = error_code;
+		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+	}
+	return 0;
+out_fault:
+	spin_unlock(&mm->page_table_lock);
+	return -EFAULT;
+}
+#endif /* CONFIG_S390_EXEC_PROTECT */
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -260,6 +333,17 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
+
+#ifdef CONFIG_S390_EXEC_PROTECT
+	if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+		if (!signal_return(mm, regs, address, error_code))
+			/*
+			 * signal_return() has done an up_read(&mm->mmap_sem)
+			 * if it returns 0.
+			 */
+			return;
+#endif
+
 	if (vma->vm_start <= address)
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 0e7e9acab9e1..162a338a5575 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -104,7 +104,7 @@ static void __init setup_ro_region(void)
 		pmd = pmd_offset(pgd, address);
 		pte = pte_offset_kernel(pmd, address);
 		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
-		set_pte(pte, new_pte);
+		*pte = new_pte;
 	}
 }
 
@@ -124,11 +124,11 @@ void __init paging_init(void)
 #ifdef CONFIG_64BIT
 	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
 	for (i = 0; i < PTRS_PER_PGD; i++)
-		pgd_clear(pg_dir + i);
+		pgd_clear_kernel(pg_dir + i);
 #else
 	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *)(pg_dir + i));
+		pmd_clear_kernel((pmd_t *)(pg_dir + i));
 #endif
 	vmem_map_init();
 	setup_ro_region();
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index cd3d93e8c211..92a565190028 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -82,7 +82,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	if (!pmd)
 		return NULL;
 	for (i = 0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
+		pmd_clear_kernel(pmd + i);
 	return pmd;
 }
 
@@ -97,7 +97,7 @@ static inline pte_t *vmem_pte_alloc(void)
 		return NULL;
 	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
 	for (i = 0; i < PTRS_PER_PTE; i++)
-		set_pte(pte + i, empty_pte);
+		pte[i] = empty_pte;
 	return pte;
 }
 
@@ -119,7 +119,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pgd_populate(&init_mm, pg_dir, pm_dir);
+			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
 		}
 
 		pm_dir = pmd_offset(pg_dir, address);
@@ -132,7 +132,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
-		set_pte(pt_dir, pte);
+		*pt_dir = pte;
 	}
 	ret = 0;
 out:
@@ -161,7 +161,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 		if (pmd_none(*pm_dir))
 			continue;
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		set_pte(pt_dir, pte);
+		*pt_dir = pte;
 	}
 	flush_tlb_kernel_range(start, start + size);
 }
@@ -191,7 +191,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pgd_populate(&init_mm, pg_dir, pm_dir);
+			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
 		}
 
 		pm_dir = pmd_offset(pg_dir, address);
@@ -210,7 +210,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
-			set_pte(pt_dir, pte);
+			*pt_dir = pte;
 		}
 	}
 	ret = 0;