path: root/arch/s390
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                    | 15
-rw-r--r--  arch/s390/defconfig                  |  1
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |  4
-rw-r--r--  arch/s390/include/asm/pgalloc.h      |  3
-rw-r--r--  arch/s390/include/asm/setup.h        | 17
-rw-r--r--  arch/s390/kernel/setup.c             | 36
-rw-r--r--  arch/s390/kernel/vdso.c              |  9
-rw-r--r--  arch/s390/kvm/Kconfig                |  1
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c        |  4
-rw-r--r--  arch/s390/mm/fault.c                 |  4
-rw-r--r--  arch/s390/mm/pgtable.c               |  2
11 files changed, 38 insertions, 58 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 16c673096a22..c80235206c01 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -220,23 +220,8 @@ config AUDIT_ARCH
 	bool
 	default y
 
-config S390_SWITCH_AMODE
-	bool "Switch kernel/user addressing modes"
-	help
-	  This option allows to switch the addressing modes of kernel and user
-	  space. The kernel parameter switch_amode=on will enable this feature,
-	  default is disabled. Enabling this (via kernel parameter) on machines
-	  earlier than IBM System z9-109 EC/BC will reduce system performance.
-
-	  Note that this option will also be selected by selecting the execute
-	  protection option below. Enabling the execute protection via the
-	  noexec kernel parameter will also switch the addressing modes,
-	  independent of the switch_amode kernel parameter.
-
-
 config S390_EXEC_PROTECT
 	bool "Data execute protection"
-	select S390_SWITCH_AMODE
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
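Note on the boot parameters mentioned in the removed help text above: reading it together with the early_param() changes in arch/s390/kernel/setup.c further down, the parameter mapping after this patch appears to be roughly as follows (a sketch inferred from this diff, not authoritative documentation):

    user_mode=home         default; user_mode is initialised to HOME_SPACE_MODE
    user_mode=primary      roughly equivalent to the old switch_amode=on
    user_mode=secondary    roughly equivalent to booting with noexec enabled;
                           only accepted with CONFIG_S390_EXEC_PROTECT

The old switch_amode= and noexec= parameters are still parsed and now simply set user_mode.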
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ab4464486b7a..f4e53c6708dc 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -185,7 +185,6 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
 CONFIG_AUDIT_ARCH=y
-CONFIG_S390_SWITCH_AMODE=y
 CONFIG_S390_EXEC_PROTECT=y
 
 #
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index fc7edd6f41b6..976e273988c2 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -36,7 +36,7 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.has_pgste = 1;
 		mm->context.alloc_pgste = 1;
 	} else {
-		mm->context.noexec = s390_noexec;
+		mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
 		mm->context.has_pgste = 0;
 		mm->context.alloc_pgste = 0;
 	}
@@ -58,7 +58,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (switch_amode) {
+	if (user_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
 		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
 		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index ddad5903341c..68940d0bad91 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -143,7 +143,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.crst_list);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
+	return (pgd_t *)
+		crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index e37478e87286..52a779c337e8 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -49,17 +49,12 @@ extern unsigned long memory_end;
 
 void detect_memory_layout(struct mem_chunk chunk[]);
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-extern unsigned int switch_amode;
-#else
-#define switch_amode	(0)
-#endif
-
-#ifdef CONFIG_S390_EXEC_PROTECT
-extern unsigned int s390_noexec;
-#else
-#define s390_noexec	(0)
-#endif
+#define PRIMARY_SPACE_MODE	0
+#define ACCESS_REGISTER_MODE	1
+#define SECONDARY_SPACE_MODE	2
+#define HOME_SPACE_MODE		3
+
+extern unsigned int user_mode;
 
 /*
  * Machine features detected in head.S
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 061479ff029f..0663287fa1b3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,9 +305,8 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-unsigned int switch_amode = 0;
-EXPORT_SYMBOL_GPL(switch_amode);
+unsigned int user_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(user_mode);
 
 static int set_amode_and_uaccess(unsigned long user_amode,
 				 unsigned long user32_amode)
@@ -340,23 +339,29 @@ static int set_amode_and_uaccess(unsigned long user_amode,
  */
 static int __init early_parse_switch_amode(char *p)
 {
-	switch_amode = 1;
+	if (user_mode != SECONDARY_SPACE_MODE)
+		user_mode = PRIMARY_SPACE_MODE;
 	return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);
 
-#else /* CONFIG_S390_SWITCH_AMODE */
-static inline int set_amode_and_uaccess(unsigned long user_amode,
-					unsigned long user32_amode)
+static int __init early_parse_user_mode(char *p)
 {
+	if (p && strcmp(p, "primary") == 0)
+		user_mode = PRIMARY_SPACE_MODE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+	else if (p && strcmp(p, "secondary") == 0)
+		user_mode = SECONDARY_SPACE_MODE;
+#endif
+	else if (!p || strcmp(p, "home") == 0)
+		user_mode = HOME_SPACE_MODE;
+	else
+		return 1;
 	return 0;
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
+early_param("user_mode", early_parse_user_mode);
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-unsigned int s390_noexec = 0;
-EXPORT_SYMBOL_GPL(s390_noexec);
-
 /*
  * Enable execute protection?
  */
356#ifdef CONFIG_S390_EXEC_PROTECT 364#ifdef CONFIG_S390_EXEC_PROTECT
357unsigned int s390_noexec = 0;
358EXPORT_SYMBOL_GPL(s390_noexec);
359
360/* 365/*
361 * Enable execute protection? 366 * Enable execute protection?
362 */ 367 */
@@ -364,8 +369,7 @@ static int __init early_parse_noexec(char *p)
 {
 	if (!strncmp(p, "off", 3))
 		return 0;
-	switch_amode = 1;
-	s390_noexec = 1;
+	user_mode = SECONDARY_SPACE_MODE;
 	return 0;
 }
 early_param("noexec", early_parse_noexec);
@@ -373,7 +377,7 @@ early_param("noexec", early_parse_noexec);
 
 static void setup_addressing_mode(void)
 {
-	if (s390_noexec) {
+	if (user_mode == SECONDARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
 					  PSW32_ASC_SECONDARY))
 			pr_info("Execute protection active, "
@@ -381,7 +385,7 @@ static void setup_addressing_mode(void)
 		else
 			pr_info("Execute protection active, "
 				"mvcos not available\n");
-	} else if (switch_amode) {
+	} else if (user_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
@@ -411,7 +415,7 @@ setup_lowcore(void)
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-	if (switch_amode)
+	if (user_mode != HOME_SPACE_MODE)
 		lc->restart_psw.mask |= PSW_ASC_HOME;
 	lc->external_new_psw.mask = psw_kernel_bits;
 	lc->external_new_psw.addr =
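The remaining hunks apply the same substitution pattern as the setup.c changes above. As a minimal C sketch of that pattern (the helper names here are hypothetical and not part of the patch): tests of the old switch_amode flag become comparisons of user_mode against HOME_SPACE_MODE, and tests of s390_noexec become comparisons against SECONDARY_SPACE_MODE.

/* Hypothetical illustration only, assuming the constants from <asm/setup.h>
 * introduced by this patch; these helpers do not exist in the kernel. */
#include <asm/setup.h>

static inline int amode_switched(void)		/* was: if (switch_amode) */
{
	return user_mode != HOME_SPACE_MODE;
}

static inline int noexec_enabled(void)		/* was: if (s390_noexec) */
{
	return user_mode == SECONDARY_SPACE_MODE;
}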
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index adfb32aa6d59..5f99e66c51c3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -86,7 +86,8 @@ static void vdso_init_data(struct vdso_data *vd)
 	unsigned int facility_list;
 
 	facility_list = stfl();
-	vd->ectg_available = switch_amode && (facility_list & 1);
+	vd->ectg_available =
+		user_mode != HOME_SPACE_MODE && (facility_list & 1);
 }
 
 #ifdef CONFIG_64BIT
@@ -114,7 +115,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return 0;
 
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -160,7 +161,7 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 
 	psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -184,7 +185,7 @@ static void __vdso_init_cr5(void *dummy)
 
 static void vdso_init_cr5(void)
 {
-	if (switch_amode && vdso_enabled)
+	if (user_mode != HOME_SPACE_MODE && vdso_enabled)
 		on_each_cpu(__vdso_init_cr5, NULL, 1);
 }
 #endif /* CONFIG_64BIT */
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index bf164fc21864..6ee55ae84ce2 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
-	select S390_SWITCH_AMODE
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 58da3f461214..60455f104ea3 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -162,7 +162,6 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
 	return size;
 }
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 {
 	char buf[256];
@@ -200,7 +199,6 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
 	} while ((len_str == len) && (done < count));
 	return done;
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
 
 struct uaccess_ops uaccess_mvcos = {
 	.copy_from_user = copy_from_user_mvcos_check,
@@ -215,7 +213,6 @@ struct uaccess_ops uaccess_mvcos = {
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 };
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 struct uaccess_ops uaccess_mvcos_switch = {
 	.copy_from_user = copy_from_user_mvcos,
 	.copy_from_user_small = copy_from_user_mvcos,
@@ -228,4 +225,3 @@ struct uaccess_ops uaccess_mvcos_switch = {
 	.futex_atomic_op = futex_atomic_op_pt,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
 };
-#endif
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3df5b918cfe2..77108e34fc1a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -112,7 +112,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
 	if (trans_exc_code == 2)
 		/* Access via secondary space, set_fs setting decides */
 		return current->thread.mm_segment.ar4;
-	if (!switch_amode)
+	if (user_mode == HOME_SPACE_MODE)
 		/* User space if the access has been done via home space. */
 		return trans_exc_code == 3;
 	/*
@@ -168,7 +168,7 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	 * terminate things with extreme prejudice.
 	 */
 	address = trans_exc_code & __FAIL_ADDR_MASK;
-	if (user_space_fault(trans_exc_code) == 0)
+	if (!user_space_fault(trans_exc_code))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 	else
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2757c5616a07..ad621e06ada3 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -269,7 +269,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;
 
 	/* Do we have switched amode? If no, we cannot do sie */
-	if (!switch_amode)
+	if (user_mode == HOME_SPACE_MODE)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */