author		Gerald Schaefer <geraldsc@de.ibm.com>	2007-02-05 15:18:17 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-02-05 15:18:17 -0500
commit		c1821c2e9711adc3cd298a16b7237c92a2cee78d (patch)
tree		9155b089db35a37d95863125ea4c5f918bd7801b /include
parent		86aa9fc2456d8a662f299a70bdb70987209170f0 (diff)
[S390] noexec protection
This provides a noexec protection on s390 hardware. Our hardware does not
have any bits left in the pte for a hw noexec bit, so this is a different
approach using shadow page tables and a special addressing mode that allows
separate address spaces for code and data. As a special feature of our
"secondary-space" addressing mode, separate page tables can be specified
for the translation of data addresses (storage operands) and instruction
addresses. The shadow page table is used for the instruction addresses and
the standard page table for the data addresses.

The shadow page table is linked to the standard page table by a pointer in
page->lru.next of the struct page corresponding to the page that contains
the standard page table (since page->private is not really private with the
pte_lock and the page table pages are not in the LRU list).

Depending on the software bits of a pte, it is either inserted into both
page tables or just into the standard (data) page table. Pages of a vma
that does not have the VM_EXEC bit set get mapped only in the data address
space. Any attempt to execute code on such a page will cause a page
translation exception. The standard reaction to this is a SIGSEGV with two
exceptions: the two system call opcodes 0x0a77 (sys_sigreturn) and 0x0aad
(sys_rt_sigreturn) are allowed. They are stored by the kernel to the signal
stack frame. Unfortunately, the signal return mechanism cannot be modified
to use an SA_RESTORER because the exception unwinding code depends on the
system call opcode stored behind the signal stack frame.

This feature requires that user space is executed in secondary-space mode
and the kernel in home-space mode, which means that the addressing modes
need to be switched and that the noexec protection only works for user
space. After switching the addressing modes, we cannot use the mvcp/mvcs
instructions anymore to copy between kernel and user space. A new mvcos
instruction has been added to the z9 EC/BC hardware which allows copying
between arbitrary address spaces, but on older hardware the page tables
need to be walked manually.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
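In outline, the shadow-table linkage works as sketched below. This is a
minimal illustration distilled from the pgtable.h changes in this patch,
not a separate API: the helper name shadow_pte_sketch is made up for the
example, error handling is omitted, and the usual kernel helpers
(virt_to_page, list_empty) of that era are assumed:

	/* Sketch: find the shadow (instruction) pte for a data pte.
	 * The page holding the standard page table keeps a pointer to
	 * its shadow table in page->lru.next; page->private is taken
	 * by the pte_lock and page table pages are not on the LRU.
	 */
	static inline pte_t *shadow_pte_sketch(pte_t *ptep)
	{
		unsigned long addr = (unsigned long) ptep;
		unsigned long offset = addr & (PAGE_SIZE - 1);
		struct page *page = virt_to_page((void *)(addr - offset));

		if (list_empty(&page->lru))
			return NULL;	/* no shadow table, noexec off */
		return (pte_t *)((unsigned long) page->lru.next + offset);
	}

set_pte() then writes the pte into the standard table and, if a shadow pte
exists, stores either a read-only copy (for executable ptes) or an empty
pte into the shadow table; see the pgtable.h hunk below.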
Diffstat (limited to 'include')
-rw-r--r--	include/asm-s390/compat.h	28
-rw-r--r--	include/asm-s390/lowcore.h	6
-rw-r--r--	include/asm-s390/mmu_context.h	50
-rw-r--r--	include/asm-s390/pgalloc.h	85
-rw-r--r--	include/asm-s390/pgtable.h	146
-rw-r--r--	include/asm-s390/processor.h	6
-rw-r--r--	include/asm-s390/ptrace.h	11
-rw-r--r--	include/asm-s390/setup.h	12
-rw-r--r--	include/asm-s390/smp.h	2
-rw-r--r--	include/asm-s390/system.h	4
-rw-r--r--	include/asm-s390/tlbflush.h	9
-rw-r--r--	include/asm-s390/uaccess.h	2
12 files changed, 307 insertions, 54 deletions
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h
index 356a0b183539..296f4f1a20e1 100644
--- a/include/asm-s390/compat.h
+++ b/include/asm-s390/compat.h
@@ -6,6 +6,34 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 
+#define PSW32_MASK_PER		0x40000000UL
+#define PSW32_MASK_DAT		0x04000000UL
+#define PSW32_MASK_IO		0x02000000UL
+#define PSW32_MASK_EXT		0x01000000UL
+#define PSW32_MASK_KEY		0x00F00000UL
+#define PSW32_MASK_MCHECK	0x00040000UL
+#define PSW32_MASK_WAIT		0x00020000UL
+#define PSW32_MASK_PSTATE	0x00010000UL
+#define PSW32_MASK_ASC		0x0000C000UL
+#define PSW32_MASK_CC		0x00003000UL
+#define PSW32_MASK_PM		0x00000f00UL
+
+#define PSW32_ADDR_AMODE31	0x80000000UL
+#define PSW32_ADDR_INSN		0x7FFFFFFFUL
+
+#define PSW32_BASE_BITS		0x00080000UL
+
+#define PSW32_ASC_PRIMARY	0x00000000UL
+#define PSW32_ASC_ACCREG	0x00004000UL
+#define PSW32_ASC_SECONDARY	0x00008000UL
+#define PSW32_ASC_HOME		0x0000C000UL
+
+#define PSW32_MASK_MERGE(CURRENT,NEW) \
+	(((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
+	 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
+
+extern long psw32_user_bits;
+
 #define COMPAT_USER_HZ 100
 
 typedef u32 compat_size_t;
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 74f7389bd3ee..4a31d0a7ee83 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -220,7 +220,8 @@ struct _lowcore
 	__u32	kernel_asce;		/* 0xc4c */
 	__u32	user_asce;		/* 0xc50 */
 	__u32	panic_stack;		/* 0xc54 */
-	__u8	pad10[0xc60-0xc58];	/* 0xc58 */
+	__u32	user_exec_asce;		/* 0xc58 */
+	__u8	pad10[0xc60-0xc5c];	/* 0xc5c */
 	/* entry.S sensitive area start */
 	struct	cpuinfo_S390	cpu_data;	/* 0xc60 */
 	__u32	ipl_device;		/* 0xc7c */
@@ -310,7 +311,8 @@ struct _lowcore
 	__u64	kernel_asce;		/* 0xd58 */
 	__u64	user_asce;		/* 0xd60 */
 	__u64	panic_stack;		/* 0xd68 */
-	__u8	pad10[0xd80-0xd70];	/* 0xd70 */
+	__u64	user_exec_asce;		/* 0xd70 */
+	__u8	pad10[0xd80-0xd78];	/* 0xd78 */
 	/* entry.S sensitive area start */
 	struct	cpuinfo_S390	cpu_data;	/* 0xd80 */
 	__u32	ipl_device;		/* 0xdb8 */
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index bcf24a873874..1d21da220d49 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -9,6 +9,7 @@
 #ifndef __S390_MMU_CONTEXT_H
 #define __S390_MMU_CONTEXT_H
 
+#include <asm/pgalloc.h>
 /*
  * get a new mmu context.. S390 don't know about contexts.
  */
@@ -16,29 +17,44 @@
 
 #define destroy_context(mm)	do { } while (0)
 
+#ifndef __s390x__
+#define LCTL_OPCODE "lctl"
+#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
+#else
+#define LCTL_OPCODE "lctlg"
+#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
+#endif
+
 static inline void enter_lazy_tlb(struct mm_struct *mm,
                                   struct task_struct *tsk)
 {
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
-	if (prev != next) {
-#ifndef __s390x__
-		S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) |
-			(_SEGMENT_TABLE|USER_STD_MASK);
-		/* Load home space page table origin. */
-		asm volatile("lctl 13,13,%0"
-			     : : "m" (S390_lowcore.user_asce) );
-#else /* __s390x__ */
-		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-			(_REGION_TABLE|USER_STD_MASK);
-		/* Load home space page table origin. */
-		asm volatile("lctlg 13,13,%0"
-			     : : "m" (S390_lowcore.user_asce) );
-#endif /* __s390x__ */
-	}
+	pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
+
+	if (prev != next) {
+		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
+					 PGTABLE_BITS;
+		if (shadow_pgd) {
+			/* Load primary/secondary space page table origin. */
+			S390_lowcore.user_exec_asce =
+				(__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
+			asm volatile(LCTL_OPCODE" 1,1,%0\n"
+				     LCTL_OPCODE" 7,7,%1"
+				     : : "m" (S390_lowcore.user_exec_asce),
+					 "m" (S390_lowcore.user_asce) );
+		} else if (switch_amode) {
+			/* Load primary space page table origin. */
+			asm volatile(LCTL_OPCODE" 1,1,%0"
+				     : : "m" (S390_lowcore.user_asce) );
+		} else
+			/* Load home space page table origin. */
+			asm volatile(LCTL_OPCODE" 13,13,%0"
+				     : : "m" (S390_lowcore.user_asce) );
+	}
 	cpu_set(smp_processor_id(), next->cpu_vm_mask);
 }
 
@@ -51,4 +67,4 @@ static inline void activate_mm(struct mm_struct *prev,
 	set_fs(current->thread.mm_segment);
 }
 
-#endif
+#endif /* __S390_MMU_CONTEXT_H */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 0707a7e2fc16..56c8a6c80e2e 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -47,6 +47,17 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	if (!pgd)
 		return NULL;
+	if (s390_noexec) {
+		pgd_t *shadow_pgd = (pgd_t *)
+			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
+		struct page *page = virt_to_page(pgd);
+
+		if (!shadow_pgd) {
+			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+			return NULL;
+		}
+		page->lru.next = (void *) shadow_pgd;
+	}
 	for (i = 0; i < PTRS_PER_PGD; i++)
 #ifndef __s390x__
 		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
@@ -58,6 +69,10 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(pgd_t *pgd)
 {
+	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
+
+	if (shadow_pgd)
+		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
 	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
 }
 
@@ -71,6 +86,7 @@ static inline void pgd_free(pgd_t *pgd)
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)
 #define pgd_populate(mm, pmd, pte)	BUG()
+#define pgd_populate_kernel(mm, pmd, pte)	BUG()
 #else /* __s390x__ */
 static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
@@ -79,6 +95,17 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 
 	if (!pmd)
 		return NULL;
+	if (s390_noexec) {
+		pmd_t *shadow_pmd = (pmd_t *)
+			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
+		struct page *page = virt_to_page(pmd);
+
+		if (!shadow_pmd) {
+			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+			return NULL;
+		}
+		page->lru.next = (void *) shadow_pmd;
+	}
 	for (i=0; i < PTRS_PER_PMD; i++)
 		pmd_clear(pmd + i);
 	return pmd;
@@ -86,6 +113,10 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 
 static inline void pmd_free (pmd_t *pmd)
 {
+	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+
+	if (shadow_pmd)
+		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
 	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
 }
 
@@ -95,11 +126,22 @@ static inline void pmd_free (pmd_t *pmd)
 		pmd_free(pmd); \
 	} while (0)
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
 	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
+	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+
+	if (shadow_pgd && shadow_pmd)
+		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
+	pgd_populate_kernel(mm, pgd, pmd);
+}
+
 #endif /* __s390x__ */
 
 static inline void
@@ -119,7 +161,13 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
-	pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page));
+	pte_t *pte = (pte_t *)page_to_phys(page);
+	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pte_t *shadow_pte = get_shadow_pte(pte);
+
+	pmd_populate_kernel(mm, pmd, pte);
+	if (shadow_pmd && shadow_pte)
+		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
 }
 
 /*
@@ -133,6 +181,17 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
 
 	if (!pte)
 		return NULL;
+	if (s390_noexec) {
+		pte_t *shadow_pte = (pte_t *)
+			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+		struct page *page = virt_to_page(pte);
+
+		if (!shadow_pte) {
+			free_page((unsigned long) pte);
+			return NULL;
+		}
+		page->lru.next = (void *) shadow_pte;
+	}
 	for (i=0; i < PTRS_PER_PTE; i++) {
 		pte_clear(mm, vmaddr, pte + i);
 		vmaddr += PAGE_SIZE;
@@ -151,14 +210,30 @@ pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_page((unsigned long) pte);
+	pte_t *shadow_pte = get_shadow_pte(pte);
+
+	if (shadow_pte)
+		free_page((unsigned long) shadow_pte);
+	free_page((unsigned long) pte);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	__free_page(pte);
+	struct page *shadow_page = get_shadow_page(pte);
+
+	if (shadow_page)
+		__free_page(shadow_page);
+	__free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
+#define __pte_free_tlb(tlb, pte)				\
+({								\
+	struct mmu_gather *__tlb = (tlb);			\
+	struct page *__pte = (pte);				\
+	struct page *shadow_page = get_shadow_page(__pte);	\
+	if (shadow_page)					\
+		tlb_remove_page(__tlb, shadow_page);		\
+	tlb_remove_page(__tlb, __pte);				\
})
 
 #endif /* _S390_PGALLOC_H */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 304ee7736413..13c16546eff5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -224,6 +224,8 @@ extern unsigned long vmalloc_end;
 #define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
 #define _PAGE_TYPE_RO		0x200
 #define _PAGE_TYPE_RW		0x000
+#define _PAGE_TYPE_EX_RO	0x202
+#define _PAGE_TYPE_EX_RW	0x002
 
 /*
  * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
@@ -244,11 +246,13 @@ extern unsigned long vmalloc_end;
  * _PAGE_TYPE_FILE	11?1   ->   11?1
  * _PAGE_TYPE_RO	0100   ->   1100
  * _PAGE_TYPE_RW	0000   ->   1000
+ * _PAGE_TYPE_EX_RO	0110   ->   1110
+ * _PAGE_TYPE_EX_RW	0010   ->   1010
  *
- * pte_none is true for bits combinations 1000, 1100
+ * pte_none is true for bits combinations 1000, 1010, 1100, 1110
  * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111, 1010 and 1110 are invalid.
+ * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
 #ifndef __s390x__
@@ -313,33 +317,100 @@ extern unsigned long vmalloc_end;
 #define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
 #define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
 #define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
+#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
+#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)
 
 #define PAGE_KERNEL	PAGE_RW
 #define PAGE_COPY	PAGE_RO
 
 /*
- * The S390 can't do page protection for execute, and considers that the
- * same are read. Also, write permissions imply read permissions. This is
- * the closest we can get..
+ * Dependent on the EXEC_PROTECT option s390 can do execute protection.
+ * Write permission always implies read permission. In theory with a
+ * primary/secondary page table execute only can be implemented but
+ * it would cost an additional bit in the pte to distinguish all the
+ * different pte types. To avoid that execute permission currently
+ * implies read permission as well.
  */
         /*xwr*/
 #define __P000	PAGE_NONE
 #define __P001	PAGE_RO
 #define __P010	PAGE_RO
 #define __P011	PAGE_RO
-#define __P100	PAGE_RO
-#define __P101	PAGE_RO
-#define __P110	PAGE_RO
-#define __P111	PAGE_RO
+#define __P100	PAGE_EX_RO
+#define __P101	PAGE_EX_RO
+#define __P110	PAGE_EX_RO
+#define __P111	PAGE_EX_RO
 
 #define __S000	PAGE_NONE
 #define __S001	PAGE_RO
 #define __S010	PAGE_RW
 #define __S011	PAGE_RW
-#define __S100	PAGE_RO
-#define __S101	PAGE_RO
-#define __S110	PAGE_RW
-#define __S111	PAGE_RW
+#define __S100	PAGE_EX_RO
+#define __S101	PAGE_EX_RO
+#define __S110	PAGE_EX_RW
+#define __S111	PAGE_EX_RW
+
+#ifndef __s390x__
+# define PMD_SHADOW_SHIFT	1
+# define PGD_SHADOW_SHIFT	1
+#else /* __s390x__ */
+# define PMD_SHADOW_SHIFT	2
+# define PGD_SHADOW_SHIFT	2
+#endif /* __s390x__ */
+
+static inline struct page *get_shadow_page(struct page *page)
+{
+	if (s390_noexec && !list_empty(&page->lru))
+		return virt_to_page(page->lru.next);
+	return NULL;
+}
+
+static inline pte_t *get_shadow_pte(pte_t *ptep)
+{
+	unsigned long pteptr = (unsigned long) (ptep);
+
+	if (s390_noexec) {
+		unsigned long offset = pteptr & (PAGE_SIZE - 1);
+		void *addr = (void *) (pteptr ^ offset);
+		struct page *page = virt_to_page(addr);
+		if (!list_empty(&page->lru))
+			return (pte_t *) ((unsigned long) page->lru.next |
+					  offset);
+	}
+	return NULL;
+}
+
+static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
+{
+	unsigned long pmdptr = (unsigned long) (pmdp);
+
+	if (s390_noexec) {
+		unsigned long offset = pmdptr &
+			((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
+		void *addr = (void *) (pmdptr ^ offset);
+		struct page *page = virt_to_page(addr);
+		if (!list_empty(&page->lru))
+			return (pmd_t *) ((unsigned long) page->lru.next |
+					  offset);
+	}
+	return NULL;
+}
+
+static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
+{
+	unsigned long pgdptr = (unsigned long) (pgdp);
+
+	if (s390_noexec) {
+		unsigned long offset = pgdptr &
+			((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
+		void *addr = (void *) (pgdptr ^ offset);
+		struct page *page = virt_to_page(addr);
+		if (!list_empty(&page->lru))
+			return (pgd_t *) ((unsigned long) page->lru.next |
+					  offset);
+	}
+	return NULL;
+}
 
 /*
  * Certain architectures need to do special things when PTEs
@@ -348,7 +419,16 @@ extern unsigned long vmalloc_end;
  */
 static inline void set_pte(pte_t *pteptr, pte_t pteval)
 {
+	pte_t *shadow_pte = get_shadow_pte(pteptr);
+
 	*pteptr = pteval;
+	if (shadow_pte) {
+		if (!(pte_val(pteval) & _PAGE_INVALID) &&
+		    (pte_val(pteval) & _PAGE_SWX))
+			pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
+		else
+			pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+	}
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
@@ -466,7 +546,7 @@ static inline int pte_read(pte_t pte)
 
 static inline void pgd_clear(pgd_t * pgdp)	{ }
 
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
 	pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
 	pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
@@ -474,24 +554,55 @@ static inline void pmd_clear(pmd_t * pmdp)
 	pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
 }
 
+static inline void pmd_clear(pmd_t * pmdp)
+{
+	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+
+	pmd_clear_kernel(pmdp);
+	if (shadow_pmd)
+		pmd_clear_kernel(shadow_pmd);
+}
+
 #else /* __s390x__ */
 
-static inline void pgd_clear(pgd_t * pgdp)
+static inline void pgd_clear_kernel(pgd_t * pgdp)
 {
 	pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
 }
 
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pgd_clear(pgd_t * pgdp)
+{
+	pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
+
+	pgd_clear_kernel(pgdp);
+	if (shadow_pgd)
+		pgd_clear_kernel(shadow_pgd);
+}
+
+static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
 	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
 	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
 }
 
+static inline void pmd_clear(pmd_t * pmdp)
+{
+	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+
+	pmd_clear_kernel(pmdp);
+	if (shadow_pmd)
+		pmd_clear_kernel(shadow_pmd);
+}
+
 #endif /* __s390x__ */
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+	pte_t *shadow_pte = get_shadow_pte(ptep);
+
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	if (shadow_pte)
+		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -609,8 +720,11 @@ ptep_clear_flush(struct vm_area_struct *vma,
 		 unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
+	pte_t *shadow_pte = get_shadow_pte(ptep);
 
 	__ptep_ipte(address, ptep);
+	if (shadow_pte)
+		__ptep_ipte(address, shadow_pte);
 	return pte;
 }
 
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 7a7f50efcbd9..5af853576cbd 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -145,7 +145,7 @@ struct stack_frame {
 
 #define start_thread(regs, new_psw, new_stackp) do {	\
 	set_fs(USER_DS);				\
-	regs->psw.mask = PSW_USER_BITS;			\
+	regs->psw.mask = psw_user_bits;			\
 	regs->psw.addr = new_psw | PSW_ADDR_AMODE;	\
 	regs->gprs[15] = new_stackp ;			\
 } while (0)
@@ -154,14 +154,14 @@ struct stack_frame {
 
 #define start_thread(regs, new_psw, new_stackp) do {	\
 	set_fs(USER_DS);				\
-	regs->psw.mask = PSW_USER_BITS;			\
+	regs->psw.mask = psw_user_bits;			\
 	regs->psw.addr = new_psw;			\
 	regs->gprs[15] = new_stackp;			\
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {	\
 	set_fs(USER_DS);				\
-	regs->psw.mask = PSW_USER32_BITS;		\
+	regs->psw.mask = psw_user32_bits;		\
 	regs->psw.addr = new_psw;			\
 	regs->gprs[15] = new_stackp;			\
 } while (0)
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 7b768c5c68a8..fa6ca87080e8 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -266,17 +266,12 @@ typedef struct
 #define PSW_ASC_SECONDARY	0x0000800000000000UL
 #define PSW_ASC_HOME		0x0000C00000000000UL
 
-#define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
-			 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
-			 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
+extern long psw_user32_bits;
 
 #endif /* __s390x__ */
 
-#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \
-			 PSW_MASK_MCHECK | PSW_DEFAULT_KEY)
-#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
-		       PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
-		       PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
+extern long psw_kernel_bits;
+extern long psw_user_bits;
 
 /* This macro merges a NEW PSW mask specified by the user into
    the currently active PSW mask CURRENT, modifying only those
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 542769736fc5..6b68ddda39a7 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -42,6 +42,18 @@ struct mem_chunk {
 
 extern struct mem_chunk memory_chunk[];
 
+#ifdef CONFIG_S390_SWITCH_AMODE
+extern unsigned int switch_amode;
+#else
+#define switch_amode	(0)
+#endif
+
+#ifdef CONFIG_S390_EXEC_PROTECT
+extern unsigned int s390_noexec;
+#else
+#define s390_noexec	(0)
+#endif
+
 /*
  * Machine features detected in head.S
  */
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 2d9e15367c07..b957e4cda464 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -110,7 +110,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
 static inline void smp_send_stop(void)
 {
 	/* Disable all interrupts/machine checks */
-	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 }
 
 #define smp_cpu_not_running(cpu)	1
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bd0b05ae87d2..bbe137c3ed69 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -373,8 +373,8 @@ __set_psw_mask(unsigned long mask)
 	__load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
 }
 
-#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
-#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
+#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
+#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
 
 #ifdef CONFIG_SMP
 
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index fa4dc916a9bf..66793f55c8b2 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -3,6 +3,7 @@
 
 #include <linux/mm.h>
 #include <asm/processor.h>
+#include <asm/pgalloc.h>
 
 /*
  * TLB flushing:
@@ -102,6 +103,14 @@ static inline void __flush_tlb_mm(struct mm_struct * mm)
 	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
 		return;
 	if (MACHINE_HAS_IDTE) {
+		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
+
+		if (shadow_pgd) {
+			asm volatile(
+				"	.insn	rrf,0xb98e0000,0,%0,%1,0"
+				: : "a" (2048),
+				    "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
+		}
 		asm volatile(
 			"	.insn	rrf,0xb98e0000,0,%0,%1,0"
 			: : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 73ac4e82217b..0235970278f0 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -90,6 +90,8 @@ struct uaccess_ops {
 extern struct uaccess_ops uaccess;
 extern struct uaccess_ops uaccess_std;
 extern struct uaccess_ops uaccess_mvcos;
+extern struct uaccess_ops uaccess_mvcos_switch;
+extern struct uaccess_ops uaccess_pt;
 
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {