-rw-r--r--  arch/s390/Kconfig                    |  11
-rw-r--r--  arch/s390/include/asm/elf.h          |  12
-rw-r--r--  arch/s390/include/asm/hugetlb.h      |  17
-rw-r--r--  arch/s390/include/asm/lowcore.h      |   4
-rw-r--r--  arch/s390/include/asm/mmu.h          |   3
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |   6
-rw-r--r--  arch/s390/include/asm/pgalloc.h      |  28
-rw-r--r--  arch/s390/include/asm/pgtable.h      |  75
-rw-r--r--  arch/s390/include/asm/tlbflush.h     |  11
-rw-r--r--  arch/s390/kernel/asm-offsets.c       |   3
-rw-r--r--  arch/s390/kernel/setup.c             |  31
-rw-r--r--  arch/s390/mm/fault.c                 |  39
-rw-r--r--  arch/s390/mm/hugetlbpage.c           |  10
-rw-r--r--  arch/s390/mm/pgtable.c               |  68

14 files changed, 35 insertions(+), 283 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4a7f14079e03..ff2d2371b2e9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -230,17 +230,6 @@ config SYSVIPC_COMPAT
 config AUDIT_ARCH
 	def_bool y
 
-config S390_EXEC_PROTECT
-	def_bool y
-	prompt "Data execute protection"
-	help
-	  This option allows to enable a buffer overflow protection for user
-	  space programs and it also selects the addressing mode option above.
-	  The kernel parameter noexec=on will enable this feature and also
-	  switch the addressing modes, default is disabled. Enabling this (via
-	  kernel parameter) on machines earlier than IBM System z9 this will
-	  reduce system performance.
-
 comment "Code generation options"
 
 choice
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 10c029cfcc7d..64b61bf72e93 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -196,18 +196,6 @@ do { \
 } while (0)
 #endif /* __s390x__ */
 
-/*
- * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
- */
-#define elf_read_implies_exec(ex, executable_stack)	\
-({							\
-	if (current->mm->context.noexec &&		\
-	    executable_stack != EXSTACK_DISABLE_X)	\
-		disable_noexec(current->mm, current);	\
-	current->mm->context.noexec == 0;		\
-})
-
 #define STACK_RND_MASK	0x7ffUL
 
 #define ARCH_DLINFO \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index b56403c2df28..799ed0f1643d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -111,21 +111,10 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 {
 	pmd_t *pmdp = (pmd_t *) ptep;
 
-	if (!MACHINE_HAS_IDTE) {
-		__pmd_csp(pmdp);
-		if (mm->context.noexec) {
-			pmdp = get_shadow_table(pmdp);
-			__pmd_csp(pmdp);
-		}
-		return;
-	}
-
-	__pmd_idte(address, pmdp);
-	if (mm->context.noexec) {
-		pmdp = get_shadow_table(pmdp);
+	if (MACHINE_HAS_IDTE)
 		__pmd_idte(address, pmdp);
-	}
-	return;
+	else
+		__pmd_csp(pmdp);
 }
 
 #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
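
With the shadow-table handling gone, the function is reduced to a straight dispatch on the IDTE facility. For reference, a sketch of the resulting helper, assembled from the new (+) side of the hunk above (the wrapped parameter list is assumed from the hunk header):

static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (MACHINE_HAS_IDTE)
		/* Invalidate the segment-table entry and flush via IDTE. */
		__pmd_idte(address, pmdp);
	else
		/* Older machines: compare-and-swap-and-purge fallback. */
		__pmd_csp(pmdp);
}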
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 65e172f8209d..b8624d53c379 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -124,7 +124,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u32	kernel_asce;			/* 0x02ac */
 	__u32	user_asce;			/* 0x02b0 */
-	__u32	user_exec_asce;			/* 0x02b4 */
+	__u8	pad_0x02b4[0x02b8-0x02b4];	/* 0x02b4 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x02b8 */
@@ -255,7 +255,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u64	kernel_asce;			/* 0x0310 */
 	__u64	user_asce;			/* 0x0318 */
-	__u64	user_exec_asce;			/* 0x0320 */
+	__u8	pad_0x0320[0x0328-0x0320];	/* 0x0320 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x0328 */
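
The pad arrays are sized so that every later lowcore field keeps its architected offset: cpu_nr stays at 0x02b8 in the 31-bit layout and 0x0328 in the 64-bit one, as the offset comments above show. A hypothetical compile-time check (not part of the patch) that would pin this down:

/* Hypothetical assertion, not in the patch: cpu_nr must not move. */
static inline void lowcore_layout_check(void)
{
#ifndef __s390x__
	BUILD_BUG_ON(offsetof(struct _lowcore, cpu_nr) != 0x02b8);
#else
	BUILD_BUG_ON(offsetof(struct _lowcore, cpu_nr) != 0x0328);
#endif
}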
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 78522cdefdd4..818e8298a6bd 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,19 +5,16 @@ typedef struct {
 	atomic_t attach_count;
 	unsigned int flush_mm;
 	spinlock_t list_lock;
-	struct list_head crst_list;
 	struct list_head pgtable_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
-	int noexec;
 	int has_pgste;	/* The mmu context has extended page tables */
 	int alloc_pgste; /* cloned contexts will have extended page tables */
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)						      \
 	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock),   \
-	.context.crst_list = LIST_HEAD_INIT(name.context.crst_list),	      \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
 
 #endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8c277caa8d3a..5682f160ff82 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,11 +35,9 @@ static inline int init_new_context(struct task_struct *tsk,
 		 * and if has_pgste is set, it will create extended page
 		 * tables.
 		 */
-		mm->context.noexec = 0;
 		mm->context.has_pgste = 1;
 		mm->context.alloc_pgste = 1;
 	} else {
-		mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
 		mm->context.has_pgste = 0;
 		mm->context.alloc_pgste = 0;
 	}
@@ -63,10 +61,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
 	if (user_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
-		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
-		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
-			     : : "m" (S390_lowcore.user_exec_asce) );
+			     : : "m" (S390_lowcore.user_asce) );
 	} else
 		/* Load home space page table origin. */
 		asm volatile(LCTL_OPCODE" 13,13,%0"
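
With user_exec_asce gone, the switched-amode path of update_mm() simply loads the one remaining user ASCE into control register 1. A sketch of the resulting branch, assembled from the context and + lines above (the home-space else arm, which loads cr13, is unchanged and not reproduced):

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	if (user_mode != HOME_SPACE_MODE) {
		/* Load primary space page table origin into cr1. */
		asm volatile(LCTL_OPCODE" 1,1,%0\n"
			     : : "m" (S390_lowcore.user_asce) );
	}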
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 082eb4e50e8b..739ff9ec1395 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -19,14 +19,13 @@
 
 #define check_pgt_cache()	do {} while (0)
 
-unsigned long *crst_table_alloc(struct mm_struct *, int);
+unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mm_struct *, unsigned long *);
-void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
@@ -50,9 +49,6 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
 	clear_table(crst, entry, sizeof(unsigned long)*2048);
-	crst = get_shadow_table(crst);
-	if (crst)
-		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
@@ -90,7 +86,7 @@ void crst_table_downgrade(struct mm_struct *, unsigned long limit);
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	unsigned long *table = crst_table_alloc(mm);
 	if (table)
 		crst_table_init(table, _REGION3_ENTRY_EMPTY);
 	return (pud_t *) table;
@@ -99,7 +95,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	unsigned long *table = crst_table_alloc(mm);
 	if (table)
 		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
 	return (pmd_t *) table;
@@ -115,11 +111,6 @@ static inline void pgd_populate_kernel(struct mm_struct *mm,
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
 	pgd_populate_kernel(mm, pgd, pud);
-	if (mm->context.noexec) {
-		pgd = get_shadow_table(pgd);
-		pud = get_shadow_table(pud);
-		pgd_populate_kernel(mm, pgd, pud);
-	}
 }
 
 static inline void pud_populate_kernel(struct mm_struct *mm,
@@ -131,11 +122,6 @@ static inline void pud_populate_kernel(struct mm_struct *mm,
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
 	pud_populate_kernel(mm, pud, pmd);
-	if (mm->context.noexec) {
-		pud = get_shadow_table(pud);
-		pmd = get_shadow_table(pmd);
-		pud_populate_kernel(mm, pud, pmd);
-	}
 }
 
 #endif /* __s390x__ */
@@ -143,10 +129,8 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.list_lock);
-	INIT_LIST_HEAD(&mm->context.crst_list);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	return (pgd_t *)
-		crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
+	return (pgd_t *) crst_table_alloc(mm);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
@@ -160,10 +144,6 @@ static inline void pmd_populate(struct mm_struct *mm,
 				pmd_t *pmd, pgtable_t pte)
 {
 	pmd_populate_kernel(mm, pmd, pte);
-	if (mm->context.noexec) {
-		pmd = get_shadow_table(pmd);
-		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
-	}
 }
 
 #define pmd_pgtable(pmd) \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 02ace3491c51..763620ec7925 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -256,8 +256,6 @@ extern unsigned long VMALLOC_START;
 #define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
 #define _PAGE_TYPE_RO		0x200
 #define _PAGE_TYPE_RW		0x000
-#define _PAGE_TYPE_EX_RO	0x202
-#define _PAGE_TYPE_EX_RW	0x002
 
 /*
  * Only four types for huge pages, using the invalid bit and protection bit
@@ -287,8 +285,6 @@ extern unsigned long VMALLOC_START;
  * _PAGE_TYPE_FILE	11?1   ->   11?1
  * _PAGE_TYPE_RO	0100   ->   1100
  * _PAGE_TYPE_RW	0000   ->   1000
- * _PAGE_TYPE_EX_RO	0110   ->   1110
- * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
@@ -387,55 +383,33 @@ extern unsigned long VMALLOC_START;
 #define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
 #define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
 #define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
-#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
-#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)
 
 #define PAGE_KERNEL	PAGE_RW
 #define PAGE_COPY	PAGE_RO
 
 /*
- * Dependent on the EXEC_PROTECT option s390 can do execute protection.
- * Write permission always implies read permission. In theory with a
- * primary/secondary page table execute only can be implemented but
- * it would cost an additional bit in the pte to distinguish all the
- * different pte types. To avoid that execute permission currently
- * implies read permission as well.
+ * On s390 the page table entry has an invalid bit and a read-only bit.
+ * Read permission implies execute permission and write permission
+ * implies read permission.
  */
         /*xwr*/
#define __P000	PAGE_NONE
 #define __P001	PAGE_RO
 #define __P010	PAGE_RO
 #define __P011	PAGE_RO
-#define __P100	PAGE_EX_RO
-#define __P101	PAGE_EX_RO
-#define __P110	PAGE_EX_RO
-#define __P111	PAGE_EX_RO
+#define __P100	PAGE_RO
+#define __P101	PAGE_RO
+#define __P110	PAGE_RO
+#define __P111	PAGE_RO
 
 #define __S000	PAGE_NONE
 #define __S001	PAGE_RO
 #define __S010	PAGE_RW
 #define __S011	PAGE_RW
-#define __S100	PAGE_EX_RO
-#define __S101	PAGE_EX_RO
-#define __S110	PAGE_EX_RW
-#define __S111	PAGE_EX_RW
-
-#ifndef __s390x__
-# define PxD_SHADOW_SHIFT	1
-#else /* __s390x__ */
-# define PxD_SHADOW_SHIFT	2
-#endif /* __s390x__ */
-
-static inline void *get_shadow_table(void *table)
-{
-	unsigned long addr, offset;
-	struct page *page;
-
-	addr = (unsigned long) table;
-	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
-	page = virt_to_page((void *)(addr ^ offset));
-	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
-}
+#define __S100	PAGE_RO
+#define __S101	PAGE_RO
+#define __S110	PAGE_RW
+#define __S111	PAGE_RW
 
 /*
  * Certain architectures need to do special things when PTEs
@@ -446,14 +420,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t entry)
 {
 	*ptep = entry;
-	if (mm->context.noexec) {
-		if (!(pte_val(entry) & _PAGE_INVALID) &&
-		    (pte_val(entry) & _PAGE_SWX))
-			pte_val(entry) |= _PAGE_RO;
-		else
-			pte_val(entry) = _PAGE_TYPE_EMPTY;
-		ptep[PTRS_PER_PTE] = entry;
-	}
 }
 
 /*
@@ -662,11 +628,7 @@ static inline void pgd_clear_kernel(pgd_t * pgd)
 
 static inline void pgd_clear(pgd_t * pgd)
 {
-	pgd_t *shadow = get_shadow_table(pgd);
-
 	pgd_clear_kernel(pgd);
-	if (shadow)
-		pgd_clear_kernel(shadow);
 }
 
 static inline void pud_clear_kernel(pud_t *pud)
@@ -677,13 +639,8 @@ static inline void pud_clear_kernel(pud_t *pud)
 
 static inline void pud_clear(pud_t *pud)
 {
-	pud_t *shadow = get_shadow_table(pud);
-
 	pud_clear_kernel(pud);
-	if (shadow)
-		pud_clear_kernel(shadow);
 }
-
 #endif /* __s390x__ */
 
 static inline void pmd_clear_kernel(pmd_t * pmdp)
@@ -693,18 +650,12 @@ static inline void pmd_clear_kernel(pmd_t * pmdp)
 
 static inline void pmd_clear(pmd_t *pmd)
 {
-	pmd_t *shadow = get_shadow_table(pmd);
-
 	pmd_clear_kernel(pmd);
-	if (shadow)
-		pmd_clear_kernel(shadow);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-	if (mm->context.noexec)
-		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -903,10 +854,6 @@ static inline void ptep_invalidate(struct mm_struct *mm,
 	}
 	__ptep_ipte(address, ptep);
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-	if (mm->context.noexec) {
-		__ptep_ipte(address, ptep + PTRS_PER_PTE);
-		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
-	}
 }
 
 /*
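
The practical effect of the collapsed __P/__S tables is that any mapping requesting execute rights now resolves to plain read-only or read-write: read permission implies execute on s390, as the new comment states. An illustrative snippet of what a vm_get_page_prot()-style lookup would yield, using the names defined in the hunk above:

	/* Illustrative only; values per the new protection tables. */
	pgprot_t private_exec_read  = __P101;	/* was PAGE_EX_RO, now PAGE_RO */
	pgprot_t shared_exec_write  = __S110;	/* was PAGE_EX_RW, now PAGE_RW */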
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 29d5d6d4becc..4fdcefc1a98d 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,16 +80,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * on all cpus instead of doing a local flush if the mm
 	 * only ran on the local cpu.
 	 */
-	if (MACHINE_HAS_IDTE) {
-		if (mm->context.noexec)
-			__tlb_flush_idte((unsigned long)
-					 get_shadow_table(mm->pgd) |
-					 mm->context.asce_bits);
+	if (MACHINE_HAS_IDTE)
 		__tlb_flush_idte((unsigned long) mm->pgd |
 				 mm->context.asce_bits);
-		return;
-	}
-	__tlb_flush_full(mm);
+	else
+		__tlb_flush_full(mm);
 }
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
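
Since only one ASCE remains per mm, the IDTE path shrinks to a single flush. For reference, the body of __tlb_flush_mm() after the patch, taken from the + side of the hunk above:

	if (MACHINE_HAS_IDTE)
		/* Invalidate all TLB entries derived from this ASCE. */
		__tlb_flush_idte((unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		/* No IDTE facility: fall back to a full TLB flush. */
		__tlb_flush_full(mm);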
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fe03c140002a..ef4555611013 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -128,9 +128,6 @@ int main(void)
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
-	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
-	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
-	DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f5434d1ecb31..0c35dee10b00 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,8 +305,7 @@ static int set_amode_and_uaccess(unsigned long user_amode,
 */
 static int __init early_parse_switch_amode(char *p)
 {
-	if (user_mode != SECONDARY_SPACE_MODE)
-		user_mode = PRIMARY_SPACE_MODE;
+	user_mode = PRIMARY_SPACE_MODE;
 	return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);
@@ -315,10 +314,6 @@ static int __init early_parse_user_mode(char *p)
 {
 	if (p && strcmp(p, "primary") == 0)
 		user_mode = PRIMARY_SPACE_MODE;
-#ifdef CONFIG_S390_EXEC_PROTECT
-	else if (p && strcmp(p, "secondary") == 0)
-		user_mode = SECONDARY_SPACE_MODE;
-#endif
 	else if (!p || strcmp(p, "home") == 0)
 		user_mode = HOME_SPACE_MODE;
 	else
@@ -327,31 +322,9 @@ static int __init early_parse_user_mode(char *p)
 }
 early_param("user_mode", early_parse_user_mode);
 
-#ifdef CONFIG_S390_EXEC_PROTECT
-/*
- * Enable execute protection?
- */
-static int __init early_parse_noexec(char *p)
-{
-	if (!strncmp(p, "off", 3))
-		return 0;
-	user_mode = SECONDARY_SPACE_MODE;
-	return 0;
-}
-early_param("noexec", early_parse_noexec);
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
 static void setup_addressing_mode(void)
 {
-	if (user_mode == SECONDARY_SPACE_MODE) {
-		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
-					  PSW32_ASC_SECONDARY))
-			pr_info("Execute protection active, "
-				"mvcos available\n");
-		else
-			pr_info("Execute protection active, "
-				"mvcos not available\n");
-	} else if (user_mode == PRIMARY_SPACE_MODE) {
+	if (user_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ab988135e5c6..177745c520ca 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -225,33 +225,6 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
 	force_sig_info(SIGBUS, &si, tsk);
 }
 
-#ifdef CONFIG_S390_EXEC_PROTECT
-static noinline int signal_return(struct pt_regs *regs, long int_code,
-				  unsigned long trans_exc_code)
-{
-	u16 instruction;
-	int rc;
-
-	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-
-	if (!rc && instruction == 0x0a77) {
-		clear_tsk_thread_flag(current, TIF_PER_TRAP);
-		if (is_compat_task())
-			sys32_sigreturn();
-		else
-			sys_sigreturn();
-	} else if (!rc && instruction == 0x0aad) {
-		clear_tsk_thread_flag(current, TIF_PER_TRAP);
-		if (is_compat_task())
-			sys32_rt_sigreturn();
-		else
-			sys_rt_sigreturn();
-	} else
-		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
-	return 0;
-}
-#endif /* CONFIG_S390_EXEC_PROTECT */
-
 static noinline void do_fault_error(struct pt_regs *regs, long int_code,
 				    unsigned long trans_exc_code, int fault)
 {
@@ -259,13 +232,6 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
 
 	switch (fault) {
 	case VM_FAULT_BADACCESS:
-#ifdef CONFIG_S390_EXEC_PROTECT
-		if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-		    (trans_exc_code & 3) == 0) {
-			signal_return(regs, int_code, trans_exc_code);
-			break;
-		}
-#endif /* CONFIG_S390_EXEC_PROTECT */
 	case VM_FAULT_BADMAP:
 		/* Bad memory access. Check if it is kernel or user space. */
 		if (regs->psw.mask & PSW_MASK_PSTATE) {
@@ -414,11 +380,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
 	int access, fault;
 
 	access = VM_READ | VM_EXEC | VM_WRITE;
-#ifdef CONFIG_S390_EXEC_PROTECT
-	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
-	    (trans_exc_code & 3) == 0)
-		access = VM_EXEC;
-#endif
 	fault = do_exception(regs, access, trans_exc_code);
 	if (unlikely(fault))
 		do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 639cd21f2218..a4d856db9154 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -13,7 +13,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *pteptr, pte_t pteval)
 {
 	pmd_t *pmdp = (pmd_t *) pteptr;
-	pte_t shadow_pteval = pteval;
 	unsigned long mask;
 
 	if (!MACHINE_HAS_HPAGE) {
@@ -21,18 +20,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		mask = pte_val(pteval) &
 		       (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
 		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-		if (mm->context.noexec) {
-			pteptr += PTRS_PER_PTE;
-			pte_val(shadow_pteval) =
-				(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-		}
 	}
 
 	pmd_val(*pmdp) = pte_val(pteval);
-	if (mm->context.noexec) {
-		pmdp = get_shadow_table(pmdp);
-		pmd_val(*pmdp) = pte_val(shadow_pteval);
-	}
 }
 
 int arch_prepare_hugepage(struct page *page)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index e1850c28cd68..8d4330642512 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | |||
40 | static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist); | 40 | static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist); |
41 | 41 | ||
42 | static void __page_table_free(struct mm_struct *mm, unsigned long *table); | 42 | static void __page_table_free(struct mm_struct *mm, unsigned long *table); |
43 | static void __crst_table_free(struct mm_struct *mm, unsigned long *table); | ||
44 | 43 | ||
45 | static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm) | 44 | static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm) |
46 | { | 45 | { |
@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head) | |||
67 | while (batch->pgt_index > 0) | 66 | while (batch->pgt_index > 0) |
68 | __page_table_free(batch->mm, batch->table[--batch->pgt_index]); | 67 | __page_table_free(batch->mm, batch->table[--batch->pgt_index]); |
69 | while (batch->crst_index < RCU_FREELIST_SIZE) | 68 | while (batch->crst_index < RCU_FREELIST_SIZE) |
70 | __crst_table_free(batch->mm, batch->table[batch->crst_index++]); | 69 | crst_table_free(batch->mm, batch->table[batch->crst_index++]); |
71 | free_page((unsigned long) batch); | 70 | free_page((unsigned long) batch); |
72 | } | 71 | } |
73 | 72 | ||
@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg) | |||
125 | } | 124 | } |
126 | early_param("vmalloc", parse_vmalloc); | 125 | early_param("vmalloc", parse_vmalloc); |
127 | 126 | ||
128 | unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) | 127 | unsigned long *crst_table_alloc(struct mm_struct *mm) |
129 | { | 128 | { |
130 | struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); | 129 | struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); |
131 | 130 | ||
132 | if (!page) | 131 | if (!page) |
133 | return NULL; | 132 | return NULL; |
134 | page->index = 0; | ||
135 | if (noexec) { | ||
136 | struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER); | ||
137 | if (!shadow) { | ||
138 | __free_pages(page, ALLOC_ORDER); | ||
139 | return NULL; | ||
140 | } | ||
141 | page->index = page_to_phys(shadow); | ||
142 | } | ||
143 | spin_lock_bh(&mm->context.list_lock); | ||
144 | list_add(&page->lru, &mm->context.crst_list); | ||
145 | spin_unlock_bh(&mm->context.list_lock); | ||
146 | return (unsigned long *) page_to_phys(page); | 133 | return (unsigned long *) page_to_phys(page); |
147 | } | 134 | } |
148 | 135 | ||
149 | static void __crst_table_free(struct mm_struct *mm, unsigned long *table) | ||
150 | { | ||
151 | unsigned long *shadow = get_shadow_table(table); | ||
152 | |||
153 | if (shadow) | ||
154 | free_pages((unsigned long) shadow, ALLOC_ORDER); | ||
155 | free_pages((unsigned long) table, ALLOC_ORDER); | ||
156 | } | ||
157 | |||
158 | void crst_table_free(struct mm_struct *mm, unsigned long *table) | 136 | void crst_table_free(struct mm_struct *mm, unsigned long *table) |
159 | { | 137 | { |
160 | struct page *page = virt_to_page(table); | 138 | free_pages((unsigned long) table, ALLOC_ORDER); |
161 | |||
162 | spin_lock_bh(&mm->context.list_lock); | ||
163 | list_del(&page->lru); | ||
164 | spin_unlock_bh(&mm->context.list_lock); | ||
165 | __crst_table_free(mm, table); | ||
166 | } | 139 | } |
167 | 140 | ||
168 | void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table) | 141 | void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table) |
169 | { | 142 | { |
170 | struct rcu_table_freelist *batch; | 143 | struct rcu_table_freelist *batch; |
171 | struct page *page = virt_to_page(table); | ||
172 | 144 | ||
173 | spin_lock_bh(&mm->context.list_lock); | ||
174 | list_del(&page->lru); | ||
175 | spin_unlock_bh(&mm->context.list_lock); | ||
176 | if (atomic_read(&mm->mm_users) < 2 && | 145 | if (atomic_read(&mm->mm_users) < 2 && |
177 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { | 146 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { |
178 | __crst_table_free(mm, table); | 147 | crst_table_free(mm, table); |
179 | return; | 148 | return; |
180 | } | 149 | } |
181 | batch = rcu_table_freelist_get(mm); | 150 | batch = rcu_table_freelist_get(mm); |
182 | if (!batch) { | 151 | if (!batch) { |
183 | smp_call_function(smp_sync, NULL, 1); | 152 | smp_call_function(smp_sync, NULL, 1); |
184 | __crst_table_free(mm, table); | 153 | crst_table_free(mm, table); |
185 | return; | 154 | return; |
186 | } | 155 | } |
187 | batch->table[--batch->crst_index] = table; | 156 | batch->table[--batch->crst_index] = table; |
@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) | |||
197 | 166 | ||
198 | BUG_ON(limit > (1UL << 53)); | 167 | BUG_ON(limit > (1UL << 53)); |
199 | repeat: | 168 | repeat: |
200 | table = crst_table_alloc(mm, mm->context.noexec); | 169 | table = crst_table_alloc(mm); |
201 | if (!table) | 170 | if (!table) |
202 | return -ENOMEM; | 171 | return -ENOMEM; |
203 | spin_lock_bh(&mm->page_table_lock); | 172 | spin_lock_bh(&mm->page_table_lock); |
@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
273 | unsigned long *table; | 242 | unsigned long *table; |
274 | unsigned long bits; | 243 | unsigned long bits; |
275 | 244 | ||
276 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 245 | bits = (mm->context.has_pgste) ? 3UL : 1UL; |
277 | spin_lock_bh(&mm->context.list_lock); | 246 | spin_lock_bh(&mm->context.list_lock); |
278 | page = NULL; | 247 | page = NULL; |
279 | if (!list_empty(&mm->context.pgtable_list)) { | 248 | if (!list_empty(&mm->context.pgtable_list)) { |
@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
329 | struct page *page; | 298 | struct page *page; |
330 | unsigned long bits; | 299 | unsigned long bits; |
331 | 300 | ||
332 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 301 | bits = (mm->context.has_pgste) ? 3UL : 1UL; |
333 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | 302 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); |
334 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 303 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
335 | spin_lock_bh(&mm->context.list_lock); | 304 | spin_lock_bh(&mm->context.list_lock); |
@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table) | |||
366 | page_table_free(mm, table); | 335 | page_table_free(mm, table); |
367 | return; | 336 | return; |
368 | } | 337 | } |
369 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 338 | bits = (mm->context.has_pgste) ? 3UL : 1UL; |
370 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | 339 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); |
371 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 340 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
372 | spin_lock_bh(&mm->context.list_lock); | 341 | spin_lock_bh(&mm->context.list_lock); |
@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table) | |||
379 | rcu_table_freelist_finish(); | 348 | rcu_table_freelist_finish(); |
380 | } | 349 | } |
381 | 350 | ||
382 | void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | ||
383 | { | ||
384 | struct page *page; | ||
385 | |||
386 | spin_lock_bh(&mm->context.list_lock); | ||
387 | /* Free shadow region and segment tables. */ | ||
388 | list_for_each_entry(page, &mm->context.crst_list, lru) | ||
389 | if (page->index) { | ||
390 | free_pages((unsigned long) page->index, ALLOC_ORDER); | ||
391 | page->index = 0; | ||
392 | } | ||
393 | /* "Free" second halves of page tables. */ | ||
394 | list_for_each_entry(page, &mm->context.pgtable_list, lru) | ||
395 | page->flags &= ~SECOND_HALVES; | ||
396 | spin_unlock_bh(&mm->context.list_lock); | ||
397 | mm->context.noexec = 0; | ||
398 | update_mm(mm, tsk); | ||
399 | } | ||
400 | |||
401 | /* | 351 | /* |
402 | * switch on pgstes for its userspace process (for kvm) | 352 | * switch on pgstes for its userspace process (for kvm) |
403 | */ | 353 | */ |