author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-05-23 04:24:23 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-05-23 04:24:28 -0400
commit	043d07084b5347a26eab0a07aa13a4a929ad9e71
tree	13c2a902ccb2ecb779722c8b81ada32d242760c8 /arch/s390/include/asm
parent	9bf05098ce34e68a9e15f09ad6cdfea4ed64057a
[S390] Remove data execution protection
The noexec support on s390 does not rely on a bit in the page table
entry but utilizes the secondary space mode to distinguish between
memory accesses for instructions vs. data. The noexec code relies on
the assumption that the cpu will always use the secondary space page
table for data accesses while it is running in the secondary space
mode. Up to the z9-109 class machines this has been the case.
Unfortunately this is not true anymore with z10 and later machines.
The load-relative-long instructions lrl, lgrl and lgfrl access the
memory operand using the same addressing-space mode that has been
used to fetch the instruction. This breaks the noexec mode for all
user space binaries compiled with march=z10 or later. The only
option is to remove the current noexec support.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
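[Editor's note: for readers coming to this commit cold, the failure mode is easiest to see in generated code. Below is a hypothetical sketch in GNU assembler for s390x, not part of the patch; the symbol some_var is made up. The old scheme worked because ordinary data loads went through the secondary-space (data) ASCE, while lgrl on z10+ fetches its operand through the same address space the instruction itself was fetched from:

	# pre-z10 style: compute the address with larl, then load via
	# the secondary-space (data) page table that noexec relies on
	larl	%r1,some_var		# address of some_var
	lg	%r2,0(%r1)		# data access -> secondary space

	# -march=z10 style: load-relative-long fetches its operand in
	# the instruction-fetch address space, bypassing the data table
	lgrl	%r2,some_var		# data access -> instruction space

Under the removed scheme the second form goes to the shadow "exec" table, where plain data pages are not mapped, so the load faults. That is why the commit message concludes that removal is the only option.]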
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--	arch/s390/include/asm/elf.h		| 12
-rw-r--r--	arch/s390/include/asm/hugetlb.h		| 17
-rw-r--r--	arch/s390/include/asm/lowcore.h		| 4
-rw-r--r--	arch/s390/include/asm/mmu.h		| 3
-rw-r--r--	arch/s390/include/asm/mmu_context.h	| 6
-rw-r--r--	arch/s390/include/asm/pgalloc.h		| 28
-rw-r--r--	arch/s390/include/asm/pgtable.h		| 75
-rw-r--r--	arch/s390/include/asm/tlbflush.h	| 11
8 files changed, 24 insertions, 132 deletions
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 10c029cfcc7d..64b61bf72e93 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -196,18 +196,6 @@ do { \
 } while (0)
 #endif /* __s390x__ */
 
-/*
- * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
- */
-#define elf_read_implies_exec(ex, executable_stack)	\
-({							\
-	if (current->mm->context.noexec &&		\
-	    executable_stack != EXSTACK_DISABLE_X)	\
-		disable_noexec(current->mm, current);	\
-	current->mm->context.noexec == 0;		\
-})
-
 #define STACK_RND_MASK	0x7ffUL
 
 #define ARCH_DLINFO \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index b56403c2df28..799ed0f1643d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -111,21 +111,10 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 {
 	pmd_t *pmdp = (pmd_t *) ptep;
 
-	if (!MACHINE_HAS_IDTE) {
-		__pmd_csp(pmdp);
-		if (mm->context.noexec) {
-			pmdp = get_shadow_table(pmdp);
-			__pmd_csp(pmdp);
-		}
-		return;
-	}
-
-	__pmd_idte(address, pmdp);
-	if (mm->context.noexec) {
-		pmdp = get_shadow_table(pmdp);
-		__pmd_idte(address, pmdp);
-	}
-	return;
+	if (MACHINE_HAS_IDTE)
+		__pmd_idte(address, pmdp);
+	else
+		__pmd_csp(pmdp);
 }
 
 #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 65e172f8209d..b8624d53c379 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -124,7 +124,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u32	kernel_asce;			/* 0x02ac */
 	__u32	user_asce;			/* 0x02b0 */
-	__u32	user_exec_asce;			/* 0x02b4 */
+	__u8	pad_0x02b4[0x02b8-0x02b4];	/* 0x02b4 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x02b8 */
@@ -255,7 +255,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u64	kernel_asce;			/* 0x0310 */
 	__u64	user_asce;			/* 0x0318 */
-	__u64	user_exec_asce;			/* 0x0320 */
+	__u8	pad_0x0320[0x0328-0x0320];	/* 0x0320 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x0328 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 78522cdefdd4..818e8298a6bd 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,19 +5,16 @@ typedef struct {
 	atomic_t attach_count;
 	unsigned int flush_mm;
 	spinlock_t list_lock;
-	struct list_head crst_list;
 	struct list_head pgtable_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
-	int noexec;
 	int has_pgste;	 /* The mmu context has extended page tables */
 	int alloc_pgste; /* cloned contexts will have extended page tables */
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)						      \
 	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock),   \
-	.context.crst_list = LIST_HEAD_INIT(name.context.crst_list),	      \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
 
 #endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8c277caa8d3a..5682f160ff82 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,11 +35,9 @@ static inline int init_new_context(struct task_struct *tsk,
 	 * and if has_pgste is set, it will create extended page
 	 * tables.
 	 */
-		mm->context.noexec = 0;
 		mm->context.has_pgste = 1;
 		mm->context.alloc_pgste = 1;
 	} else {
-		mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
 		mm->context.has_pgste = 0;
 		mm->context.alloc_pgste = 0;
 	}
@@ -63,10 +61,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
 	if (user_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
-		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
-		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
-			     : : "m" (S390_lowcore.user_exec_asce) );
+			     : : "m" (S390_lowcore.user_asce) );
 	} else
 		/* Load home space page table origin. */
 		asm volatile(LCTL_OPCODE" 13,13,%0"
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 082eb4e50e8b..739ff9ec1395 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -19,14 +19,13 @@
 
 #define check_pgt_cache()	do {} while (0)
 
-unsigned long *crst_table_alloc(struct mm_struct *, int);
+unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mm_struct *, unsigned long *);
-void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
@@ -50,9 +49,6 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
 	clear_table(crst, entry, sizeof(unsigned long)*2048);
-	crst = get_shadow_table(crst);
-	if (crst)
-		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
@@ -90,7 +86,7 @@ void crst_table_downgrade(struct mm_struct *, unsigned long limit);
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	unsigned long *table = crst_table_alloc(mm);
 	if (table)
 		crst_table_init(table, _REGION3_ENTRY_EMPTY);
 	return (pud_t *) table;
@@ -99,7 +95,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	unsigned long *table = crst_table_alloc(mm);
 	if (table)
 		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
 	return (pmd_t *) table;
@@ -115,11 +111,6 @@ static inline void pgd_populate_kernel(struct mm_struct *mm,
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
 	pgd_populate_kernel(mm, pgd, pud);
-	if (mm->context.noexec) {
-		pgd = get_shadow_table(pgd);
-		pud = get_shadow_table(pud);
-		pgd_populate_kernel(mm, pgd, pud);
-	}
 }
 
 static inline void pud_populate_kernel(struct mm_struct *mm,
@@ -131,11 +122,6 @@ static inline void pud_populate_kernel(struct mm_struct *mm,
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
 	pud_populate_kernel(mm, pud, pmd);
-	if (mm->context.noexec) {
-		pud = get_shadow_table(pud);
-		pmd = get_shadow_table(pmd);
-		pud_populate_kernel(mm, pud, pmd);
-	}
 }
 
 #endif /* __s390x__ */
@@ -143,10 +129,8 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.list_lock);
-	INIT_LIST_HEAD(&mm->context.crst_list);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	return (pgd_t *)
-		crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
+	return (pgd_t *) crst_table_alloc(mm);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
@@ -160,10 +144,6 @@ static inline void pmd_populate(struct mm_struct *mm,
 			       pmd_t *pmd, pgtable_t pte)
 {
 	pmd_populate_kernel(mm, pmd, pte);
-	if (mm->context.noexec) {
-		pmd = get_shadow_table(pmd);
-		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
-	}
 }
 
 #define pmd_pgtable(pmd) \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 02ace3491c51..763620ec7925 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -256,8 +256,6 @@ extern unsigned long VMALLOC_START;
 #define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
 #define _PAGE_TYPE_RO		0x200
 #define _PAGE_TYPE_RW		0x000
-#define _PAGE_TYPE_EX_RO	0x202
-#define _PAGE_TYPE_EX_RW	0x002
 
 /*
  * Only four types for huge pages, using the invalid bit and protection bit
@@ -287,8 +285,6 @@ extern unsigned long VMALLOC_START;
  * _PAGE_TYPE_FILE	11?1   ->   11?1
  * _PAGE_TYPE_RO	0100   ->   1100
  * _PAGE_TYPE_RW	0000   ->   1000
- * _PAGE_TYPE_EX_RO	0110   ->   1110
- * _PAGE_TYPE_EX_RW	0010   ->   1010
  *
  * pte_none is true for bits combinations 1000, 1010, 1100, 1110
  * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
@@ -387,55 +383,33 @@ extern unsigned long VMALLOC_START;
 #define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
 #define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
 #define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
-#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
-#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)
 
 #define PAGE_KERNEL	PAGE_RW
 #define PAGE_COPY	PAGE_RO
 
 /*
- * Dependent on the EXEC_PROTECT option s390 can do execute protection.
- * Write permission always implies read permission. In theory with a
- * primary/secondary page table execute only can be implemented but
- * it would cost an additional bit in the pte to distinguish all the
- * different pte types. To avoid that execute permission currently
- * implies read permission as well.
+ * On s390 the page table entry has an invalid bit and a read-only bit.
+ * Read permission implies execute permission and write permission
+ * implies read permission.
  */
          /*xwr*/
 #define __P000	PAGE_NONE
 #define __P001	PAGE_RO
 #define __P010	PAGE_RO
 #define __P011	PAGE_RO
-#define __P100	PAGE_EX_RO
-#define __P101	PAGE_EX_RO
-#define __P110	PAGE_EX_RO
-#define __P111	PAGE_EX_RO
+#define __P100	PAGE_RO
+#define __P101	PAGE_RO
+#define __P110	PAGE_RO
+#define __P111	PAGE_RO
 
 #define __S000	PAGE_NONE
 #define __S001	PAGE_RO
 #define __S010	PAGE_RW
 #define __S011	PAGE_RW
-#define __S100	PAGE_EX_RO
-#define __S101	PAGE_EX_RO
-#define __S110	PAGE_EX_RW
-#define __S111	PAGE_EX_RW
-
-#ifndef __s390x__
-# define PxD_SHADOW_SHIFT	1
-#else /* __s390x__ */
-# define PxD_SHADOW_SHIFT	2
-#endif /* __s390x__ */
-
-static inline void *get_shadow_table(void *table)
-{
-	unsigned long addr, offset;
-	struct page *page;
-
-	addr = (unsigned long) table;
-	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
-	page = virt_to_page((void *)(addr ^ offset));
-	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
-}
+#define __S100	PAGE_RO
+#define __S101	PAGE_RO
+#define __S110	PAGE_RW
+#define __S111	PAGE_RW
 
 /*
  * Certain architectures need to do special things when PTEs
@@ -446,14 +420,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t entry)
 {
 	*ptep = entry;
-	if (mm->context.noexec) {
-		if (!(pte_val(entry) & _PAGE_INVALID) &&
-		    (pte_val(entry) & _PAGE_SWX))
-			pte_val(entry) |= _PAGE_RO;
-		else
-			pte_val(entry) = _PAGE_TYPE_EMPTY;
-		ptep[PTRS_PER_PTE] = entry;
-	}
 }
 
 /*
@@ -662,11 +628,7 @@ static inline void pgd_clear_kernel(pgd_t * pgd)
 
 static inline void pgd_clear(pgd_t * pgd)
 {
-	pgd_t *shadow = get_shadow_table(pgd);
-
 	pgd_clear_kernel(pgd);
-	if (shadow)
-		pgd_clear_kernel(shadow);
 }
 
 static inline void pud_clear_kernel(pud_t *pud)
@@ -677,13 +639,8 @@ static inline void pud_clear_kernel(pud_t *pud)
 
 static inline void pud_clear(pud_t *pud)
 {
-	pud_t *shadow = get_shadow_table(pud);
-
 	pud_clear_kernel(pud);
-	if (shadow)
-		pud_clear_kernel(shadow);
 }
-
 #endif /* __s390x__ */
 
 static inline void pmd_clear_kernel(pmd_t * pmdp)
@@ -693,18 +650,12 @@ static inline void pmd_clear_kernel(pmd_t * pmdp)
 
 static inline void pmd_clear(pmd_t *pmd)
 {
-	pmd_t *shadow = get_shadow_table(pmd);
-
 	pmd_clear_kernel(pmd);
-	if (shadow)
-		pmd_clear_kernel(shadow);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-	if (mm->context.noexec)
-		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -903,10 +854,6 @@ static inline void ptep_invalidate(struct mm_struct *mm,
 	}
 	__ptep_ipte(address, ptep);
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-	if (mm->context.noexec) {
-		__ptep_ipte(address, ptep + PTRS_PER_PTE);
-		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
-	}
 }
 
 /*
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 29d5d6d4becc..4fdcefc1a98d 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,16 +80,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * on all cpus instead of doing a local flush if the mm
 	 * only ran on the local cpu.
 	 */
-	if (MACHINE_HAS_IDTE) {
-		if (mm->context.noexec)
-			__tlb_flush_idte((unsigned long)
-					 get_shadow_table(mm->pgd) |
-					 mm->context.asce_bits);
+	if (MACHINE_HAS_IDTE)
 		__tlb_flush_idte((unsigned long) mm->pgd |
 				 mm->context.asce_bits);
-		return;
-	}
-	__tlb_flush_full(mm);
+	else
+		__tlb_flush_full(mm);
 }
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)