 arch/s390/mm/init.c            |   2
 arch/s390/mm/pgtable.c         | 102
 arch/s390/mm/vmem.c            |  14
 include/asm-s390/elf.h         |  13
 include/asm-s390/mmu.h         |   8
 include/asm-s390/mmu_context.h |  14
 include/asm-s390/page.h        |  36
 include/asm-s390/pgalloc.h     |  79
 include/asm-s390/pgtable.h     | 105
 include/asm-s390/tlb.h         |   6
 include/asm-s390/tlbflush.h    |  11
 11 files changed, 207 insertions(+), 183 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 983ec6ec0e7c..01dfe20f846d 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -184,7 +184,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	pmd = pmd_offset(pud, address);
 	pte = pte_offset_kernel(pmd, address);
 	if (!enable) {
-		ptep_invalidate(address, pte);
+		ptep_invalidate(&init_mm, address, pte);
 		continue;
 	}
 	*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 019f518cd5a0..809e77893039 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -26,8 +26,14 @@
 
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER	1
+#define TABLES_PER_PAGE	4
+#define FRAG_MASK	15UL
+#define SECOND_HALVES	10UL
 #else
 #define ALLOC_ORDER	2
+#define TABLES_PER_PAGE	2
+#define FRAG_MASK	3UL
+#define SECOND_HALVES	2UL
 #endif
 
 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -45,13 +51,20 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
 		}
 		page->index = page_to_phys(shadow);
 	}
+	spin_lock(&mm->page_table_lock);
+	list_add(&page->lru, &mm->context.crst_list);
+	spin_unlock(&mm->page_table_lock);
 	return (unsigned long *) page_to_phys(page);
 }
 
-void crst_table_free(unsigned long *table)
+void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
 	unsigned long *shadow = get_shadow_table(table);
+	struct page *page = virt_to_page(table);
 
+	spin_lock(&mm->page_table_lock);
+	list_del(&page->lru);
+	spin_unlock(&mm->page_table_lock);
 	if (shadow)
 		free_pages((unsigned long) shadow, ALLOC_ORDER);
 	free_pages((unsigned long) table, ALLOC_ORDER);
@@ -60,37 +73,84 @@ void crst_table_free(unsigned long *table)
 /*
  * page table entry allocation/free routines.
  */
-unsigned long *page_table_alloc(int noexec)
+unsigned long *page_table_alloc(struct mm_struct *mm)
 {
-	struct page *page = alloc_page(GFP_KERNEL);
+	struct page *page;
 	unsigned long *table;
+	unsigned long bits;
 
-	if (!page)
-		return NULL;
-	page->index = 0;
-	if (noexec) {
-		struct page *shadow = alloc_page(GFP_KERNEL);
-		if (!shadow) {
-			__free_page(page);
+	bits = mm->context.noexec ? 3UL : 1UL;
+	spin_lock(&mm->page_table_lock);
+	page = NULL;
+	if (!list_empty(&mm->context.pgtable_list)) {
+		page = list_first_entry(&mm->context.pgtable_list,
+					struct page, lru);
+		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+			page = NULL;
+	}
+	if (!page) {
+		spin_unlock(&mm->page_table_lock);
+		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+		if (!page)
 			return NULL;
-		}
-		table = (unsigned long *) page_to_phys(shadow);
+		pgtable_page_ctor(page);
+		page->flags &= ~FRAG_MASK;
+		table = (unsigned long *) page_to_phys(page);
 		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
-		page->index = (addr_t) table;
+		spin_lock(&mm->page_table_lock);
+		list_add(&page->lru, &mm->context.pgtable_list);
 	}
-	pgtable_page_ctor(page);
 	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+	while (page->flags & bits) {
+		table += 256;
+		bits <<= 1;
+	}
+	page->flags |= bits;
+	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+		list_move_tail(&page->lru, &mm->context.pgtable_list);
+	spin_unlock(&mm->page_table_lock);
 	return table;
 }
 
-void page_table_free(unsigned long *table)
+void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
-	unsigned long *shadow = get_shadow_pte(table);
+	struct page *page;
+	unsigned long bits;
 
-	pgtable_page_dtor(virt_to_page(table));
-	if (shadow)
-		free_page((unsigned long) shadow);
-	free_page((unsigned long) table);
+	bits = mm->context.noexec ? 3UL : 1UL;
+	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	spin_lock(&mm->page_table_lock);
+	page->flags ^= bits;
+	if (page->flags & FRAG_MASK) {
+		/* Page now has some free pgtable fragments. */
+		list_move(&page->lru, &mm->context.pgtable_list);
+		page = NULL;
+	} else
+		/* All fragments of the 4K page have been freed. */
+		list_del(&page->lru);
+	spin_unlock(&mm->page_table_lock);
+	if (page) {
+		pgtable_page_dtor(page);
+		__free_page(page);
+	}
+}
 
+void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
+{
+	struct page *page;
+
+	spin_lock(&mm->page_table_lock);
+	/* Free shadow region and segment tables. */
+	list_for_each_entry(page, &mm->context.crst_list, lru)
+		if (page->index) {
+			free_pages((unsigned long) page->index, ALLOC_ORDER);
+			page->index = 0;
+		}
+	/* "Free" second halves of page tables. */
+	list_for_each_entry(page, &mm->context.pgtable_list, lru)
+		page->flags &= ~SECOND_HALVES;
+	spin_unlock(&mm->page_table_lock);
+	mm->context.noexec = 0;
+	update_mm(mm, tsk);
 }
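
The allocator above packs several 256-entry page tables into a single 4K page and tracks which fragments are busy in the low bits of page->flags. A minimal userspace sketch of that bookkeeping, using the 64-bit constants from this patch; alloc_fragment() and its return convention are made up for illustration, not kernel API:

/*
 * Standalone sketch (not kernel code) of the page->flags fragment
 * bookkeeping in page_table_alloc(), 64-bit constants from the patch.
 */
#include <stdio.h>

#define TABLES_PER_PAGE	2	/* two 2K tables per 4K page (64-bit) */
#define FRAG_MASK	3UL

static unsigned long page_flags;	/* stands in for page->flags */

static long alloc_fragment(int noexec)
{
	/* noexec claims two adjacent fragments: table plus shadow */
	unsigned long bits = noexec ? 3UL : 1UL;
	long offset = 0;

	if ((page_flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		return -1;	/* page full; the kernel allocates a new one */
	while (page_flags & bits) {
		offset += 256;	/* skip one 256-entry fragment */
		bits <<= 1;
	}
	page_flags |= bits;
	return offset;		/* fragment offset, in pte slots */
}

int main(void)
{
	printf("table 1 at slot %ld\n", alloc_fragment(0));	/* 0 */
	printf("table 2 at slot %ld\n", alloc_fragment(0));	/* 256 */
	printf("page full -> %ld\n", alloc_fragment(0));	/* -1 */
	return 0;
}

With noexec enabled, bits starts at 3UL, so a table and its shadow always occupy two adjacent fragments and are released together by the XOR in page_table_free().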
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 7c1287ccf788..434491f8f47c 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -84,13 +84,18 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static inline pte_t *vmem_pte_alloc(void)
+static pte_t __init_refok *vmem_pte_alloc(void)
 {
-	pte_t *pte = vmem_alloc_pages(0);
+	pte_t *pte;
 
+	if (slab_is_available())
+		pte = (pte_t *) page_table_alloc(&init_mm);
+	else
+		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+		    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
 
@@ -360,6 +365,9 @@ void __init vmem_map_init(void)
 {
 	int i;
 
+	INIT_LIST_HEAD(&init_mm.context.crst_list);
+	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
+	init_mm.context.noexec = 0;
 	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index b73a424d0f97..8181ca5b98f4 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -115,6 +115,7 @@ typedef s390_regs elf_gregset_t;
 
 #include <linux/sched.h>	/* for task_struct */
 #include <asm/system.h>		/* for save_access_regs */
+#include <asm/mmu_context.h>
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
@@ -214,4 +215,16 @@ do { \
 } while (0)
 #endif /* __s390x__ */
 
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec(ex, executable_stack)	\
+({							\
+	if (current->mm->context.noexec &&		\
+	    executable_stack != EXSTACK_DISABLE_X)	\
+		disable_noexec(current->mm, current);	\
+	current->mm->context.noexec == 0;		\
+})
+
 #endif
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index ccd36d26615a..13ec4215f437 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -1,7 +1,11 @@
 #ifndef __MMU_H
 #define __MMU_H
 
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+	struct list_head crst_list;
+	struct list_head pgtable_list;
+	unsigned long asce_bits;
+	int noexec;
+} mm_context_t;
 
 #endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index a77d4ba3c8eb..3eaac5efc632 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -10,15 +10,17 @@
 #define __S390_MMU_CONTEXT_H
 
 #include <asm/pgalloc.h>
+#include <asm/uaccess.h>
 #include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 #ifdef CONFIG_64BIT
-	mm->context |= _ASCE_TYPE_REGION3;
+	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
+	mm->context.noexec = s390_noexec;
 	return 0;
 }
 
@@ -32,11 +34,13 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
-	S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
+	pgd_t *pgd = mm->pgd;
+
+	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
 	if (switch_amode) {
 		/* Load primary space page table origin. */
-		pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
-		S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
+		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
+		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
 			     : : "m" (S390_lowcore.user_exec_asce) );
 	} else
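
For reference, update_mm() above simply composes an address-space-control element from the top-level table origin and the length/type bits now kept in context.asce_bits. A hedged userspace sketch of that composition; the two bit values below are placeholders, not the real s390 definitions:

/*
 * Sketch only: ASCE = physical origin of the top-level table ORed
 * with the length/type bits carried in context.asce_bits.
 */
#include <stdio.h>

#define ASCE_TABLE_LENGTH	0x03UL	/* placeholder, not the s390 value */
#define ASCE_TYPE_REGION3	0x04UL	/* placeholder, not the s390 value */

int main(void)
{
	unsigned long asce_bits = ASCE_TABLE_LENGTH | ASCE_TYPE_REGION3;
	unsigned long pgd_phys = 0x2000000UL;	/* made-up pgd origin */

	/* what update_mm() would store in S390_lowcore.user_asce */
	printf("user_asce = %#lx\n", asce_bits | pgd_phys);
	return 0;
}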
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 7f29a981f48c..fe7f92b6ae6d 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -74,43 +74,17 @@ static inline void copy_page(void *to, void *from)
 
 typedef struct { unsigned long pgprot; } pgprot_t;
 typedef struct { unsigned long pte; } pte_t;
-
-#define pte_val(x)      ((x).pte)
-#define pgprot_val(x)   ((x).pgprot)
-
-#ifndef __s390x__
-
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pud; } pud_t;
-typedef struct {
-        unsigned long pgd0;
-        unsigned long pgd1;
-        unsigned long pgd2;
-        unsigned long pgd3;
-        } pgd_t;
-
-#define pmd_val(x)      ((x).pmd)
-#define pud_val(x)      ((x).pud)
-#define pgd_val(x)      ((x).pgd0)
-
-#else /* __s390x__ */
-
-typedef struct {
-        unsigned long pmd0;
-        unsigned long pmd1;
-        } pmd_t;
-typedef struct { unsigned long pud; } pud_t;
 typedef struct { unsigned long pgd; } pgd_t;
+typedef pte_t *pgtable_t;
 
-#define pmd_val(x)      ((x).pmd0)
-#define pmd_val1(x)     ((x).pmd1)
+#define pgprot_val(x)	((x).pgprot)
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
 #define pud_val(x)	((x).pud)
 #define pgd_val(x)	((x).pgd)
 
-#endif /* __s390x__ */
-
-typedef struct page *pgtable_t;
-
 #define __pte(x)        ((pte_t) { (x) } )
 #define __pmd(x)        ((pmd_t) { (x) } )
 #define __pgd(x)        ((pgd_t) { (x) } )
@@ -167,7 +141,7 @@ static inline int pfn_valid(unsigned long pfn)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #include <asm-generic/memory_model.h>
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 900d44807e10..af4aee856df3 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -20,10 +20,11 @@
 #define check_pgt_cache()	do {} while (0)
 
 unsigned long *crst_table_alloc(struct mm_struct *, int);
-void crst_table_free(unsigned long *);
+void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(int);
-void page_table_free(unsigned long *);
+unsigned long *page_table_alloc(struct mm_struct *);
+void page_table_free(struct mm_struct *, unsigned long *);
+void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
@@ -80,12 +81,12 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
-	if (crst)
-		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
-	return (pmd_t *) crst;
+	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	if (table)
+		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) table;
 }
-#define pmd_free(mm, pmd) crst_table_free((unsigned long *)pmd)
+#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
 
 #define pgd_populate(mm, pgd, pud) BUG()
 #define pgd_populate_kernel(mm, pgd, pud) BUG()
@@ -98,63 +99,55 @@ static inline void pud_populate_kernel(struct mm_struct *mm,
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pud_t *shadow_pud = get_shadow_table(pud);
-	pmd_t *shadow_pmd = get_shadow_table(pmd);
-
-	if (shadow_pud && shadow_pmd)
-		pud_populate_kernel(mm, shadow_pud, shadow_pmd);
 	pud_populate_kernel(mm, pud, pmd);
+	if (mm->context.noexec) {
+		pud = get_shadow_table(pud);
+		pmd = get_shadow_table(pmd);
+		pud_populate_kernel(mm, pud, pmd);
+	}
 }
 
 #endif /* __s390x__ */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	unsigned long *crst;
+
+	INIT_LIST_HEAD(&mm->context.crst_list);
+	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	crst = crst_table_alloc(mm, s390_noexec);
 	if (crst)
 		crst_table_init(crst, pgd_entry_type(mm));
 	return (pgd_t *) crst;
 }
-#define pgd_free(mm, pgd) crst_table_free((unsigned long *) pgd)
+#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
-static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+				       pmd_t *pmd, pte_t *pte)
 {
-#ifndef __s390x__
-	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
-	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
-	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
-	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
-#else /* __s390x__ */
 	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
-#endif /* __s390x__ */
 }
 
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
+static inline void pmd_populate(struct mm_struct *mm,
+				pmd_t *pmd, pgtable_t pte)
 {
-	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_table(pmd);
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
 	pmd_populate_kernel(mm, pmd, pte);
-	if (shadow_pmd && shadow_pte)
-		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
+	if (mm->context.noexec) {
+		pmd = get_shadow_table(pmd);
+		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
+	}
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#define pmd_pgtable(pmd) \
+	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
 
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) \
-	((pte_t *) page_table_alloc(s390_noexec))
-#define pte_alloc_one(mm, vmaddr) \
-	virt_to_page(page_table_alloc(s390_noexec))
-
-#define pte_free_kernel(mm, pte) \
-	page_table_free((unsigned long *) pte)
-#define pte_free(mm, pte) \
-	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+
+#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
+#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
 
 #endif /* _S390_PGALLOC_H */
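
The new pmd_pgtable() recovers the 2K fragment origin from a segment entry by masking with -sizeof(pte_t)*PTRS_PER_PTE, which for 8-byte ptes is the two's complement of 2048, i.e. a mask clearing the low 11 bits. A small standalone check of that arithmetic (the sample segment-entry value is made up):

/*
 * Userspace check of the pmd_pgtable() mask; not kernel code.
 */
#include <stdio.h>

#define PTRS_PER_PTE	256

int main(void)
{
	unsigned long pte_size = sizeof(unsigned long);	/* pte_t is 8 bytes */
	unsigned long mask = -(pte_size * PTRS_PER_PTE);	/* ~2047UL */
	unsigned long pmd_val = 0x12345800UL | 0x23UL;	/* origin + low bits */

	printf("mask   = %#lx\n", mask);		/* 0xfffffffffffff800 */
	printf("origin = %#lx\n", pmd_val & mask);	/* 0x12345800 */
	return 0;
}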
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65d333849150..4fc937711482 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -57,11 +57,11 @@ extern char empty_zero_page[PAGE_SIZE];
  * PGDIR_SHIFT determines what a third-level page table entry can map
  */
 #ifndef __s390x__
-# define PMD_SHIFT	22
-# define PUD_SHIFT	22
-# define PGDIR_SHIFT	22
+# define PMD_SHIFT	20
+# define PUD_SHIFT	20
+# define PGDIR_SHIFT	20
 #else /* __s390x__ */
-# define PMD_SHIFT	21
+# define PMD_SHIFT	20
 # define PUD_SHIFT	31
 # define PGDIR_SHIFT	31
 #endif /* __s390x__ */
@@ -79,17 +79,14 @@ extern char empty_zero_page[PAGE_SIZE];
  * for S390 segment-table entries are combined to one PGD
  * that leads to 1024 pte per pgd
  */
+#define PTRS_PER_PTE	256
 #ifndef __s390x__
-# define PTRS_PER_PTE    1024
-# define PTRS_PER_PMD    1
-# define PTRS_PER_PUD	1
-# define PTRS_PER_PGD    512
+#define PTRS_PER_PMD	1
 #else /* __s390x__ */
-# define PTRS_PER_PTE    512
-# define PTRS_PER_PMD    1024
-# define PTRS_PER_PUD	1
-# define PTRS_PER_PGD    2048
+#define PTRS_PER_PMD	2048
 #endif /* __s390x__ */
+#define PTRS_PER_PUD	1
+#define PTRS_PER_PGD	2048
 
 #define FIRST_USER_ADDRESS  0
 
@@ -376,24 +373,6 @@ extern char empty_zero_page[PAGE_SIZE];
 # define PxD_SHADOW_SHIFT	2
 #endif /* __s390x__ */
 
-static inline struct page *get_shadow_page(struct page *page)
-{
-	if (s390_noexec && page->index)
-		return virt_to_page((void *)(addr_t) page->index);
-	return NULL;
-}
-
-static inline void *get_shadow_pte(void *table)
-{
-	unsigned long addr, offset;
-	struct page *page;
-
-	addr = (unsigned long) table;
-	offset = addr & (PAGE_SIZE - 1);
-	page = virt_to_page((void *)(addr ^ offset));
-	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
-}
-
 static inline void *get_shadow_table(void *table)
 {
 	unsigned long addr, offset;
@@ -411,17 +390,16 @@ static inline void *get_shadow_table(void *table)
  * hook is made available.
  */
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *pteptr, pte_t pteval)
+			      pte_t *ptep, pte_t entry)
 {
-	pte_t *shadow_pte = get_shadow_pte(pteptr);
-
-	*pteptr = pteval;
-	if (shadow_pte) {
-		if (!(pte_val(pteval) & _PAGE_INVALID) &&
-		    (pte_val(pteval) & _PAGE_SWX))
-			pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
+	*ptep = entry;
+	if (mm->context.noexec) {
+		if (!(pte_val(entry) & _PAGE_INVALID) &&
+		    (pte_val(entry) & _PAGE_SWX))
+			pte_val(entry) |= _PAGE_RO;
 		else
-			pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+			pte_val(entry) = _PAGE_TYPE_EMPTY;
+		ptep[PTRS_PER_PTE] = entry;
 	}
 }
 
@@ -536,14 +514,6 @@ static inline int pte_young(pte_t pte)
 #define pgd_clear(pgd)		do { } while (0)
 #define pud_clear(pud)		do { } while (0)
 
-static inline void pmd_clear_kernel(pmd_t * pmdp)
-{
-	pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
-}
-
 #else /* __s390x__ */
 
 #define pgd_clear(pgd)		do { } while (0)
@@ -562,30 +532,27 @@ static inline void pud_clear(pud_t * pud)
 		pud_clear_kernel(shadow);
 }
 
+#endif /* __s390x__ */
+
 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
 	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
-	pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
-#endif /* __s390x__ */
-
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear(pmd_t *pmd)
 {
-	pmd_t *shadow_pmd = get_shadow_table(pmdp);
+	pmd_t *shadow = get_shadow_table(pmd);
 
-	pmd_clear_kernel(pmdp);
-	if (shadow_pmd)
-		pmd_clear_kernel(shadow_pmd);
+	pmd_clear_kernel(pmd);
+	if (shadow)
+		pmd_clear_kernel(shadow);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_t *shadow_pte = get_shadow_pte(ptep);
-
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
-	if (shadow_pte)
-		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+	if (mm->context.noexec)
+		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -666,7 +633,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 #ifndef __s390x__
-		/* S390 has 1mb segments, we are emulating 4MB segments */
+		/* pto must point to the start of the segment table */
 		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
 #else
 		/* ipte in zarch mode can do the math */
@@ -680,12 +647,12 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
-static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
+static inline void ptep_invalidate(struct mm_struct *mm,
+				   unsigned long address, pte_t *ptep)
 {
 	__ptep_ipte(address, ptep);
-	ptep = get_shadow_pte(ptep);
-	if (ptep)
-		__ptep_ipte(address, ptep);
+	if (mm->context.noexec)
+		__ptep_ipte(address, ptep + PTRS_PER_PTE);
 }
 
 /*
@@ -707,7 +674,7 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
 	pte_t __pte = *(__ptep);					\
 	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
 	    (__mm) != current->active_mm)				\
-		ptep_invalidate(__address, __ptep);			\
+		ptep_invalidate(__mm, __address, __ptep);		\
 	else								\
 		pte_clear((__mm), (__address), (__ptep));		\
 	__pte;								\
@@ -718,7 +685,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	ptep_invalidate(address, ptep);
+	ptep_invalidate(vma->vm_mm, address, ptep);
 	return pte;
 }
 
@@ -739,7 +706,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	if (full)
 		pte_clear(mm, addr, ptep);
 	else
-		ptep_invalidate(addr, ptep);
+		ptep_invalidate(mm, addr, ptep);
 	return pte;
 }
 
@@ -750,7 +717,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	if (pte_write(__pte)) {						\
 		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
 		    (__mm) != current->active_mm)			\
-			ptep_invalidate(__addr, __ptep);		\
+			ptep_invalidate(__mm, __addr, __ptep);		\
 		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
 	}								\
 })
@@ -760,7 +727,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({									\
 	int __changed = !pte_same(*(__ptep), __entry);			\
 	if (__changed) {						\
-		ptep_invalidate(__addr, __ptep);			\
+		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
 		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
 	}								\
 	__changed;							\
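
set_pte_at(), pte_clear() and ptep_invalidate() above all rely on the same layout: with context.noexec set, page_table_alloc() hands out two adjacent 256-entry fragments, the second being the execute-side shadow, so the shadow slot of any pte is always ptep[PTRS_PER_PTE]. A userspace illustration of that fixed offset (entry values are arbitrary stand-ins):

/*
 * Illustration only: the shadow table lives PTRS_PER_PTE slots
 * after the primary table in the same allocation.
 */
#include <stdio.h>

#define PTRS_PER_PTE	256

int main(void)
{
	static unsigned long table[2 * PTRS_PER_PTE];	/* table + shadow */
	unsigned long *ptep = &table[42];		/* some pte slot */

	ptep[0] = 0x1000UL;		/* primary entry */
	ptep[PTRS_PER_PTE] = 0x1001UL;	/* shadow entry (read-only copy) */

	printf("pte %#lx, shadow %#lx\n", ptep[0], ptep[PTRS_PER_PTE]);
	return 0;
}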
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h
index 3c8177fa9e06..ecac75ec6cb0 100644
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -95,14 +95,14 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
  */
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
 {
 	if (!tlb->fullmm) {
-		tlb->array[tlb->nr_ptes++] = page;
+		tlb->array[tlb->nr_ptes++] = pte;
 		if (tlb->nr_ptes >= tlb->nr_pmds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
-		pte_free(tlb->mm, page);
+		pte_free(tlb->mm, pte);
 }
 
 /*
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 70fa5ae58180..35fb4f9127b2 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -61,11 +61,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
  * only ran on the local cpu.
  */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow = get_shadow_table(mm->pgd);
-
-		if (shadow)
-			__tlb_flush_idte((unsigned long) shadow | mm->context);
-		__tlb_flush_idte((unsigned long) mm->pgd | mm->context);
+		if (mm->context.noexec)
+			__tlb_flush_idte((unsigned long)
+					 get_shadow_table(mm->pgd) |
+					 mm->context.asce_bits);
+		__tlb_flush_idte((unsigned long) mm->pgd |
+				 mm->context.asce_bits);
 		return;
 	}
 	preempt_disable();