Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--	arch/s390/mm/pgtable.c	68
1 file changed, 9 insertions(+), 59 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e1850c28cd68..8d4330642512 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -40,7 +40,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
 
 static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
 
 static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
 {
@@ -67,7 +66,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
 	while (batch->pgt_index > 0)
 		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
 	while (batch->crst_index < RCU_FREELIST_SIZE)
-		__crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
 	free_page((unsigned long) batch);
 }
 
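Note on the two drain loops above: the shared freelist is filled from both ends, 4K page tables from the bottom (pgt_index counts up from 0) and region/segment (CRST) tables from the top (crst_index counts down from RCU_FREELIST_SIZE), so a single page-sized batch serves both table sizes. A minimal sketch of the layout those loops imply; the field names are taken from the loops, the real definition sits near the top of this file:

	struct rcu_table_freelist {
		struct rcu_head rcu;
		struct mm_struct *mm;
		unsigned int pgt_index;		/* grows up: 4K page tables */
		unsigned int crst_index;	/* grows down: CRST tables */
		unsigned long *table[0];	/* RCU_FREELIST_SIZE slots */
	};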
@@ -125,63 +124,33 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
 	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 
 	if (!page)
 		return NULL;
-	page->index = 0;
-	if (noexec) {
-		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
-		if (!shadow) {
-			__free_pages(page, ALLOC_ORDER);
-			return NULL;
-		}
-		page->index = page_to_phys(shadow);
-	}
-	spin_lock_bh(&mm->context.list_lock);
-	list_add(&page->lru, &mm->context.crst_list);
-	spin_unlock_bh(&mm->context.list_lock);
 	return (unsigned long *) page_to_phys(page);
 }
 
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
-	unsigned long *shadow = get_shadow_table(table);
-
-	if (shadow)
-		free_pages((unsigned long) shadow, ALLOC_ORDER);
-	free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
-	struct page *page = virt_to_page(table);
-
-	spin_lock_bh(&mm->context.list_lock);
-	list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
-	__crst_table_free(mm, table);
+	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
 void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
 	struct rcu_table_freelist *batch;
-	struct page *page = virt_to_page(table);
 
-	spin_lock_bh(&mm->context.list_lock);
-	list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		__crst_table_free(mm, table);
+		crst_table_free(mm, table);
 		return;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
-		__crst_table_free(mm, table);
+		crst_table_free(mm, table);
 		return;
 	}
 	batch->table[--batch->crst_index] = table;
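With the crst_list bookkeeping gone, crst_table_free_rcu() is reduced to the deferral decision itself: a single-user mm that has only run on the current CPU can free synchronously; if no batch page is available, smp_call_function() stands in for the grace period. A hypothetical caller (names invented for illustration) would pair it with rcu_table_freelist_finish(), which this file provides to schedule the pending batch:

	/* Illustration only: hypothetical teardown of a pud-level table. */
	static void free_pud_table(struct mm_struct *mm, unsigned long *table)
	{
		crst_table_free_rcu(mm, table);	/* free now or queue on batch */
		rcu_table_freelist_finish();	/* hand pending batch to RCU */
	}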
@@ -197,7 +166,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 
 	BUG_ON(limit > (1UL << 53));
 repeat:
-	table = crst_table_alloc(mm, mm->context.noexec);
+	table = crst_table_alloc(mm);
 	if (!table)
 		return -ENOMEM;
 	spin_lock_bh(&mm->page_table_lock);
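The upgrade path keeps its shape, one argument lighter. The pattern it relies on: allocate the new top-level table outside the lock, recheck the limit under page_table_lock, and have the loser of a concurrent upgrade free its table and retry. A simplified sketch under those assumptions (mm->context.asce_limit is the reach of the currently installed table; the real function also initializes and installs the new table):

	static int upgrade_sketch(struct mm_struct *mm, unsigned long limit)
	{
		unsigned long *table;
	repeat:
		table = crst_table_alloc(mm);	/* allocate outside the lock */
		if (!table)
			return -ENOMEM;
		spin_lock_bh(&mm->page_table_lock);
		if (mm->context.asce_limit < limit) {
			/* ... hook the old top level below the new one ... */
			table = NULL;		/* new table consumed */
		}
		spin_unlock_bh(&mm->page_table_lock);
		if (table)
			crst_table_free(mm, table);	/* raced, discard */
		if (mm->context.asce_limit < limit)
			goto repeat;		/* still not high enough */
		return 0;
	}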
@@ -273,7 +242,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	unsigned long *table;
 	unsigned long bits;
 
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	spin_lock_bh(&mm->context.list_lock);
 	page = NULL;
 	if (!list_empty(&mm->context.pgtable_list)) {
@@ -329,7 +298,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned long bits;
 
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
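The bits value seeds a per-page allocation bitmap: on s390x a 4K page holds two 2K page tables (256 entries of 8 bytes each), and the shift above selects the half a given table occupies. A pgste mm (KVM) keeps the guest-state entries in the second half of the page, so the table spans the whole page and bits = 3 claims both halves; the removed noexec case used the second half for the shadow table in the same way. A small illustration of the arithmetic (pgtable_mask() is a name invented here):

	/* Illustration only: which bitmap bits a 2K page table claims,
	 * assuming s390x sizes (PAGE_SIZE 4096, 8-byte table entries). */
	static unsigned long pgtable_mask(unsigned long pa, int has_pgste)
	{
		unsigned long offset = pa & (PAGE_SIZE - 1);	/* 0 or 2048 */
		unsigned long shift = offset / 256 / sizeof(unsigned long);

		return (has_pgste ? 3UL : 1UL) << shift; /* 01b, 10b or 11b */
	}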
@@ -366,7 +335,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 		page_table_free(mm, table);
 		return;
 	}
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
@@ -379,25 +348,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	rcu_table_freelist_finish();
 }
 
-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
-{
-	struct page *page;
-
-	spin_lock_bh(&mm->context.list_lock);
-	/* Free shadow region and segment tables. */
-	list_for_each_entry(page, &mm->context.crst_list, lru)
-		if (page->index) {
-			free_pages((unsigned long) page->index, ALLOC_ORDER);
-			page->index = 0;
-		}
-	/* "Free" second halves of page tables. */
-	list_for_each_entry(page, &mm->context.pgtable_list, lru)
-		page->flags &= ~SECOND_HALVES;
-	spin_unlock_bh(&mm->context.list_lock);
-	mm->context.noexec = 0;
-	update_mm(mm, tsk);
-}
-
 /*
  * switch on pgstes for its userspace process (for kvm)
  */