Diffstat (limited to 'arch/s390/mm/pgtable.c')

 arch/s390/mm/pgtable.c | 69 ++++----------------------
 1 file changed, 9 insertions(+), 60 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e1850c28cd68..14c6fae6fe6b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -36,11 +36,9 @@ struct rcu_table_freelist {
 	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
 	  / sizeof(unsigned long))
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
 
 static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
 
 static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
 {
@@ -67,7 +65,7 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
 	while (batch->pgt_index > 0)
 		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
 	while (batch->crst_index < RCU_FREELIST_SIZE)
-		__crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
 	free_page((unsigned long) batch);
 }
 
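For context: the callback above drains one batch from both ends. Page tables are
pushed up from index 0 and crst tables down from RCU_FREELIST_SIZE, so a single
page-sized array serves both kinds and is full when the two indices meet. A
minimal userspace sketch of that two-ended batch follows; names and sizes are
illustrative, not the kernel's (whose struct also carries an rcu_head and the
owning mm):

#include <stdio.h>

#define FREELIST_SIZE 8			/* kernel: fills the rest of a page */

struct batch {
	unsigned int pgt_index;		/* page tables: grows up from 0 */
	unsigned int crst_index;	/* crst tables: grows down from the top */
	void *table[FREELIST_SIZE];
};

static int batch_add_pgt(struct batch *b, void *pgt)
{
	if (b->pgt_index >= b->crst_index)
		return -1;		/* full: the two ends have met */
	b->table[b->pgt_index++] = pgt;
	return 0;
}

static int batch_add_crst(struct batch *b, void *crst)
{
	if (b->pgt_index >= b->crst_index)
		return -1;
	b->table[--b->crst_index] = crst;
	return 0;
}

int main(void)
{
	struct batch b = { .pgt_index = 0, .crst_index = FREELIST_SIZE };
	int x, y;

	batch_add_pgt(&b, &x);
	batch_add_crst(&b, &y);
	/* drain exactly like rcu_table_freelist_callback() above */
	while (b.pgt_index > 0)
		printf("pgt  %p\n", b.table[--b.pgt_index]);
	while (b.crst_index < FREELIST_SIZE)
		printf("crst %p\n", b.table[b.crst_index++]);
	return 0;
}
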
@@ -125,63 +123,33 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
 	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 
 	if (!page)
 		return NULL;
-	page->index = 0;
-	if (noexec) {
-		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
-		if (!shadow) {
-			__free_pages(page, ALLOC_ORDER);
-			return NULL;
-		}
-		page->index = page_to_phys(shadow);
-	}
-	spin_lock_bh(&mm->context.list_lock);
-	list_add(&page->lru, &mm->context.crst_list);
-	spin_unlock_bh(&mm->context.list_lock);
 	return (unsigned long *) page_to_phys(page);
 }
 
-static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
-{
-	unsigned long *shadow = get_shadow_table(table);
-
-	if (shadow)
-		free_pages((unsigned long) shadow, ALLOC_ORDER);
-	free_pages((unsigned long) table, ALLOC_ORDER);
-}
-
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
-	struct page *page = virt_to_page(table);
-
-	spin_lock_bh(&mm->context.list_lock);
-	list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
-	__crst_table_free(mm, table);
+	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
 void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
 	struct rcu_table_freelist *batch;
-	struct page *page = virt_to_page(table);
 
-	spin_lock_bh(&mm->context.list_lock);
-	list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		__crst_table_free(mm, table);
+		crst_table_free(mm, table);
 		return;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
-		__crst_table_free(mm, table);
+		crst_table_free(mm, table);
 		return;
 	}
 	batch->table[--batch->crst_index] = table;
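
With the shadow tables gone, crst_table_free() reduces to a plain free_pages()
and the RCU paths can call it directly in place of the removed
__crst_table_free(). The deferred-free decision itself is unchanged: free
immediately when no other CPU can be walking the mm's tables, queue on the
per-cpu batch otherwise, and fall back to an explicit cross-CPU synchronization
plus immediate free when no batch can be allocated. A control-flow sketch, with
the kernel's predicates replaced by hypothetical stubs (they stand in for the
mm_users/cpumask test, rcu_table_freelist_get() and smp_call_function(smp_sync,
NULL, 1) shown in the hunk):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool only_this_cpu_uses_mm(void) { return false; }	/* stub */
static void *get_free_batch(void)	 { return NULL; }	/* stub */
static void sync_all_cpus(void)		 { puts("sync"); }	/* stub */
static void table_free(void *table)	 { free(table); }

static void table_free_deferred(void *table)
{
	void *batch;

	/* Fast path: no other CPU can hold a stale reference. */
	if (only_this_cpu_uses_mm()) {
		table_free(table);
		return;
	}
	batch = get_free_batch();
	if (!batch) {
		/* No memory for a batch: synchronize, then free now. */
		sync_all_cpus();
		table_free(table);
		return;
	}
	/* Otherwise queued: really freed after the next RCU grace period. */
}

int main(void)
{
	table_free_deferred(malloc(2048));
	return 0;
}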
@@ -197,7 +165,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 
 	BUG_ON(limit > (1UL << 53));
 repeat:
-	table = crst_table_alloc(mm, mm->context.noexec);
+	table = crst_table_alloc(mm);
 	if (!table)
 		return -ENOMEM;
 	spin_lock_bh(&mm->page_table_lock);
@@ -273,7 +241,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	unsigned long *table;
 	unsigned long bits;
 
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	spin_lock_bh(&mm->context.list_lock);
 	page = NULL;
 	if (!list_empty(&mm->context.pgtable_list)) {
@@ -329,7 +297,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned long bits;
 
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
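
The bits computation in the two hunks above (it recurs once more in the next
hunk) is the sub-page allocation bitmap: an s390 page table is 256 entries of
8 bytes, i.e. 2K, so a 4K page holds two of them, marked by bits 0 and 1 in
page->flags. Offset-in-page / 256 / sizeof(unsigned long) reduces to
offset / 2048 and selects the half; a pgste-enabled mm starts from mask 3UL
since its tables take the whole page. A worked userspace example of the same
arithmetic (assumes a 64-bit build, where sizeof(unsigned long) == 8 as on
s390x; PAGE_SIZE mirrors the kernel's 4K value):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long half_bits(unsigned long pa, int has_pgste)
{
	unsigned long bits = has_pgste ? 3UL : 1UL;

	/* offset-in-page / 2048 selects half 0 or half 1 */
	bits <<= (pa & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	return bits;
}

int main(void)
{
	printf("%lx\n", half_bits(0x12345000UL, 0));	/* 1: lower half */
	printf("%lx\n", half_bits(0x12345800UL, 0));	/* 2: upper half */
	printf("%lx\n", half_bits(0x12345000UL, 1));	/* 3: whole page */
	return 0;
}
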
@@ -366,7 +334,7 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 		page_table_free(mm, table);
 		return;
 	}
-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
@@ -379,25 +347,6 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	rcu_table_freelist_finish();
 }
 
-void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
-{
-	struct page *page;
-
-	spin_lock_bh(&mm->context.list_lock);
-	/* Free shadow region and segment tables. */
-	list_for_each_entry(page, &mm->context.crst_list, lru)
-		if (page->index) {
-			free_pages((unsigned long) page->index, ALLOC_ORDER);
-			page->index = 0;
-		}
-	/* "Free" second halves of page tables. */
-	list_for_each_entry(page, &mm->context.pgtable_list, lru)
-		page->flags &= ~SECOND_HALVES;
-	spin_unlock_bh(&mm->context.list_lock);
-	mm->context.noexec = 0;
-	update_mm(mm, tsk);
-}
-
 /*
  * switch on pgstes for its userspace process (for kvm)
  */