Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/fault.c        |  24
-rw-r--r--  arch/s390/mm/gup.c          |  14
-rw-r--r--  arch/s390/mm/hugetlbpage.c  |   2
-rw-r--r--  arch/s390/mm/maccess.c      |  16
-rw-r--r--  arch/s390/mm/pgtable.c      | 422
-rw-r--r--  arch/s390/mm/vmem.c         |   8
6 files changed, 451 insertions(+), 35 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe103e891e7..9564fc779b2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -299,13 +299,28 @@ static inline int do_exception(struct pt_regs *regs, int access,
 		goto out;
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	flags = FAULT_FLAG_ALLOW_RETRY;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
-retry:
 	down_read(&mm->mmap_sem);
 
+#ifdef CONFIG_PGSTE
+	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
+		address = gmap_fault(address,
+				     (struct gmap *) S390_lowcore.gmap);
+		if (address == -EFAULT) {
+			fault = VM_FAULT_BADMAP;
+			goto out_up;
+		}
+		if (address == -ENOMEM) {
+			fault = VM_FAULT_OOM;
+			goto out_up;
+		}
+	}
+#endif
+
+retry:
 	fault = VM_FAULT_BADMAP;
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -345,17 +360,18 @@ retry:
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
 				      regs, address);
 		} else {
 			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
 				      regs, address);
 		}
 		if (fault & VM_FAULT_RETRY) {
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			down_read(&mm->mmap_sem);
 			goto retry;
 		}
 	}
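
The do_exception() changes above also move the retry label: mmap_sem is now taken before the new CONFIG_PGSTE block (gmap_fault() walks the parent mm), and an extra down_read() is added before "goto retry" because handle_mm_fault() releases mmap_sem when it returns VM_FAULT_RETRY. Below is a minimal sketch of the gmap_fault() calling convention that the new block relies on; the helper name is hypothetical, and the return value is either a host address or -EFAULT/-ENOMEM encoded in the unsigned long:

	/* Illustrative helper, not part of the patch. */
	static int resolve_guest_address(struct gmap *guest, unsigned long gaddr,
					 unsigned long *host_addr)
	{
		unsigned long addr = gmap_fault(gaddr, guest);

		if (addr == -EFAULT)
			return -EFAULT;	/* no segment mapped at gaddr */
		if (addr == -ENOMEM)
			return -ENOMEM;	/* parent page tables could not be allocated */
		*host_addr = addr;	/* address in the parent address space */
		return 0;
	}
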
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 45b405ca256..65cb06e2af4 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -52,7 +52,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
 	unsigned long mask, result;
-	struct page *head, *page;
+	struct page *head, *page, *tail;
 	int refs;
 
 	result = write ? 0 : _SEGMENT_ENTRY_RO;
@@ -64,6 +64,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	refs = 0;
 	head = pmd_page(pmd);
 	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	tail = page;
 	do {
 		VM_BUG_ON(compound_head(page) != head);
 		pages[*nr] = page;
@@ -81,6 +82,17 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		*nr -= refs;
 		while (refs--)
 			put_page(head);
+		return 0;
+	}
+
+	/*
+	 * Any tail page need their mapcount reference taken before we
+	 * return.
+	 */
+	while (refs--) {
+		if (PageTail(tail))
+			get_huge_page_tail(tail);
+		tail++;
 	}
 
 	return 1;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index a4d856db915..597bb2d27c3 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -35,7 +35,7 @@ int arch_prepare_hugepage(struct page *page)
 	if (MACHINE_HAS_HPAGE)
 		return 0;
 
-	ptep = (pte_t *) pte_alloc_one(&init_mm, address);
+	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
 	if (!ptep)
 		return -ENOMEM;
 
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 51e5cd9b906..5dbbaa6e594 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -85,3 +85,19 @@ int memcpy_real(void *dest, void *src, size_t count)
 	arch_local_irq_restore(flags);
 	return rc;
 }
+
+/*
+ * Copy memory to absolute zero
+ */
+void copy_to_absolute_zero(void *dest, void *src, size_t count)
+{
+	unsigned long cr0;
+
+	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
+	preempt_disable();
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	memcpy_real(dest + store_prefix(), src, count);
+	__ctl_load(cr0, 0, 0);
+	preempt_enable();
+}
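
The new copy_to_absolute_zero() writes to the lowcore at absolute address zero: store_prefix() is added to the destination because real-address accesses are still subject to prefixing, and the lowcore protection bit in control register 0 is cleared around the copy, as the code comment notes. A hedged usage sketch follows; the lowcore field and helper name are chosen for illustration only:

	/* Illustration only: publish a restart PSW in the absolute-zero lowcore. */
	static void set_absolute_restart_psw(psw_t psw)
	{
		/* &S390_lowcore.restart_psw lies inside struct _lowcore, which
		 * satisfies the BUG_ON() range check in copy_to_absolute_zero(). */
		copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
	}
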
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37a23c22370..529a0883837 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/quicklist.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -133,30 +134,369 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 }
 #endif
 
-static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+#ifdef CONFIG_PGSTE
+
+/**
+ * gmap_alloc - allocate a guest address space
+ * @mm: pointer to the parent mm_struct
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_alloc(struct mm_struct *mm)
 {
-	unsigned int old, new;
+	struct gmap *gmap;
+	struct page *page;
+	unsigned long *table;
 
-	do {
-		old = atomic_read(v);
-		new = old ^ bits;
-	} while (atomic_cmpxchg(v, old, new) != old);
-	return new;
+	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+	if (!gmap)
+		goto out;
+	INIT_LIST_HEAD(&gmap->crst_list);
+	gmap->mm = mm;
+	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	if (!page)
+		goto out_free;
+	list_add(&page->lru, &gmap->crst_list);
+	table = (unsigned long *) page_to_phys(page);
+	crst_table_init(table, _REGION1_ENTRY_EMPTY);
+	gmap->table = table;
+	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+		     _ASCE_USER_BITS | __pa(table);
+	list_add(&gmap->list, &mm->context.gmap_list);
+	return gmap;
+
+out_free:
+	kfree(gmap);
+out:
+	return NULL;
 }
+EXPORT_SYMBOL_GPL(gmap_alloc);
 
-/*
- * page table entry allocation/free routines.
+static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
+{
+	struct gmap_pgtable *mp;
+	struct gmap_rmap *rmap;
+	struct page *page;
+
+	if (*table & _SEGMENT_ENTRY_INV)
+		return 0;
+	page = pfn_to_page(*table >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	list_for_each_entry(rmap, &mp->mapper, list) {
+		if (rmap->entry != table)
+			continue;
+		list_del(&rmap->list);
+		kfree(rmap);
+		break;
+	}
+	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+	return 1;
+}
+
+static void gmap_flush_tlb(struct gmap *gmap)
+{
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_idte((unsigned long) gmap->table |
+				 _ASCE_TYPE_REGION1);
+	else
+		__tlb_flush_global();
+}
+
+/**
+ * gmap_free - free a guest address space
+ * @gmap: pointer to the guest address space structure
  */
-#ifdef CONFIG_PGSTE
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
+void gmap_free(struct gmap *gmap)
+{
+	struct page *page, *next;
+	unsigned long *table;
+	int i;
+
+
+	/* Flush tlb. */
+	if (MACHINE_HAS_IDTE)
+		__tlb_flush_idte((unsigned long) gmap->table |
+				 _ASCE_TYPE_REGION1);
+	else
+		__tlb_flush_global();
+
+	/* Free all segment & region tables. */
+	down_read(&gmap->mm->mmap_sem);
+	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+		table = (unsigned long *) page_to_phys(page);
+		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
+			/* Remove gmap rmap structures for segment table. */
+			for (i = 0; i < PTRS_PER_PMD; i++, table++)
+				gmap_unlink_segment(gmap, table);
+		__free_pages(page, ALLOC_ORDER);
+	}
+	up_read(&gmap->mm->mmap_sem);
+	list_del(&gmap->list);
+	kfree(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_enable - switch primary space to the guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_enable(struct gmap *gmap)
+{
+	S390_lowcore.gmap = (unsigned long) gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_enable);
+
+/**
+ * gmap_disable - switch back to the standard primary address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_disable(struct gmap *gmap)
+{
+	S390_lowcore.gmap = 0UL;
+}
+EXPORT_SYMBOL_GPL(gmap_disable);
+
+static int gmap_alloc_table(struct gmap *gmap,
+			    unsigned long *table, unsigned long init)
+{
+	struct page *page;
+	unsigned long *new;
+
+	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	if (!page)
+		return -ENOMEM;
+	new = (unsigned long *) page_to_phys(page);
+	crst_table_init(new, init);
+	down_read(&gmap->mm->mmap_sem);
+	if (*table & _REGION_ENTRY_INV) {
+		list_add(&page->lru, &gmap->crst_list);
+		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
+			(*table & _REGION_ENTRY_TYPE_MASK);
+	} else
+		__free_pages(page, ALLOC_ORDER);
+	up_read(&gmap->mm->mmap_sem);
+	return 0;
+}
+
+/**
+ * gmap_unmap_segment - unmap segment from the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @addr: address in the guest address space
+ * @len: length of the memory area to unmap
+ *
+ * Returns 0 if the unmap succeded, -EINVAL if not.
+ */
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
+{
+	unsigned long *table;
+	unsigned long off;
+	int flush;
+
+	if ((to | len) & (PMD_SIZE - 1))
+		return -EINVAL;
+	if (len == 0 || to + len < to)
+		return -EINVAL;
+
+	flush = 0;
+	down_read(&gmap->mm->mmap_sem);
+	for (off = 0; off < len; off += PMD_SIZE) {
+		/* Walk the guest addr space page table */
+		table = gmap->table + (((to + off) >> 53) & 0x7ff);
+		if (*table & _REGION_ENTRY_INV)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 42) & 0x7ff);
+		if (*table & _REGION_ENTRY_INV)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 31) & 0x7ff);
+		if (*table & _REGION_ENTRY_INV)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 20) & 0x7ff);
+
+		/* Clear segment table entry in guest address space. */
+		flush |= gmap_unlink_segment(gmap, table);
+		*table = _SEGMENT_ENTRY_INV;
+	}
+out:
+	up_read(&gmap->mm->mmap_sem);
+	if (flush)
+		gmap_flush_tlb(gmap);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_unmap_segment);
+
+/**
+ * gmap_mmap_segment - map a segment to the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @from: source address in the parent address space
+ * @to: target address in the guest address space
+ *
+ * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not.
+ */
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long len)
+{
+	unsigned long *table;
+	unsigned long off;
+	int flush;
+
+	if ((from | to | len) & (PMD_SIZE - 1))
+		return -EINVAL;
+	if (len == 0 || from + len > PGDIR_SIZE ||
+	    from + len < from || to + len < to)
+		return -EINVAL;
+
+	flush = 0;
+	down_read(&gmap->mm->mmap_sem);
+	for (off = 0; off < len; off += PMD_SIZE) {
+		/* Walk the gmap address space page table */
+		table = gmap->table + (((to + off) >> 53) & 0x7ff);
+		if ((*table & _REGION_ENTRY_INV) &&
+		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
+			goto out_unmap;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 42) & 0x7ff);
+		if ((*table & _REGION_ENTRY_INV) &&
+		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
+			goto out_unmap;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 31) & 0x7ff);
+		if ((*table & _REGION_ENTRY_INV) &&
+		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
+			goto out_unmap;
+		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
+		table = table + (((to + off) >> 20) & 0x7ff);
+
+		/* Store 'from' address in an invalid segment table entry. */
+		flush |= gmap_unlink_segment(gmap, table);
+		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+	}
+	up_read(&gmap->mm->mmap_sem);
+	if (flush)
+		gmap_flush_tlb(gmap);
+	return 0;
+
+out_unmap:
+	up_read(&gmap->mm->mmap_sem);
+	gmap_unmap_segment(gmap, to, len);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gmap_map_segment);
+
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+	unsigned long *table, vmaddr, segment;
+	struct mm_struct *mm;
+	struct gmap_pgtable *mp;
+	struct gmap_rmap *rmap;
+	struct vm_area_struct *vma;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	current->thread.gmap_addr = address;
+	mm = gmap->mm;
+	/* Walk the gmap address space page table */
+	table = gmap->table + ((address >> 53) & 0x7ff);
+	if (unlikely(*table & _REGION_ENTRY_INV))
+		return -EFAULT;
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((address >> 42) & 0x7ff);
+	if (unlikely(*table & _REGION_ENTRY_INV))
+		return -EFAULT;
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((address >> 31) & 0x7ff);
+	if (unlikely(*table & _REGION_ENTRY_INV))
+		return -EFAULT;
+	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	table = table + ((address >> 20) & 0x7ff);
+
+	/* Convert the gmap address to an mm address. */
+	segment = *table;
+	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+		page = pfn_to_page(segment >> PAGE_SHIFT);
+		mp = (struct gmap_pgtable *) page->index;
+		return mp->vmaddr | (address & ~PMD_MASK);
+	} else if (segment & _SEGMENT_ENTRY_RO) {
+		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+		vma = find_vma(mm, vmaddr);
+		if (!vma || vma->vm_start > vmaddr)
+			return -EFAULT;
+
+		/* Walk the parent mm page table */
+		pgd = pgd_offset(mm, vmaddr);
+		pud = pud_alloc(mm, pgd, vmaddr);
+		if (!pud)
+			return -ENOMEM;
+		pmd = pmd_alloc(mm, pud, vmaddr);
+		if (!pmd)
+			return -ENOMEM;
+		if (!pmd_present(*pmd) &&
+		    __pte_alloc(mm, vma, pmd, vmaddr))
+			return -ENOMEM;
+		/* pmd now points to a valid segment table entry. */
+		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+		if (!rmap)
+			return -ENOMEM;
+		/* Link gmap segment table entry location to page table. */
+		page = pmd_page(*pmd);
+		mp = (struct gmap_pgtable *) page->index;
+		rmap->entry = table;
+		list_add(&rmap->list, &mp->mapper);
+		/* Set gmap segment table entry to page table. */
+		*table = pmd_val(*pmd) & PAGE_MASK;
+		return vmaddr | (address & ~PMD_MASK);
+	}
+	return -EFAULT;
+
+}
+EXPORT_SYMBOL_GPL(gmap_fault);
+
+void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
+{
+	struct gmap_rmap *rmap, *next;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int flush;
+
+	flush = 0;
+	spin_lock(&mm->page_table_lock);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+		*rmap->entry =
+			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		list_del(&rmap->list);
+		kfree(rmap);
+		flush = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+	if (flush)
+		__tlb_flush_global();
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+						    unsigned long vmaddr)
 {
 	struct page *page;
 	unsigned long *table;
+	struct gmap_pgtable *mp;
 
 	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
 	if (!page)
 		return NULL;
+	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
+	if (!mp) {
+		__free_page(page);
+		return NULL;
+	}
 	pgtable_page_ctor(page);
+	mp->vmaddr = vmaddr & PMD_MASK;
+	INIT_LIST_HEAD(&mp->mapper);
+	page->index = (unsigned long) mp;
 	atomic_set(&page->_mapcount, 3);
 	table = (unsigned long *) page_to_phys(page);
 	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
@@ -167,24 +507,58 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
 static inline void page_table_free_pgste(unsigned long *table)
 {
 	struct page *page;
+	struct gmap_pgtable *mp;
 
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	BUG_ON(!list_empty(&mp->mapper));
 	pgtable_page_ctor(page);
 	atomic_set(&page->_mapcount, -1);
+	kfree(mp);
 	__free_page(page);
 }
-#endif
 
-unsigned long *page_table_alloc(struct mm_struct *mm)
+#else /* CONFIG_PGSTE */
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+						    unsigned long vmaddr)
+{
+	return NULL;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+}
+
+static inline void gmap_unmap_notifier(struct mm_struct *mm,
+				       unsigned long *table)
+{
+}
+
+#endif /* CONFIG_PGSTE */
+
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+	unsigned int old, new;
+
+	do {
+		old = atomic_read(v);
+		new = old ^ bits;
+	} while (atomic_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
 {
 	struct page *page;
 	unsigned long *table;
 	unsigned int mask, bit;
 
-#ifdef CONFIG_PGSTE
 	if (mm_has_pgste(mm))
-		return page_table_alloc_pgste(mm);
-#endif
+		return page_table_alloc_pgste(mm, vmaddr);
 	/* Allocate fragments of a 4K page as 1K/2K page table */
 	spin_lock_bh(&mm->context.list_lock);
 	mask = FRAG_MASK;
@@ -222,10 +596,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned int bit, mask;
 
-#ifdef CONFIG_PGSTE
-	if (mm_has_pgste(mm))
+	if (mm_has_pgste(mm)) {
+		gmap_unmap_notifier(mm, table);
 		return page_table_free_pgste(table);
-#endif
+	}
 	/* Free 1K/2K page table fragment of a 4K page */
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
@@ -249,10 +623,8 @@ static void __page_table_free_rcu(void *table, unsigned bit)
 {
 	struct page *page;
 
-#ifdef CONFIG_PGSTE
 	if (bit == FRAG_MASK)
 		return page_table_free_pgste(table);
-#endif
 	/* Free 1K/2K page table fragment of a 4K page */
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
@@ -269,13 +641,12 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 	unsigned int bit, mask;
 
 	mm = tlb->mm;
-#ifdef CONFIG_PGSTE
 	if (mm_has_pgste(mm)) {
+		gmap_unmap_notifier(mm, table);
 		table = (unsigned long *) (__pa(table) | FRAG_MASK);
 		tlb_remove_table(tlb, table);
 		return;
 	}
-#endif
 	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
@@ -291,8 +662,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 
 void __tlb_remove_table(void *_table)
 {
-	void *table = (void *)((unsigned long) _table & PAGE_MASK);
-	unsigned type = (unsigned long) _table & ~PAGE_MASK;
+	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
+	void *table = (void *)((unsigned long) _table & ~mask);
+	unsigned type = (unsigned long) _table & mask;
 
 	if (type)
 		__page_table_free_rcu(table, type);
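
Taken together, the pgtable.c additions form a small guest address space API: gmap_alloc() builds a region-1 table and ASCE, gmap_map_segment() installs segment entries that refer back to the parent address space, gmap_enable()/gmap_disable() publish the gmap through S390_lowcore.gmap for the fault handler, gmap_fault() resolves a guest address to a parent-mm address on demand, and gmap_free() tears everything down again. A rough usage sketch based only on the functions added above; the caller and its error handling are hypothetical:

	/* Hypothetical caller; only the gmap_* calls come from this patch. */
	static int run_guest_slice(struct mm_struct *mm, unsigned long host_origin,
				   unsigned long guest_origin, unsigned long size)
	{
		struct gmap *gmap;
		int rc;

		gmap = gmap_alloc(mm);		/* region-1 table + ASCE */
		if (!gmap)
			return -ENOMEM;
		rc = gmap_map_segment(gmap, host_origin, guest_origin, size);
		if (rc) {
			gmap_free(gmap);
			return rc;
		}
		gmap_enable(gmap);	/* sets S390_lowcore.gmap, used by do_exception() */
		/* ... run guest code; faults are resolved through gmap_fault() ... */
		gmap_disable(gmap);
		gmap_unmap_segment(gmap, guest_origin, size);
		gmap_free(gmap);
		return 0;
	}
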
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8c1970d1dd9..781ff516956 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,12 +61,12 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static pte_t __ref *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(unsigned long address)
 {
 	pte_t *pte;
 
 	if (slab_is_available())
-		pte = (pte_t *) page_table_alloc(&init_mm);
+		pte = (pte_t *) page_table_alloc(&init_mm, address);
 	else
 		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
@@ -120,7 +120,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 	}
 #endif
 	if (pmd_none(*pm_dir)) {
-		pt_dir = vmem_pte_alloc();
+		pt_dir = vmem_pte_alloc(address);
 		if (!pt_dir)
 			goto out;
 		pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 
 	pm_dir = pmd_offset(pu_dir, address);
 	if (pmd_none(*pm_dir)) {
-		pt_dir = vmem_pte_alloc();
+		pt_dir = vmem_pte_alloc(address);
 		if (!pt_dir)
 			goto out;
 		pmd_populate(&init_mm, pm_dir, pt_dir);