Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--  arch/s390/mm/pgtable.c  83
1 file changed, 77 insertions(+), 6 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5d56c2b95b14..301c84d3b542 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,5 +1,5 @@
 /*
- *    Copyright IBM Corp. 2007,2009
+ *    Copyright IBM Corp. 2007,2011
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap)
 
 	/* Free all segment & region tables. */
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
 		table = (unsigned long *) page_to_phys(page);
 		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap)
 		gmap_unlink_segment(gmap, table);
 		__free_pages(page, ALLOC_ORDER);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	list_del(&gmap->list);
 	kfree(gmap);
@@ -256,6 +258,9 @@ void gmap_disable(struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
 
+/*
+ * gmap_alloc_table is assumed to be called with mmap_sem held
+ */
 static int gmap_alloc_table(struct gmap *gmap,
 			    unsigned long *table, unsigned long init)
 {
@@ -267,14 +272,12 @@ static int gmap_alloc_table(struct gmap *gmap,
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
 	crst_table_init(new, init);
-	down_read(&gmap->mm->mmap_sem);
 	if (*table & _REGION_ENTRY_INV) {
 		list_add(&page->lru, &gmap->crst_list);
 		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
 			(*table & _REGION_ENTRY_TYPE_MASK);
 	} else
 		__free_pages(page, ALLOC_ORDER);
-	up_read(&gmap->mm->mmap_sem);
 	return 0;
 }
 
@@ -299,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the guest addr space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -320,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		*table = _SEGMENT_ENTRY_INV;
 	}
 out:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
@@ -350,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
 	flush = 0;
 	down_read(&gmap->mm->mmap_sem);
+	spin_lock(&gmap->mm->page_table_lock);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -373,19 +379,24 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		flush |= gmap_unlink_segment(gmap, table);
 		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
 	}
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
 
 out_unmap:
+	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
 	gmap_unmap_segment(gmap, to, len);
 	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(gmap_map_segment);
 
-unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 {
 	unsigned long *table, vmaddr, segment;
 	struct mm_struct *mm;
@@ -445,16 +456,75 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
 		page = pmd_page(*pmd);
 		mp = (struct gmap_pgtable *) page->index;
 		rmap->entry = table;
+		spin_lock(&mm->page_table_lock);
 		list_add(&rmap->list, &mp->mapper);
+		spin_unlock(&mm->page_table_lock);
 		/* Set gmap segment table entry to page table. */
 		*table = pmd_val(*pmd) & PAGE_MASK;
 		return vmaddr | (address & ~PMD_MASK);
 	}
 	return -EFAULT;
+}
 
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+	unsigned long rc;
+
+	down_read(&gmap->mm->mmap_sem);
+	rc = __gmap_fault(address, gmap);
+	up_read(&gmap->mm->mmap_sem);
+
+	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_fault);
 
+void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
+{
+
+	unsigned long *table, address, size;
+	struct vm_area_struct *vma;
+	struct gmap_pgtable *mp;
+	struct page *page;
+
+	down_read(&gmap->mm->mmap_sem);
+	address = from;
+	while (address < to) {
+		/* Walk the gmap address space page table */
+		table = gmap->table + ((address >> 53) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
+		}
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + ((address >> 42) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
+		}
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + ((address >> 31) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
+		}
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = table + ((address >> 20) & 0x7ff);
+		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
+		}
+		page = pfn_to_page(*table >> PAGE_SHIFT);
+		mp = (struct gmap_pgtable *) page->index;
+		vma = find_vma(gmap->mm, mp->vmaddr);
+		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
+		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
+			       size, NULL);
+		address = (address + PMD_SIZE) & PMD_MASK;
+	}
+	up_read(&gmap->mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(gmap_discard);
+
 void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
 {
 	struct gmap_rmap *rmap, *next;
@@ -662,8 +732,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 
 void __tlb_remove_table(void *_table)
 {
-	void *table = (void *)((unsigned long) _table & PAGE_MASK);
-	unsigned type = (unsigned long) _table & ~PAGE_MASK;
+	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
+	void *table = (void *)((unsigned long) _table & ~mask);
+	unsigned type = (unsigned long) _table & mask;
 
 	if (type)
 		__page_table_free_rcu(table, type);
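
The __gmap_fault()/gmap_fault() split above follows a common kernel convention: a double-underscore worker that assumes the caller already holds the lock (here mmap_sem), plus a public wrapper that takes and drops it around the call. The same convention explains why gmap_alloc_table() no longer takes mmap_sem itself; its callers now hold it across the whole walk, with page_table_lock serializing the actual table updates. Below is a minimal user-space sketch of that convention, with hypothetical names and a pthreads rwlock standing in for mmap_sem; it illustrates the pattern only and is not kernel code.

/* lock_wrapper.c - sketch of the __worker/wrapper locking convention */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_sem = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long segment_base = 0x100000;	/* hypothetical mapping */

/* Worker: caller must already hold map_sem (cf. __gmap_fault). */
static unsigned long __resolve(unsigned long address)
{
	return segment_base | (address & 0xfffUL);
}

/* Public entry point: takes the lock, delegates (cf. gmap_fault). */
static unsigned long resolve(unsigned long address)
{
	unsigned long rc;

	pthread_rwlock_rdlock(&map_sem);
	rc = __resolve(address);
	pthread_rwlock_unlock(&map_sem);
	return rc;
}

int main(void)
{
	printf("resolved: %#lx\n", resolve(0x2abc));
	return 0;
}

Callers that already hold the lock can use the __-prefixed worker directly; everyone else goes through the wrapper, which keeps the lock ordering seen in this patch (mmap_sem taken outside, page_table_lock inside) in one place.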