author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2013-04-16 07:37:46 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2013-04-23 04:18:04 -0400
commit     ab8e5235868f99dfc779e4eaff28f53d63714ce4 (patch)
tree       78e4600735c5b690113d281c85b12fcf76b4358b
parent     c5034945ce59abacdd02c5eff29f4f54df197880 (diff)
s390/mm,gmap: segment mapping race
The gmap_map_segment function creates a special invalid segment table
entry with the address of the requested target location in the process
address space. The first access will create the connection between the
gmap segment table and the target page table of the main process.
If two threads do this concurrently, both will walk the page tables and
allocate a gmap_rmap structure for the same segment table entry.
To avoid the race, recheck the segment table entry after taking the
page table lock.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
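The fix is a check-and-install pattern: allocate the gmap_rmap node outside the lock, re-read the segment table entry under mm->page_table_lock, install the mapping only if the entry still holds the original invalid placeholder, and let the loser of the race free its node. The sketch below is not the kernel code but a minimal user-space analogue of that pattern, assuming a pthread mutex in place of mm->page_table_lock; names such as connect_segment, rmap_node and ENTRY_INVALID are illustrative only.

/*
 * Minimal user-space analogue of the recheck-under-lock pattern:
 * allocate the tracking node outside the lock, then re-check the entry
 * under the lock and only install if it is still the placeholder value.
 * All names here are illustrative, not the kernel's.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRY_INVALID 0x20UL            /* stand-in for _SEGMENT_ENTRY_INV */

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct rmap_node {
        unsigned long *entry;           /* location of the segment table entry */
        struct rmap_node *next;
};

static struct rmap_node *mapper;        /* stand-in for the mp->mapper list */

/* Connect a placeholder entry to its "page table" value exactly once. */
static int connect_segment(unsigned long *seg_entry, unsigned long placeholder,
                           unsigned long pgtable_val)
{
        struct rmap_node *node = malloc(sizeof(*node));

        if (!node)
                return -1;
        node->entry = seg_entry;

        pthread_mutex_lock(&table_lock);
        if (*seg_entry == placeholder) {
                /* We won the race: link the node and install the value. */
                node->next = mapper;
                mapper = node;
                *seg_entry = pgtable_val;
                node = NULL;            /* ownership moved to the list */
        }
        pthread_mutex_unlock(&table_lock);

        /* If another thread already connected the entry, drop our node. */
        free(node);
        return 0;
}

int main(void)
{
        unsigned long entry = ENTRY_INVALID;

        connect_segment(&entry, ENTRY_INVALID, 0x1000);
        connect_segment(&entry, ENTRY_INVALID, 0x2000); /* recheck rejects this */
        printf("entry = %#lx\n", entry);                /* prints 0x1000 */
        return 0;
}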
-rw-r--r--   arch/s390/mm/pgtable.c   160
1 file changed, 91 insertions, 69 deletions
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2accf7113d13..bd954e96f51c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -454,12 +454,11 @@ unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
 
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static int gmap_connect_pgtable(unsigned long segment,
+                                unsigned long *segment_ptr,
+                                struct gmap *gmap)
 {
-        unsigned long *segment_ptr, vmaddr, segment;
+        unsigned long vmaddr;
         struct vm_area_struct *vma;
         struct gmap_pgtable *mp;
         struct gmap_rmap *rmap;
@@ -469,48 +468,94 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
         pud_t *pud;
         pmd_t *pmd;
 
+        mm = gmap->mm;
+        vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+        vma = find_vma(mm, vmaddr);
+        if (!vma || vma->vm_start > vmaddr)
+                return -EFAULT;
+        /* Walk the parent mm page table */
+        pgd = pgd_offset(mm, vmaddr);
+        pud = pud_alloc(mm, pgd, vmaddr);
+        if (!pud)
+                return -ENOMEM;
+        pmd = pmd_alloc(mm, pud, vmaddr);
+        if (!pmd)
+                return -ENOMEM;
+        if (!pmd_present(*pmd) &&
+            __pte_alloc(mm, vma, pmd, vmaddr))
+                return -ENOMEM;
+        /* pmd now points to a valid segment table entry. */
+        rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+        if (!rmap)
+                return -ENOMEM;
+        /* Link gmap segment table entry location to page table. */
+        page = pmd_page(*pmd);
+        mp = (struct gmap_pgtable *) page->index;
+        rmap->entry = segment_ptr;
+        spin_lock(&mm->page_table_lock);
+        if (*segment_ptr == segment) {
+                list_add(&rmap->list, &mp->mapper);
+                /* Set gmap segment table entry to page table. */
+                *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+                rmap = NULL;
+        }
+        spin_unlock(&mm->page_table_lock);
+        kfree(rmap);
+        return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+        struct gmap_rmap *rmap, *next;
+        struct gmap_pgtable *mp;
+        struct page *page;
+        int flush;
+
+        flush = 0;
+        spin_lock(&mm->page_table_lock);
+        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+        mp = (struct gmap_pgtable *) page->index;
+        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+                *rmap->entry =
+                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+                list_del(&rmap->list);
+                kfree(rmap);
+                flush = 1;
+        }
+        spin_unlock(&mm->page_table_lock);
+        if (flush)
+                __tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+        unsigned long *segment_ptr, segment;
+        struct gmap_pgtable *mp;
+        struct page *page;
+        int rc;
+
         current->thread.gmap_addr = address;
         segment_ptr = gmap_table_walk(address, gmap);
         if (IS_ERR(segment_ptr))
                 return -EFAULT;
         /* Convert the gmap address to an mm address. */
-        segment = *segment_ptr;
-        if (!(segment & _SEGMENT_ENTRY_INV)) {
-                page = pfn_to_page(segment >> PAGE_SHIFT);
-                mp = (struct gmap_pgtable *) page->index;
-                return mp->vmaddr | (address & ~PMD_MASK);
-        } else if (segment & _SEGMENT_ENTRY_RO) {
-                mm = gmap->mm;
-                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
-                vma = find_vma(mm, vmaddr);
-                if (!vma || vma->vm_start > vmaddr)
-                        return -EFAULT;
-
-                /* Walk the parent mm page table */
-                pgd = pgd_offset(mm, vmaddr);
-                pud = pud_alloc(mm, pgd, vmaddr);
-                if (!pud)
-                        return -ENOMEM;
-                pmd = pmd_alloc(mm, pud, vmaddr);
-                if (!pmd)
-                        return -ENOMEM;
-                if (!pmd_present(*pmd) &&
-                    __pte_alloc(mm, vma, pmd, vmaddr))
-                        return -ENOMEM;
-                /* pmd now points to a valid segment table entry. */
-                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
-                if (!rmap)
-                        return -ENOMEM;
-                /* Link gmap segment table entry location to page table. */
-                page = pmd_page(*pmd);
-                mp = (struct gmap_pgtable *) page->index;
-                rmap->entry = segment_ptr;
-                spin_lock(&mm->page_table_lock);
-                list_add(&rmap->list, &mp->mapper);
-                spin_unlock(&mm->page_table_lock);
-                /* Set gmap segment table entry to page table. */
-                *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
-                return vmaddr | (address & ~PMD_MASK);
-        }
+        while (1) {
+                segment = *segment_ptr;
+                if (!(segment & _SEGMENT_ENTRY_INV)) {
+                        /* Page table is present */
+                        page = pfn_to_page(segment >> PAGE_SHIFT);
+                        mp = (struct gmap_pgtable *) page->index;
+                        return mp->vmaddr | (address & ~PMD_MASK);
+                }
+                if (!(segment & _SEGMENT_ENTRY_RO))
+                        /* Nothing mapped in the gmap address space. */
+                        break;
+                rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+                if (rc)
+                        return rc;
+        }
         return -EFAULT;
 }
@@ -574,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
-        struct gmap_rmap *rmap, *next;
-        struct gmap_pgtable *mp;
-        struct page *page;
-        int flush;
-
-        flush = 0;
-        spin_lock(&mm->page_table_lock);
-        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-        mp = (struct gmap_pgtable *) page->index;
-        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-                *rmap->entry =
-                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
-                list_del(&rmap->list);
-                kfree(rmap);
-                flush = 1;
-        }
-        spin_unlock(&mm->page_table_lock);
-        if (flush)
-                __tlb_flush_global();
-}
-
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                     unsigned long vmaddr)
 {
@@ -649,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
 {
 }
 
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
-                                       unsigned long *table)
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+                                           unsigned long *table)
 {
 }
 
@@ -716,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
         unsigned int bit, mask;
 
         if (mm_has_pgste(mm)) {
-                gmap_unmap_notifier(mm, table);
+                gmap_disconnect_pgtable(mm, table);
                 return page_table_free_pgste(table);
         }
         /* Free 1K/2K page table fragment of a 4K page */
@@ -759,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 
         mm = tlb->mm;
         if (mm_has_pgste(mm)) {
-                gmap_unmap_notifier(mm, table);
+                gmap_disconnect_pgtable(mm, table);
                 table = (unsigned long *) (__pa(table) | FRAG_MASK);
                 tlb_remove_table(tlb, table);
                 return;