author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-04-17 04:53:39 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-05-03 08:21:12 -0400
commit		d3383632d4e8e9ae747f582eaee8c2e79f828ae6
tree		9583ce35ae19ba85cc34ffe2f5b64e7d99045cbe /arch/s390/mm
parent		56bbe686693df7edcca18d1808edd80609e63c31
s390/mm: add pte invalidation notifier for kvm
Add a notifier for kvm to get control before a page table entry is
invalidated. The notifier is only called for ptes of an address space
with pgstes that have been explicitly marked to require notification.
Kvm will use this to get control before the prefix pages of a virtual
CPU are unmapped.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
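The API added by this patch is small: a consumer wraps a callback in a
struct gmap_notifier, registers it globally, and arms the guest pages it
cares about with gmap_ipte_notify(). A minimal sketch of how a consumer
such as kvm might wire this up; the helper names, the header location and
the two-page prefix size are illustrative, only the gmap_* calls and the
callback signature are taken from this patch:

#include <linux/list.h>
#include <asm/pgtable.h>	/* gmap API; header location assumed */

/* Runs under the page table lock, just before the pte that backs
 * 'address' in the guest address space is invalidated. */
static void my_prefix_notifier(struct gmap *gmap, unsigned long address)
{
	/* e.g. stop the vcpu whose prefix area covers 'address' */
}

static struct gmap_notifier my_notifier = {
	.notifier_call = my_prefix_notifier,
};

/* Illustrative setup: watch the two 4K prefix pages of a vcpu. */
static int my_watch_prefix(struct gmap *gmap, unsigned long prefix)
{
	gmap_register_ipte_notifier(&my_notifier);
	/* Establishes missing mappings; returns 0, -EFAULT or -ENOMEM. */
	return gmap_ipte_notify(gmap, prefix, 2 * PAGE_SIZE);
}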
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/pgtable.c | 121
1 file changed, 117 insertions(+), 4 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index bd954e96f51c..7805ddca833d 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -454,9 +454,8 @@ unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
 
-static int gmap_connect_pgtable(unsigned long segment,
-				unsigned long *segment_ptr,
-				struct gmap *gmap)
+static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
+				unsigned long *segment_ptr, struct gmap *gmap)
 {
 	unsigned long vmaddr;
 	struct vm_area_struct *vma;
@@ -491,7 +490,9 @@ static int gmap_connect_pgtable(unsigned long segment,
 	/* Link gmap segment table entry location to page table. */
 	page = pmd_page(*pmd);
 	mp = (struct gmap_pgtable *) page->index;
+	rmap->gmap = gmap;
 	rmap->entry = segment_ptr;
+	rmap->vmaddr = address;
 	spin_lock(&mm->page_table_lock);
 	if (*segment_ptr == segment) {
 		list_add(&rmap->list, &mp->mapper);
@@ -553,7 +554,7 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 		if (!(segment & _SEGMENT_ENTRY_RO))
 			/* Nothing mapped in the gmap address space. */
 			break;
-		rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
 		if (rc)
 			return rc;
 	}
@@ -619,6 +620,118 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
+static LIST_HEAD(gmap_notifier_list);
+static DEFINE_SPINLOCK(gmap_notifier_lock);
+
+/**
+ * gmap_register_ipte_notifier - register a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_register_ipte_notifier(struct gmap_notifier *nb)
+{
+	spin_lock(&gmap_notifier_lock);
+	list_add(&nb->list, &gmap_notifier_list);
+	spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
+
+/**
+ * gmap_unregister_ipte_notifier - remove a pte invalidation callback
+ * @nb: pointer to the gmap notifier block
+ */
+void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
+{
+	spin_lock(&gmap_notifier_lock);
+	list_del_init(&nb->list);
+	spin_unlock(&gmap_notifier_lock);
+}
+EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
+
+/**
+ * gmap_ipte_notify - mark a range of ptes for invalidation notification
+ * @gmap: pointer to guest mapping meta data structure
+ * @start: virtual address in the guest address space
+ * @len: size of area
+ *
+ * Returns 0 if for each page in the given range a gmap mapping exists and
+ * the invalidation notification could be set. If the gmap mapping is missing
+ * for one or more pages -EFAULT is returned. If no memory could be allocated
+ * -ENOMEM is returned. This function establishes missing page table entries.
+ */
+int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
+{
+	unsigned long addr;
+	spinlock_t *ptl;
+	pte_t *ptep, entry;
+	pgste_t pgste;
+	int rc = 0;
+
+	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
+		return -EINVAL;
+	down_read(&gmap->mm->mmap_sem);
+	while (len) {
+		/* Convert gmap address and connect the page tables */
+		addr = __gmap_fault(start, gmap);
+		if (IS_ERR_VALUE(addr)) {
+			rc = addr;
+			break;
+		}
+		/* Get the page mapped */
+		if (get_user_pages(current, gmap->mm, addr, 1, 1, 0,
+				   NULL, NULL) != 1) {
+			rc = -EFAULT;
+			break;
+		}
+		/* Walk the process page table, lock and get pte pointer */
+		ptep = get_locked_pte(gmap->mm, addr, &ptl);
+		if (unlikely(!ptep))
+			continue;
+		/* Set notification bit in the pgste of the pte */
+		entry = *ptep;
+		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
+			pgste = pgste_get_lock(ptep);
+			pgste_val(pgste) |= RCP_IN_BIT;
+			pgste_set_unlock(ptep, pgste);
+			start += PAGE_SIZE;
+			len -= PAGE_SIZE;
+		}
+		spin_unlock(ptl);
+	}
+	up_read(&gmap->mm->mmap_sem);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_ipte_notify);
+
+/**
+ * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
+ * @mm: pointer to the process mm_struct
+ * @addr: virtual address in the process address space
+ * @pte: pointer to the page table entry
+ *
+ * This function is assumed to be called with the page table lock held
+ * for the pte to notify.
+ */
+void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
+{
+	unsigned long segment_offset;
+	struct gmap_notifier *nb;
+	struct gmap_pgtable *mp;
+	struct gmap_rmap *rmap;
+	struct page *page;
+
+	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
+	segment_offset = segment_offset * (4096 / sizeof(pte_t));
+	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	spin_lock(&gmap_notifier_lock);
+	list_for_each_entry(rmap, &mp->mapper, list) {
+		list_for_each_entry(nb, &gmap_notifier_list, list)
+			nb->notifier_call(rmap->gmap,
+					  rmap->vmaddr + segment_offset);
+	}
+	spin_unlock(&gmap_notifier_lock);
+}
+
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 						    unsigned long vmaddr)
 {
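One detail of gmap_do_ipte_notify() deserves a worked example: an s390
page table holds 256 eight-byte ptes and maps a 1M segment, so masking
the pte pointer with 255 * sizeof(pte_t) yields its byte offset inside
the 2K table, and scaling by 4096 / sizeof(pte_t) = 512 turns that into
the page offset within the segment. A sketch with a made-up pte address:

	/* hypothetical pte location, for illustration only */
	pte_t *pte = (pte_t *) 0x12345a40;

	/* byte offset inside the 2K page table: the mask is 255 * 8 == 0x7f8,
	 * so 0x12345a40 & 0x7f8 == 0x240, i.e. pte index 0x240 / 8 == 72 */
	unsigned long segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));

	/* scale to the mapped offset: 0x240 * 512 == 72 * 4096 == 0x48000,
	 * so rmap->vmaddr + segment_offset is the guest address of page 72
	 * of the 1M segment backed by this page table */
	segment_offset = segment_offset * (4096 / sizeof(pte_t));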