about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2016-03-08 06:21:41 -0500
committerChristian Borntraeger <borntraeger@de.ibm.com>2016-06-20 03:54:19 -0400
commita9d23e71d7716e394a772686bfd994f4e181b235 (patch)
tree1eb27541f5c1b5c3d9bf504e3cd9306d10b19adb
parenteea3678d4334925bf838e6f4bc88760811a84cd6 (diff)
s390/mm: shadow pages with real guest requested protection
We really want to avoid manually handling protection for nested
virtualization. By shadowing pages with the protection the guest asked us
for, the SIE can handle most protection-related actions for us (e.g.
special handling for MVPG) and we can directly forward protection
exceptions to the guest.

PTEs will now always be shadowed with the correct _PAGE_PROTECT flag.
Unshadowing will take care of any guest changes to the parent PTE and
any host changes to the host PTE. If the host PTE doesn't have the
fitting access rights or is not available, we have to fix it up.

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--  arch/s390/include/asm/gmap.h     |  3
-rw-r--r--  arch/s390/include/asm/pgtable.h  |  2
-rw-r--r--  arch/s390/kvm/gaccess.c          |  2
-rw-r--r--  arch/s390/mm/gmap.c              | 12
-rw-r--r--  arch/s390/mm/pgtable.c           | 16
5 files changed, 15 insertions(+), 20 deletions(-)
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 58e65ee5b2d2..4a47055f58d7 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -110,8 +110,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt);
 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt);
 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
 			   unsigned long *pgt, int *dat_protection);
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
-		     unsigned long paddr, int write);
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
 
 void gmap_register_pte_notifier(struct gmap_notifier *);
 void gmap_unregister_pte_notifier(struct gmap_notifier *);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index a6e7fc8f5b49..c7ebba483f09 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -895,7 +895,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep , int reset);
 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
-		    pte_t *sptep, pte_t *tptep, int write);
+		    pte_t *sptep, pte_t *tptep, pte_t pte);
 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
 
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index ba4985262bce..c5f79c1205cf 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -1109,7 +1109,7 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
 	dat_protection |= pte.p;
 	if (write && dat_protection)
 		return PGM_PROTECTION;
-	rc = gmap_shadow_page(sg, saddr, pte.pfra * 4096, write);
+	rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
 	if (rc)
 		return rc;
 	return 0;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index b02d0d0cc641..a57a87bfeb27 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1743,8 +1743,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
  * gmap_shadow_page - create a shadow page mapping
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
- * @paddr: parent gmap address to get mapped at @saddr
- * @write: =1 map r/w, =0 map r/o
+ * @pte: pte in parent gmap address space to get shadowed
  *
  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  * shadow table structure is incomplete, -ENOMEM if out of memory and
@@ -1752,12 +1751,11 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
  *
  * Called with sg->mm->mmap_sem in read.
  */
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
-		     unsigned long paddr, int write)
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 {
 	struct gmap *parent;
 	struct gmap_rmap *rmap;
-	unsigned long vmaddr;
+	unsigned long vmaddr, paddr;
 	spinlock_t *ptl;
 	pte_t *sptep, *tptep;
 	int rc;
@@ -1771,6 +1769,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
 	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
 
 	while (1) {
+		paddr = pte_val(pte) & PAGE_MASK;
 		vmaddr = __gmap_translate(parent, paddr);
 		if (IS_ERR_VALUE(vmaddr)) {
 			rc = vmaddr;
@@ -1791,8 +1790,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
 			radix_tree_preload_end();
 			break;
 		}
-		rc = ptep_shadow_pte(sg->mm, saddr,
-				     sptep, tptep, write);
+		rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
 		if (rc > 0) {
 			/* Success and a new mapping */
 			gmap_insert_rmap(sg, vmaddr, rmap);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5b02583fbf4c..293130b5aee7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -463,29 +463,27 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 }
 
 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
-		    pte_t *sptep, pte_t *tptep, int write)
+		    pte_t *sptep, pte_t *tptep, pte_t pte)
 {
 	pgste_t spgste, tpgste;
 	pte_t spte, tpte;
 	int rc = -EAGAIN;
 
+	if (!(pte_val(*tptep) & _PAGE_INVALID))
+		return 0;	/* already shadowed */
 	spgste = pgste_get_lock(sptep);
 	spte = *sptep;
 	if (!(pte_val(spte) & _PAGE_INVALID) &&
-	    !(pte_val(spte) & _PAGE_PROTECT)) {
-		rc = 0;
-		if (!(pte_val(*tptep) & _PAGE_INVALID))
-			/* Update existing mapping */
-			ptep_flush_direct(mm, saddr, tptep);
-		else
-			rc = 1;
+	    !((pte_val(spte) & _PAGE_PROTECT) &&
+	      !(pte_val(pte) & _PAGE_PROTECT))) {
 		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 		tpgste = pgste_get_lock(tptep);
 		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
-				(write ? 0 : _PAGE_PROTECT);
+				(pte_val(pte) & _PAGE_PROTECT);
 		/* don't touch the storage key - it belongs to parent pgste */
 		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 		pgste_set_unlock(tptep, tpgste);
+		rc = 1;
 	}
 	pgste_set_unlock(sptep, spgste);
 	return rc;