author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-02-24 04:18:50 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-03-02 07:44:22 -0500
commit	443a813304ec36d4e81264b6a452a412a6b3ad9b (patch)
tree	dfcbb07c4a35414bb7cd9d8faa08ab3c5ba6e991 /arch/s390/mm
parent	993e0681084c8e84dd870bffedec9410778dfa87 (diff)
s390/kvm: simplify set_guest_storage_key
Git commit ab3f285f227fec62868037e9b1b1fd18294a83b8 "KVM: s390/mm: try a cow on read only pages for key ops" added a fixup_user_fault to set_guest_storage_key to force a copy-on-write if the page is mapped read-only. This was supposed to fix the problem of differing storage keys for shared mappings, e.g. the empty_zero_page. But if the storage key is set before the pte is mapped, the storage key update is done on the pgste, and a later fault will happily map the shared page with the key from the pgste.

Eventually git commit 2faee8ff9dc6f4bfe46f6d2d110add858140fb20 "s390/mm: prevent and break zero page mappings in case of storage keys" fixed this problem for the empty_zero_page. That commit makes sure that guests enabled for storage keys will not use the empty_zero_page at all.

As the call to fixup_user_fault in set_guest_storage_key depends on the order of the storage key operation vs. the fault that maps the pte, it does not really fix anything. Just remove it.

Reviewed-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
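To make the ordering argument concrete, here is a minimal user-space C sketch of the scenario the commit message describes. This is not kernel code: the two-field page model and all names (page_model, model_set_key, model_fault) are hypothetical simplifications. It models how a key update issued before the pte is mapped can only land in the pgste, so a later fault maps the shared page with that key; a copy-on-write fixup inside the key operation never runs in this ordering, because the pte is not mapped read-only, it is not mapped at all.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, drastically simplified model of one guest page:
 * a pte that may be invalid, the storage key of the mapped page,
 * and the shadow key remembered in the pgste. */
struct page_model {
	bool pte_valid;          /* is the pte mapped? */
	unsigned char key;       /* storage key of the mapped page */
	unsigned char pgste_key; /* key remembered in the pgste */
};

/* Model of set_guest_storage_key: with an invalid pte the key
 * update can only be recorded in the pgste. */
static void model_set_key(struct page_model *p, unsigned char key)
{
	if (p->pte_valid)
		p->key = key;
	else
		p->pgste_key = key;
}

/* Model of a later fault: the pte is mapped and the key kept in
 * the pgste is applied to the (possibly shared) page. */
static void model_fault(struct page_model *p)
{
	p->pte_valid = true;
	p->key = p->pgste_key;
}

int main(void)
{
	/* Key operation first, fault second: the update goes to the
	 * pgste, and the fault then maps the shared page with that
	 * key, regardless of any copy-on-write fixup in the key op. */
	struct page_model p = { .pte_valid = false, .key = 0, .pgste_key = 0 };

	model_set_key(&p, 5);
	model_fault(&p);
	printf("key after fault: %u\n", p.key); /* prints 5 */
	return 0;
}

This only illustrates the ordering dependency; the real code paths go through pgste_get_lock/pgste_set_unlock and a page table walk, as the diff below shows.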
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/pgtable.c	17
1 file changed, 0 insertions, 17 deletions
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5109827883ac..6acd7174fe75 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -809,30 +809,13 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	spinlock_t *ptl;
 	pgste_t old, new;
 	pte_t *ptep;
-	bool unlocked;
 
 	down_read(&mm->mmap_sem);
-retry:
-	unlocked = false;
 	ptep = get_locked_pte(mm, addr, &ptl);
 	if (unlikely(!ptep)) {
 		up_read(&mm->mmap_sem);
 		return -EFAULT;
 	}
-	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
-	    (pte_val(*ptep) & _PAGE_PROTECT)) {
-		pte_unmap_unlock(ptep, ptl);
-		/*
-		 * We do not really care about unlocked. We will retry either
-		 * way. But this allows fixup_user_fault to enable userfaultfd.
-		 */
-		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
-				     &unlocked)) {
-			up_read(&mm->mmap_sem);
-			return -EFAULT;
-		}
-		goto retry;
-	}
 
 	new = old = pgste_get_lock(ptep);
 	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |