about summary refs log tree commit diff stats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorDominik Dingel <dingel@linux.vnet.ibm.com>2016-01-15 19:57:07 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-15 20:56:32 -0500
commitfef8953ae4e2b87392434a1a3774694c3f8164f6 (patch)
treefbfe75ac80473261d7ebc6146ac614759563f1d4 /arch/s390/mm
parent4a9e1cda274893eca7d178d7dc265503ccb9d87a (diff)
s390/mm: enable fixup_user_fault retrying
By passing a non-null flag we allow fixup_user_fault to retry, which enables userfaultfd. As during these retries we might drop the mmap_sem, we need to check if that happened and redo the complete chain of actions. Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com> Reviewed-by: Andrea Arcangeli <aarcange@redhat.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Cc: "Jason J. Herne" <jjherne@linux.vnet.ibm.com> Cc: David Rientjes <rientjes@google.com> Cc: Eric B Munson <emunson@akamai.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Dominik Dingel <dingel@linux.vnet.ibm.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/pgtable.c29
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 84bddda8d412..a809fa8e6f8b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -578,17 +578,29 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
578{ 578{
579 unsigned long vmaddr; 579 unsigned long vmaddr;
580 int rc; 580 int rc;
581 bool unlocked;
581 582
582 down_read(&gmap->mm->mmap_sem); 583 down_read(&gmap->mm->mmap_sem);
584
585retry:
586 unlocked = false;
583 vmaddr = __gmap_translate(gmap, gaddr); 587 vmaddr = __gmap_translate(gmap, gaddr);
584 if (IS_ERR_VALUE(vmaddr)) { 588 if (IS_ERR_VALUE(vmaddr)) {
585 rc = vmaddr; 589 rc = vmaddr;
586 goto out_up; 590 goto out_up;
587 } 591 }
588 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags, NULL)) { 592 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
593 &unlocked)) {
589 rc = -EFAULT; 594 rc = -EFAULT;
590 goto out_up; 595 goto out_up;
591 } 596 }
597 /*
598 * In the case that fixup_user_fault unlocked the mmap_sem during
599 * faultin redo __gmap_translate to not race with a map/unmap_segment.
600 */
601 if (unlocked)
602 goto retry;
603
592 rc = __gmap_link(gmap, gaddr, vmaddr); 604 rc = __gmap_link(gmap, gaddr, vmaddr);
593out_up: 605out_up:
594 up_read(&gmap->mm->mmap_sem); 606 up_read(&gmap->mm->mmap_sem);
@@ -714,12 +726,14 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
714 spinlock_t *ptl; 726 spinlock_t *ptl;
715 pte_t *ptep, entry; 727 pte_t *ptep, entry;
716 pgste_t pgste; 728 pgste_t pgste;
729 bool unlocked;
717 int rc = 0; 730 int rc = 0;
718 731
719 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK)) 732 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
720 return -EINVAL; 733 return -EINVAL;
721 down_read(&gmap->mm->mmap_sem); 734 down_read(&gmap->mm->mmap_sem);
722 while (len) { 735 while (len) {
736 unlocked = false;
723 /* Convert gmap address and connect the page tables */ 737 /* Convert gmap address and connect the page tables */
724 addr = __gmap_translate(gmap, gaddr); 738 addr = __gmap_translate(gmap, gaddr);
725 if (IS_ERR_VALUE(addr)) { 739 if (IS_ERR_VALUE(addr)) {
@@ -728,10 +742,13 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
728 } 742 }
729 /* Get the page mapped */ 743 /* Get the page mapped */
730 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE, 744 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
731 NULL)) { 745 &unlocked)) {
732 rc = -EFAULT; 746 rc = -EFAULT;
733 break; 747 break;
734 } 748 }
749 /* While trying to map mmap_sem got unlocked. Let us retry */
750 if (unlocked)
751 continue;
735 rc = __gmap_link(gmap, gaddr, addr); 752 rc = __gmap_link(gmap, gaddr, addr);
736 if (rc) 753 if (rc)
737 break; 754 break;
@@ -792,9 +809,11 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
792 spinlock_t *ptl; 809 spinlock_t *ptl;
793 pgste_t old, new; 810 pgste_t old, new;
794 pte_t *ptep; 811 pte_t *ptep;
812 bool unlocked;
795 813
796 down_read(&mm->mmap_sem); 814 down_read(&mm->mmap_sem);
797retry: 815retry:
816 unlocked = false;
798 ptep = get_locked_pte(mm, addr, &ptl); 817 ptep = get_locked_pte(mm, addr, &ptl);
799 if (unlikely(!ptep)) { 818 if (unlikely(!ptep)) {
800 up_read(&mm->mmap_sem); 819 up_read(&mm->mmap_sem);
@@ -803,8 +822,12 @@ retry:
803 if (!(pte_val(*ptep) & _PAGE_INVALID) && 822 if (!(pte_val(*ptep) & _PAGE_INVALID) &&
804 (pte_val(*ptep) & _PAGE_PROTECT)) { 823 (pte_val(*ptep) & _PAGE_PROTECT)) {
805 pte_unmap_unlock(ptep, ptl); 824 pte_unmap_unlock(ptep, ptl);
825 /*
826 * We do not really care about unlocked. We will retry either
827 * way. But this allows fixup_user_fault to enable userfaultfd.
828 */
806 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE, 829 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
807 NULL)) { 830 &unlocked)) {
808 up_read(&mm->mmap_sem); 831 up_read(&mm->mmap_sem);
809 return -EFAULT; 832 return -EFAULT;
810 } 833 }