aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGerald Schaefer <geraldsc@linux.vnet.ibm.com>2015-05-29 09:34:51 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2015-08-03 04:06:12 -0400
commitecf46abdd32a35fcd7dfeef72600a065425532b3 (patch)
tree1156f97f7b770739466421dd616079073e0d844d
parentb54565b86824ecc9f0ad5d0ee69696f38edc50fd (diff)
s390/mm: enable gup code for NUMA
Force get_user_pages() to take the slow path for NUMA migration pages. Signed-off-by: Gerald Schaefer <geraldsc@linux.vnet.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/mm/gup.c10
1 file changed, 10 insertions, 0 deletions
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 1eb41bb3010c..12bbf0e8478f 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -30,6 +30,9 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
30 do { 30 do {
31 pte = *ptep; 31 pte = *ptep;
32 barrier(); 32 barrier();
33 /* Similar to the PMD case, NUMA hinting must take slow path */
34 if (pte_protnone(pte))
35 return 0;
33 if ((pte_val(pte) & mask) != 0) 36 if ((pte_val(pte) & mask) != 0)
34 return 0; 37 return 0;
35 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 38 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -125,6 +128,13 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
125 if (pmd_none(pmd) || pmd_trans_splitting(pmd)) 128 if (pmd_none(pmd) || pmd_trans_splitting(pmd))
126 return 0; 129 return 0;
127 if (unlikely(pmd_large(pmd))) { 130 if (unlikely(pmd_large(pmd))) {
131 /*
132 * NUMA hinting faults need to be handled in the GUP
133 * slowpath for accounting purposes and so that they
134 * can be serialised against THP migration.
135 */
136 if (pmd_protnone(pmd))
137 return 0;
128 if (!gup_huge_pmd(pmdp, pmd, addr, next, 138 if (!gup_huge_pmd(pmdp, pmd, addr, next,
129 write, pages, nr)) 139 write, pages, nr))
130 return 0; 140 return 0;