Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                 |   3
-rw-r--r--  arch/s390/include/asm/hugetlb.h   |  19
-rw-r--r--  arch/s390/include/asm/pgtable.h   | 210
-rw-r--r--  arch/s390/include/asm/setup.h     |   5
-rw-r--r--  arch/s390/include/asm/tlb.h       |   1
-rw-r--r--  arch/s390/kernel/early.c          |   2
-rw-r--r--  arch/s390/mm/fault.c              |   1
-rw-r--r--  arch/s390/mm/gup.c                |  11
-rw-r--r--  arch/s390/mm/pgtable.c            | 108
9 files changed, 341 insertions, 19 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c8af429991d9..ceff7aef2477 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -68,6 +68,7 @@ config S390
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_SYSCALL_TRACEPOINTS
+	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_REGS_AND_STACK_ACCESS_API
@@ -80,6 +81,7 @@ config S390
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
@@ -126,6 +128,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_BH
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+	select HAVE_UID16 if 32BIT
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 2d6e6e380564..593753ee07f3 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -33,6 +33,7 @@ static inline int prepare_hugepage_range(struct file *file,
 }
 
 #define hugetlb_prefault_arch_hook(mm)		do { } while (0)
+#define arch_clear_hugepage_flags(page)	do { } while (0)
 
 int arch_prepare_hugepage(struct page *page);
 void arch_release_hugepage(struct page *page);
@@ -77,23 +78,6 @@ static inline void __pmd_csp(pmd_t *pmdp)
 		"	csp %1,%3"
 		: "=m" (*pmdp)
 		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
-}
-
-static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
-{
-	unsigned long sto = (unsigned long) pmdp -
-				pmd_index(address) * sizeof(pmd_t);
-
-	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
-		asm volatile(
-			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
-			: "=m" (*pmdp)
-			: "m" (*pmdp), "a" (sto),
-			  "a" ((address & HPAGE_MASK))
-		);
-	}
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
 }
 
 static inline void huge_ptep_invalidate(struct mm_struct *mm,
@@ -105,6 +89,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 		__pmd_idte(address, pmdp);
 	else
 		__pmd_csp(pmdp);
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
 }
 
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 6bd7d7483017..979fe3dc0788 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -42,6 +42,7 @@ extern void fault_init(void);
  * tables contain all the necessary information.
  */
 #define update_mmu_cache(vma, address, ptep)	do { } while (0)
+#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)
 
 /*
  * ZERO_PAGE is a global shared page that is always zero; used
@@ -347,6 +348,12 @@ extern struct page *vmemmap;
 
 #define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
 #define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
+#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
+#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)
+
+/* Set of bits not changed in pmd_modify */
+#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
+				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
 
 /* Page status table bits for virtualization */
 #define RCP_ACC_BITS	0xf000000000000000UL
@@ -506,6 +513,30 @@ static inline int pmd_bad(pmd_t pmd)
 	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }
 
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+	return 0;
+}
+
 static inline int pte_none(pte_t pte)
 {
 	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -1159,6 +1190,185 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
 
+static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
+{
+	unsigned long sto = (unsigned long) pmdp -
+				pmd_index(address) * sizeof(pmd_t);
+
+	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+		asm volatile(
+			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
+			: "=m" (*pmdp)
+			: "m" (*pmdp), "a" (sto),
+			  "a" ((address & HPAGE_MASK))
+			: "cc"
+		);
+	}
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t entry)
+{
+	*pmdp = entry;
+}
+
+static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
+{
+	unsigned long pgprot_pmd = 0;
+
+	if (pgprot_val(pgprot) & _PAGE_INVALID) {
+		if (pgprot_val(pgprot) & _PAGE_SWT)
+			pgprot_pmd |= _HPAGE_TYPE_NONE;
+		pgprot_pmd |= _SEGMENT_ENTRY_INV;
+	}
+	if (pgprot_val(pgprot) & _PAGE_RO)
+		pgprot_pmd |= _SEGMENT_ENTRY_RO;
+	return pgprot_pmd;
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
+	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+	return pmd;
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	/* No dirty bit in the segment table entry. */
+	return pmd;
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	/* No referenced bit in the segment table entry. */
+	return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	/* No referenced bit in the segment table entry. */
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address, pmd_t *pmdp)
+{
+	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
+	long tmp, rc;
+	int counter;
+
+	rc = 0;
+	if (MACHINE_HAS_RRBM) {
+		counter = PTRS_PER_PTE >> 6;
+		asm volatile(
+			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
+			"	ogr	%1,%0\n"
+			"	la	%3,0(%4,%3)\n"
+			"	brct	%2,0b\n"
+			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
+			  "+a" (pmd_addr)
+			: "a" (64 * 4096UL) : "cc");
+		rc = !!rc;
+	} else {
+		counter = PTRS_PER_PTE;
+		asm volatile(
+			"0:	rrbe	0,%2\n"
+			"	la	%2,0(%3,%2)\n"
+			"	brc	12,1f\n"
+			"	lhi	%0,1\n"
+			"1:	brct	%1,0b\n"
+			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
+			: "a" (4096UL) : "cc");
+	}
+	return rc;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	__pmd_idte(address, pmdp);
+	pmd_clear(pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
+static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+				     unsigned long address, pmd_t *pmdp)
+{
+	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+static inline void pmdp_invalidate(struct vm_area_struct *vma,
+				   unsigned long address, pmd_t *pmdp)
+{
+	__pmd_idte(address, pmdp);
+}
+
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
+{
+	pmd_t __pmd;
+	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+	return __pmd;
+}
+
+#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return MACHINE_HAS_HPAGE ? 1 : 0;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	if (pmd_trans_huge(pmd))
+		return pmd_val(pmd) >> HPAGE_SHIFT;
+	else
+		return pmd_val(pmd) >> PAGE_SHIFT;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * 31 bit swap entry format:
  * A page-table entry has some bits we have to treat in a special way.
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 87b47ca954f1..8cfd731a18d8 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -81,6 +81,7 @@ extern unsigned int s390_user_mode;
 #define MACHINE_FLAG_SPP	(1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
 #define MACHINE_FLAG_TE		(1UL << 15)
+#define MACHINE_FLAG_RRBM	(1UL << 16)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -99,7 +100,8 @@ extern unsigned int s390_user_mode;
 #define MACHINE_HAS_PFMF	(0)
 #define MACHINE_HAS_SPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
 #define MACHINE_HAS_TE		(0)
+#define MACHINE_HAS_RRBM	(0)
 #else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -112,6 +114,7 @@ extern unsigned int s390_user_mode;
 #define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
+#define MACHINE_HAS_RRBM	(S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
 #endif /* CONFIG_64BIT */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 06e5acbc84bd..b75d7d686684 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -137,6 +137,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #define tlb_start_vma(tlb, vma)			do { } while (0)
 #define tlb_end_vma(tlb, vma)			do { } while (0)
 #define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
 #define tlb_migrate_finish(mm)			do { } while (0)
 
 #endif /* _S390_TLB_H */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 7f4717675c19..00d114445068 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -388,6 +388,8 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
 	if (test_facility(50) && test_facility(73))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+	if (test_facility(66))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
 #endif
 }
 
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ac9122ca1152..04ad4001a289 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -367,6 +367,7 @@ retry:
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			down_read(&mm->mmap_sem);
 			goto retry;
 		}
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index eeaf8023851f..60acb93a4680 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -115,7 +115,16 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 		pmd = *pmdp;
 		barrier();
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		/*
+		 * The pmd_trans_splitting() check below explains why
+		 * pmdp_splitting_flush() has to serialize with
+		 * smp_call_function() against our disabled IRQs, to stop
+		 * this gup-fast code from running while we set the
+		 * splitting bit in the pmd. Returning zero will take
+		 * the slow path that will call wait_split_huge_page()
+		 * if the pmd is still in splitting state.
+		 */
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_huge(pmd))) {
 			if (!gup_huge_pmd(pmdp, pmd, addr, next,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b402991e43d7..c8188a18af05 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -787,6 +787,30 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 		tlb_table_flush(tlb);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void thp_split_vma(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	struct page *page;
+
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		page = follow_page(vma, addr, FOLL_SPLIT);
+	}
+}
+
+void thp_split_mm(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma = mm->mmap;
+
+	while (vma != NULL) {
+		thp_split_vma(vma);
+		vma->vm_flags &= ~VM_HUGEPAGE;
+		vma->vm_flags |= VM_NOHUGEPAGE;
+		vma = vma->vm_next;
+	}
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
@@ -824,6 +848,12 @@ int s390_enable_sie(void)
 	if (!mm)
 		return -ENOMEM;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/* split thp mappings and disable thp for future mappings */
+	thp_split_mm(mm);
+	mm->def_flags |= VM_NOHUGEPAGE;
+#endif
+
 	/* Now lets check again if something happened */
 	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
@@ -866,3 +896,81 @@ bool kernel_page_present(struct page *page)
 	return cc == 0;
 }
 #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
+			   pmd_t *pmdp)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	/* No need to flush TLB
+	 * On s390 reference bits are in storage key and never in TLB */
+	return pmdp_test_and_clear_young(vma, address, pmdp);
+}
+
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	if (pmd_same(*pmdp, entry))
+		return 0;
+	pmdp_invalidate(vma, address, pmdp);
+	set_pmd_at(vma->vm_mm, address, pmdp, entry);
+	return 1;
+}
+
+static void pmdp_splitting_flush_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
+			      (unsigned long *) pmdp)) {
+		/* need to serialize against gup-fast (IRQ disabled) */
+		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
+	}
+}
+
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+	struct list_head *lh = (struct list_head *) pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(lh);
+	else
+		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
+	mm->pmd_huge_pte = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+	struct list_head *lh;
+	pgtable_t pgtable;
+	pte_t *ptep;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	ptep = (pte_t *) pgtable;
+	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	ptep++;
+	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	return pgtable;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */