author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2013-07-23 14:57:57 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2013-08-22 06:20:06 -0400
commit    e509861105a3c1425f3f929bd631f88340b499bf (patch)
tree      0616b1c17c1f88dfb63a3bce0774a3e518f49119 /arch
parent    416fd0ffb14afead5b1feea14bbf33c2277942ef (diff)
s390/mm: cleanup page table definitions
Improve the encoding of the different pte types and the naming of the page,
segment table and region table bits. Due to the different pte encoding the
hugetlbfs primitives need to be adapted as well. To improve compatibility
with common code, make the huge ptes use the encoding of normal ptes. The
conversion between the pte and pmd encoding for a huge pte is done with
set_huge_pte_at and huge_ptep_get. Overall the code is now easier to
understand.

Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/include/asm/hugetlb.h   135
-rw-r--r--  arch/s390/include/asm/pgtable.h   300
-rw-r--r--  arch/s390/kernel/vdso.c             4
-rw-r--r--  arch/s390/lib/uaccess_pt.c         16
-rw-r--r--  arch/s390/mm/dump_pagetables.c     18
-rw-r--r--  arch/s390/mm/gup.c                  6
-rw-r--r--  arch/s390/mm/hugetlbpage.c        104
-rw-r--r--  arch/s390/mm/pageattr.c             2
-rw-r--r--  arch/s390/mm/pgtable.c             59
-rw-r--r--  arch/s390/mm/vmem.c                14
10 files changed, 333 insertions, 325 deletions
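
Before reading the diff, it helps to see the new pte encoding in action. The
sketch below is a plain userspace harness, not part of the patch; the constants
are the bit values this commit introduces in arch/s390/include/asm/pgtable.h,
and the predicates mirror the bit patterns documented in the new comment block:

#include <assert.h>

#define _PAGE_PRESENT 0x001UL	/* SW pte present bit */
#define _PAGE_TYPE    0x002UL	/* SW pte type bit */
#define _PAGE_PROTECT 0x200UL	/* HW read-only bit */
#define _PAGE_INVALID 0x400UL	/* HW invalid bit */

/* (pte & 0x001) == 0x001 */
static int pte_present(unsigned long pte)
{
	return (pte & _PAGE_PRESENT) != 0;
}

/* pte == 0x400 */
static int pte_none(unsigned long pte)
{
	return pte == _PAGE_INVALID;
}

/* (pte & 0x603) == 0x402 */
static int pte_swap(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

int main(void)
{
	assert(pte_none(0x400));		/* empty */
	assert(pte_present(0x201));		/* read-only, clean */
	assert(pte_swap(0x402));		/* swap entry */
	assert(!pte_present(0x400));		/* empty is not present */
	return 0;
}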
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index bd90359d6d22..11eae5f55b70 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -17,6 +17,9 @@
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep);
 
 /*
  * If the arch doesn't supply something else, assume that hugepage
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
 int arch_prepare_hugepage(struct page *page);
 void arch_release_hugepage(struct page *page);
 
-static inline pte_t huge_pte_wrprotect(pte_t pte)
-{
-	pte_val(pte) |= _PAGE_RO;
-	return pte;
-}
-
-static inline int huge_pte_none(pte_t pte)
-{
-	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
-		!(pte_val(pte) & _SEGMENT_ENTRY_RO);
-}
-
-static inline pte_t huge_ptep_get(pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	unsigned long mask;
-
-	if (!MACHINE_HAS_HPAGE) {
-		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
-		if (ptep) {
-			mask = pte_val(pte) &
-				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-			pte = pte_mkhuge(*ptep);
-			pte_val(pte) |= mask;
-		}
-	}
-	return pte;
-}
-
-static inline void __pmd_csp(pmd_t *pmdp)
-{
-	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
-	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
-					       _SEGMENT_ENTRY_INV;
-	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
-	asm volatile(
-		"	csp %1,%3"
-		: "=m" (*pmdp)
-		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
-}
-
-static inline void huge_ptep_invalidate(struct mm_struct *mm,
-					unsigned long address, pte_t *ptep)
-{
-	pmd_t *pmdp = (pmd_t *) ptep;
-
-	if (MACHINE_HAS_IDTE)
-		__pmd_idte(address, pmdp);
-	else
-		__pmd_csp(pmdp);
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-					    unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = huge_ptep_get(ptep);
-
-	huge_ptep_invalidate(mm, addr, ptep);
-	return pte;
-}
-
-#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
-({									    \
-	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
-	if (__changed) {						    \
-		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	    \
-		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
-	}								    \
-	__changed;							    \
-})
-
-#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
-({									\
-	pte_t __pte = huge_ptep_get(__ptep);				\
-	if (huge_pte_write(__pte)) {					\
-		huge_ptep_invalidate(__mm, __addr, __ptep);		\
-		set_huge_pte_at(__mm, __addr, __ptep,			\
-				huge_pte_wrprotect(__pte));		\
-	}								\
-})
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
-					 unsigned long address, pte_t *ptep)
-{
-	huge_ptep_invalidate(vma->vm_mm, address, ptep);
-}
-
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
-{
-	pte_t pte;
-	pmd_t pmd;
-
-	pmd = mk_pmd_phys(page_to_phys(page), pgprot);
-	pte_val(pte) = pmd_val(pmd);
-	return pte;
-}
-
-static inline int huge_pte_write(pte_t pte)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	return pmd_write(pmd);
-}
-
-static inline int huge_pte_dirty(pte_t pte)
-{
-	/* No dirty bit in the segment table entry. */
-	return 0;
-}
-
-static inline pte_t huge_pte_mkwrite(pte_t pte)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
-	return pte;
-}
-
-static inline pte_t huge_pte_mkdirty(pte_t pte)
-{
-	/* No dirty bit in the segment table entry. */
-	return pte;
-}
-
-static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = pte_val(pte);
-	pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
-	return pte;
-}
-
-static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
-				  pte_t *ptep)
-{
-	pmd_clear((pmd_t *) ptep);
-}
-
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+				  pte_t *ptep)
+{
+	pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long address, pte_t *ptep)
+{
+	huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	int changed = !pte_same(huge_ptep_get(ptep), pte);
+	if (changed) {
+		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+	}
+	return changed;
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
+}
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+	return mk_pte(page, pgprot);
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+	return pte_write(pte);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+	return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+	return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+	return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return pte_modify(pte, newprot);
+}
+
 #endif /* _ASM_S390_HUGETLB_H */
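
The hunk above also replaces the huge_ptep_set_access_flags and
huge_ptep_set_wrprotect macros with inline functions built from three
primitives: huge_ptep_get, huge_ptep_get_and_clear and set_huge_pte_at. A
minimal sketch of that update protocol, with pte_t reduced to an unsigned long
and the TLB flush elided (hypothetical stand-ins, for illustration only):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pte_t;	/* stand-in for the kernel type */

static pte_t huge_ptep_get(pte_t *p) { return *p; }
static pte_t huge_ptep_get_and_clear(pte_t *p) { pte_t v = *p; *p = 0; return v; }
static void set_huge_pte_at(pte_t *p, pte_t v) { *p = v; }

/* Mirrors the inline above: only invalidate and re-set when the entry changed. */
static bool huge_ptep_set_access_flags(pte_t *p, pte_t new_pte)
{
	bool changed = huge_ptep_get(p) != new_pte;
	if (changed) {
		huge_ptep_get_and_clear(p);	/* invalidates + flushes in the kernel */
		set_huge_pte_at(p, new_pte);
	}
	return changed;
}

int main(void)
{
	pte_t entry = 0x401;
	printf("changed: %d\n", huge_ptep_set_access_flags(&entry, 0x401)); /* 0 */
	printf("changed: %d\n", huge_ptep_set_access_flags(&entry, 0x005)); /* 1 */
	return 0;
}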
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 75fb726de91f..b09c00b5cfa2 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -217,63 +217,50 @@ extern unsigned long MODULES_END;
 
 /* Hardware bits in the page table entry */
 #define _PAGE_CO	0x100		/* HW Change-bit override */
-#define _PAGE_RO	0x200		/* HW read-only bit  */
+#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
+#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */
 
 /* Software bits in the page table entry */
-#define _PAGE_SWT	0x001		/* SW pte type bit t */
-#define _PAGE_SWX	0x002		/* SW pte type bit x */
-#define _PAGE_SWC	0x004		/* SW pte changed bit */
-#define _PAGE_SWR	0x008		/* SW pte referenced bit */
-#define _PAGE_SWW	0x010		/* SW pte write bit */
+#define _PAGE_PRESENT	0x001		/* SW pte present bit */
+#define _PAGE_TYPE	0x002		/* SW pte type bit */
+#define _PAGE_YOUNG	0x004		/* SW pte young bit */
+#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
+#define _PAGE_WRITE	0x010		/* SW pte write bit */
 #define _PAGE_SPECIAL	0x020		/* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
-				 _PAGE_SWC | _PAGE_SWR)
-
-/* Six different types of pages. */
-#define _PAGE_TYPE_EMPTY	0x400
-#define _PAGE_TYPE_NONE		0x401
-#define _PAGE_TYPE_SWAP		0x403
-#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
-#define _PAGE_TYPE_RO		0x200
-#define _PAGE_TYPE_RW		0x000
-
-/*
- * Only four types for huge pages, using the invalid bit and protection bit
- * of a segment table entry.
- */
-#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
-#define _HPAGE_TYPE_NONE	0x220
-#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO */
-#define _HPAGE_TYPE_RW		0x000
+				 _PAGE_DIRTY | _PAGE_YOUNG)
 
 /*
- * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
- * pte_none and pte_file to find out the pte type WITHOUT holding the page
- * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to
- * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
- * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
- * This change is done while holding the lock, but the intermediate step
- * of a previously valid pte with the hw invalid bit set can be observed by
- * handle_pte_fault. That makes it necessary that all valid pte types with
- * the hw invalid bit set must be distinguishable from the four pte types
- * empty, none, swap and file.
+ * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
+ * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
+ * is used to distinguish present from not-present ptes. It is changed only
+ * with the page table lock held.
  *
- *			irxt  ipte  irxt
- * _PAGE_TYPE_EMPTY	1000   ->   1000
- * _PAGE_TYPE_NONE	1001   ->   1001
- * _PAGE_TYPE_SWAP	1011   ->   1011
- * _PAGE_TYPE_FILE	11?1   ->   11?1
- * _PAGE_TYPE_RO	0100   ->   1100
- * _PAGE_TYPE_RW	0000   ->   1000
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte:
  *
- * pte_none is true for bits combinations 1000, 1010, 1100, 1110
- * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
- * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
+ *			842100000000
+ *			000084210000
+ *			000000008421
+ *			.IR....wdytp
+ * empty		.10....00000
+ * swap			.10....xxx10
+ * file			.11....xxxx0
+ * prot-none, clean	.11....00x01
+ * prot-none, dirty	.10....01x01
+ * read-only, clean	.01....00x01
+ * read-only, dirty	.01....01x01
+ * read-write, clean	.01....10x01
+ * read-write, dirty	.00....11x01
+ *
+ * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
+ * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
+ * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
+ * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
  */
 
 #ifndef CONFIG_64BIT
@@ -287,13 +274,13 @@ extern unsigned long MODULES_END;
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
-#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
-#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
+#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
 #define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
 #define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */
 
 #define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
 
 /* Page status table bits for virtualization */
 #define PGSTE_ACC_BITS	0xf0000000UL
@@ -324,8 +311,8 @@ extern unsigned long MODULES_END;
 
 /* Bits in the region table entry */
 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin */
-#define _REGION_ENTRY_RO	0x200	/* region protection bit */
-#define _REGION_ENTRY_INV	0x20	/* invalid region table entry */
+#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
+#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
@@ -333,11 +320,11 @@ extern unsigned long MODULES_END;
 #define _REGION_ENTRY_LENGTH	0x03	/* region third length */
 
 #define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
-#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
 #define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
-#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
 #define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
-#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
 
 #define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page */
 #define _REGION3_ENTRY_RO	0x200	/* page protection bit */
@@ -346,16 +333,17 @@ extern unsigned long MODULES_END;
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin */
-#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
-#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
+#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
 
 #define _SEGMENT_ENTRY		(0)
-#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
 
 #define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
 #define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
+#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit */
+
 #define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
-#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)
 
 /* Set of bits not changed in pmd_modify */
 #define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
@@ -386,14 +374,13 @@ extern unsigned long MODULES_END;
 /*
  * Page protection definitions.
  */
-#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
-#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
-#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
+#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_PROTECT)
 
-#define PAGE_KERNEL	PAGE_RWC
-#define PAGE_SHARED	PAGE_KERNEL
-#define PAGE_COPY	PAGE_RO
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
 
 /*
  * On s390 the page table entry has an invalid bit and a read-only bit.
@@ -402,29 +389,30 @@ extern unsigned long MODULES_END;
  */
 	 /*xwr*/
 #define __P000	PAGE_NONE
-#define __P001	PAGE_RO
-#define __P010	PAGE_RO
-#define __P011	PAGE_RO
-#define __P100	PAGE_RO
-#define __P101	PAGE_RO
-#define __P110	PAGE_RO
-#define __P111	PAGE_RO
+#define __P001	PAGE_READ
+#define __P010	PAGE_READ
+#define __P011	PAGE_READ
+#define __P100	PAGE_READ
+#define __P101	PAGE_READ
+#define __P110	PAGE_READ
+#define __P111	PAGE_READ
 
 #define __S000	PAGE_NONE
-#define __S001	PAGE_RO
-#define __S010	PAGE_RW
-#define __S011	PAGE_RW
-#define __S100	PAGE_RO
-#define __S101	PAGE_RO
-#define __S110	PAGE_RW
-#define __S111	PAGE_RW
+#define __S001	PAGE_READ
+#define __S010	PAGE_WRITE
+#define __S011	PAGE_WRITE
+#define __S100	PAGE_READ
+#define __S101	PAGE_READ
+#define __S110	PAGE_WRITE
+#define __S111	PAGE_WRITE
 
 /*
  * Segment entry (large page) protection definitions.
  */
-#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
+#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
+				 _SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_WRITE	__pgprot(0)
 
 static inline int mm_exclusive(struct mm_struct *mm)
 {
@@ -467,7 +455,7 @@ static inline int pgd_none(pgd_t pgd)
 {
 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
 		return 0;
-	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
 }
 
 static inline int pgd_bad(pgd_t pgd)
@@ -478,7 +466,7 @@ static inline int pgd_bad(pgd_t pgd)
 	 * invalid for either table entry.
 	 */
 	unsigned long mask =
-		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
 		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
 	return (pgd_val(pgd) & mask) != 0;
 }
@@ -494,7 +482,7 @@ static inline int pud_none(pud_t pud)
 {
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
 		return 0;
-	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
+	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
 }
 
 static inline int pud_large(pud_t pud)
@@ -512,7 +500,7 @@ static inline int pud_bad(pud_t pud)
 	 * invalid for either table entry.
 	 */
 	unsigned long mask =
-		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
 		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
 	return (pud_val(pud) & mask) != 0;
 }
@@ -521,21 +509,18 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
-	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
-	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
+	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
-	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
+	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
 }
 
 static inline int pmd_large(pmd_t pmd)
 {
 #ifdef CONFIG_64BIT
-	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
 #else
 	return 0;
 #endif
@@ -543,7 +528,7 @@ static inline int pmd_large(pmd_t pmd)
 
 static inline int pmd_bad(pmd_t pmd)
 {
-	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
+	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INVALID;
 	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }
 
@@ -563,7 +548,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PMD_WRITE
 static inline int pmd_write(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
 }
 
 static inline int pmd_young(pmd_t pmd)
@@ -571,23 +556,23 @@ static inline int pmd_young(pmd_t pmd)
 	return 0;
 }
 
-static inline int pte_none(pte_t pte)
+static inline int pte_present(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
+	/* Bit pattern: (pte & 0x001) == 0x001 */
+	return (pte_val(pte) & _PAGE_PRESENT) != 0;
 }
 
-static inline int pte_present(pte_t pte)
+static inline int pte_none(pte_t pte)
 {
-	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
-	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
-		(!(pte_val(pte) & _PAGE_INVALID) &&
-		 !(pte_val(pte) & _PAGE_SWT));
+	/* Bit pattern: pte == 0x400 */
+	return pte_val(pte) == _PAGE_INVALID;
 }
 
 static inline int pte_file(pte_t pte)
 {
-	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
-	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
+	/* Bit pattern: (pte & 0x601) == 0x600 */
+	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
+		== (_PAGE_INVALID | _PAGE_PROTECT);
 }
 
 static inline int pte_special(pte_t pte)
@@ -695,7 +680,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 	/* Transfer referenced bit to kvm user bits and pte */
 	if (young) {
 		pgste_val(pgste) |= PGSTE_UR_BIT;
-		pte_val(*ptep) |= _PAGE_SWR;
+		pte_val(*ptep) |= _PAGE_YOUNG;
 	}
 #endif
 	return pgste;
@@ -723,13 +708,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
 		/*
 		 * Without enhanced suppression-on-protection force
 		 * the dirty bit on for all writable ptes.
 		 */
-		pte_val(entry) |= _PAGE_SWC;
-		pte_val(entry) &= ~_PAGE_RO;
+		pte_val(entry) |= _PAGE_DIRTY;
+		pte_val(entry) &= ~_PAGE_PROTECT;
 	}
 	*ptep = entry;
 }
@@ -841,18 +826,18 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  */
 static inline int pte_write(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_SWW) != 0;
+	return (pte_val(pte) & _PAGE_WRITE) != 0;
 }
 
 static inline int pte_dirty(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_SWC) != 0;
+	return (pte_val(pte) & _PAGE_DIRTY) != 0;
 }
 
 static inline int pte_young(pte_t pte)
 {
 #ifdef CONFIG_PGSTE
-	if (pte_val(pte) & _PAGE_SWR)
+	if (pte_val(pte) & _PAGE_YOUNG)
 		return 1;
 #endif
 	return 0;
@@ -880,12 +865,12 @@ static inline void pud_clear(pud_t *pud)
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 }
 
 /*
@@ -896,49 +881,45 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte_val(pte) &= _PAGE_CHG_MASK;
 	pte_val(pte) |= pgprot_val(newprot);
-	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
-		pte_val(pte) &= ~_PAGE_RO;
+	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_SWW;
-	/* Do not clobber _PAGE_TYPE_NONE pages!  */
-	if (!(pte_val(pte) & _PAGE_INVALID))
-		pte_val(pte) |= _PAGE_RO;
+	pte_val(pte) &= ~_PAGE_WRITE;
+	pte_val(pte) |= _PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SWW;
-	if (pte_val(pte) & _PAGE_SWC)
-		pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_WRITE;
+	if (pte_val(pte) & _PAGE_DIRTY)
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_SWC;
-	/* Do not clobber _PAGE_TYPE_NONE pages!  */
-	if (!(pte_val(pte) & _PAGE_INVALID))
-		pte_val(pte) |= _PAGE_RO;
+	pte_val(pte) &= ~_PAGE_DIRTY;
+	pte_val(pte) |= _PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SWC;
-	if (pte_val(pte) & _PAGE_SWW)
-		pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_DIRTY;
+	if (pte_val(pte) & _PAGE_WRITE)
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
 #ifdef CONFIG_PGSTE
-	pte_val(pte) &= ~_PAGE_SWR;
+	pte_val(pte) &= ~_PAGE_YOUNG;
 #endif
 	return pte;
 }
@@ -957,7 +938,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+	pte_val(pte) |= _PAGE_LARGE;
 	return pte;
 }
 #endif
@@ -1076,7 +1057,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pte = *ptep;
 	if (!mm_exclusive(mm))
 		__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1139,7 +1120,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 
 	pte = *ptep;
 	__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1172,7 +1153,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	pte = *ptep;
 	if (!full)
 		__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1248,10 +1229,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 	unsigned long physpage = page_to_phys(page);
 	pte_t __pte = mk_pte_phys(physpage, pgprot);
 
-	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
-		pte_val(__pte) |= _PAGE_SWC;
-		pte_val(__pte) &= ~_PAGE_RO;
-	}
+	if (pte_write(__pte) && PageDirty(page))
+		__pte = pte_mkdirty(__pte);
 	return __pte;
 }
 
@@ -1313,7 +1292,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 	unsigned long sto = (unsigned long) pmdp -
 			    pmd_index(address) * sizeof(pmd_t);
 
-	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
 		asm volatile(
 			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
 			: "=m" (*pmdp)
@@ -1324,18 +1303,31 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 	}
 }
 
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+					       _SEGMENT_ENTRY_INVALID;
+	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+	asm volatile(
+		"	csp %1,%3"
+		: "=m" (*pmdp)
+		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
 	/*
-	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
 	 * Convert to segment table entry format.
 	 */
 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
 		return pgprot_val(SEGMENT_NONE);
-	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
-		return pgprot_val(SEGMENT_RO);
-	return pgprot_val(SEGMENT_RW);
+	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+		return pgprot_val(SEGMENT_READ);
+	return pgprot_val(SEGMENT_WRITE);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1354,9 +1346,9 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	/* Do not clobber _HPAGE_TYPE_NONE pages! */
-	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	/* Do not clobber PROT_NONE pages! */
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INVALID))
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
@@ -1378,7 +1370,7 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
 {
-	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
 		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
 	*pmdp = entry;
 }
@@ -1391,7 +1383,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 
@@ -1510,10 +1502,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
  * exception will occur instead of a page translation exception. The
  * specifiation exception has the bad habit not to store necessary
  * information in the lowcore.
- * Bit 21 and bit 22 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 30 and 31 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 21, 22, 30 and 31 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
  * This leaves the bits 1-19 and bits 24-29 to store type and offset.
  * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
  * plus 24 for the offset.
@@ -1527,10 +1517,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
  * exception will occur instead of a page translation exception. The
  * specifiation exception has the bad habit not to store necessary
  * information in the lowcore.
- * Bit 53 and bit 54 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 62 and 63 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 53, 54, 62 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
  * This leaves the bits 0-51 and bits 56-61 to store type and offset.
  * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
  * plus 56 for the offset.
@@ -1547,7 +1535,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
 	pte_t pte;
 	offset &= __SWP_OFFSET_MASK;
-	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
+	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
 		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
 	return pte;
 }
@@ -1570,7 +1558,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 
 #define pgoff_to_pte(__off) \
 	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
-		   | _PAGE_TYPE_FILE })
+		   | _PAGE_INVALID | _PAGE_PROTECT })
 
 #endif /* !__ASSEMBLY__ */
 
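
The heart of the pgtable.h changes is software dirty tracking: _PAGE_PROTECT
stays set until a pte is both writable and dirty, so the first store to a
clean page still faults and lets the kernel mark it dirty. A small userspace
sketch of that interplay, using the bit values from the patch (the harness
itself is illustrative, not kernel code):

#include <assert.h>

#define _PAGE_PRESENT 0x001UL
#define _PAGE_DIRTY   0x008UL
#define _PAGE_WRITE   0x010UL
#define _PAGE_PROTECT 0x200UL

static unsigned long pte_wrprotect(unsigned long pte)
{
	pte &= ~_PAGE_WRITE;
	pte |= _PAGE_PROTECT;
	return pte;
}

static unsigned long pte_mkdirty(unsigned long pte)
{
	pte |= _PAGE_DIRTY;
	if (pte & _PAGE_WRITE)
		pte &= ~_PAGE_PROTECT;	/* writable + dirty: allow HW writes */
	return pte;
}

static unsigned long pte_mkwrite(unsigned long pte)
{
	pte |= _PAGE_WRITE;
	if (pte & _PAGE_DIRTY)
		pte &= ~_PAGE_PROTECT;
	return pte;
}

int main(void)
{
	/* PAGE_WRITE from the patch: present, writable, still HW-protected */
	unsigned long pte = _PAGE_PRESENT | _PAGE_WRITE | _PAGE_PROTECT;

	assert(pte & _PAGE_PROTECT);	/* clean: first store will fault */
	pte = pte_mkdirty(pte);
	assert(!(pte & _PAGE_PROTECT));	/* dirty + writable: stores go through */
	pte = pte_wrprotect(pte);
	assert(pte & _PAGE_PROTECT);	/* write-protected again */
	pte = pte_mkwrite(pte);
	assert(!(pte & _PAGE_PROTECT));	/* still dirty, so protection drops */
	return 0;
}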
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 3cf20930574e..05d75c413137 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 
 	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
 		    PAGE_SIZE << SEGMENT_ORDER);
-	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) page_table, _PAGE_INVALID,
 		    256*sizeof(unsigned long));
 
 	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
-	*(unsigned long *) page_table = _PAGE_RO + page_frame;
+	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
 
 	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
 	aste = psal + 32;
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 50ea137a2d3c..1694d738b175 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
 	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
 		table = table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x39UL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION2:
 		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x3aUL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION3:
 		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV))
+		if (unlikely(*table & _REGION_ENTRY_INVALID))
 			return -0x3bUL;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_SEGMENT:
 		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INV))
+		if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
 			return -0x10UL;
 		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
-			if (write && (*table & _SEGMENT_ENTRY_RO))
+			if (write && (*table & _SEGMENT_ENTRY_PROTECT))
 				return -0x04UL;
 			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
 				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
 	table = table + ((address >> 12) & 0xff);
 	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (*table & _PAGE_RO))
+	if (write && (*table & _PAGE_PROTECT))
 		return -0x04UL;
 	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
 	unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
 	table = table + ((address >> 20) & 0x7ff);
-	if (unlikely(*table & _SEGMENT_ENTRY_INV))
+	if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
 		return -0x10UL;
 	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
 	table = table + ((address >> 12) & 0xff);
 	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (*table & _PAGE_RO))
+	if (write && (*table & _PAGE_PROTECT))
 		return -0x04UL;
 	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
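
follow_table() selects the entry at each level purely by shifting the virtual
address: 11-bit indices (mask 0x7ff) for the region and segment tables, and an
8-bit index (mask 0xff) for the page table. A tiny sketch of that index
computation (illustrative only; the shift values are taken verbatim from the
code above, the example address is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long address = 0x0000123456789abcUL;

	printf("region1 index: %lx\n", (address >> 53) & 0x7ff);
	printf("region2 index: %lx\n", (address >> 42) & 0x7ff);
	printf("region3 index: %lx\n", (address >> 31) & 0x7ff);
	printf("segment index: %lx\n", (address >> 20) & 0x7ff);
	printf("page index:    %lx\n", (address >> 12) & 0xff);
	return 0;
}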
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 3ad65b04ac15..46d517c3c763 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
 		seq_printf(m, "I\n");
 		return;
 	}
-	seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+	seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
 	seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : "   ");
 	seq_putc(m, '\n');
 }
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 }
 
 /*
- * The actual page table walker functions. In order to keep the implementation
- * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
- * flags to note_page() if a region, segment or page table entry is invalid or
- * read-only.
- * After all it's just a hint that the current level being walked contains an
- * invalid or read-only entry.
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
  */
 static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 			   pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
 		st->current_address = addr;
 		pte = pte_offset_kernel(pmd, addr);
-		prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+		prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
 		note_page(m, st, prot, 4);
 		addr += PAGE_SIZE;
 	}
 }
 
 #ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
 #else
 #define _PMD_PROT_MASK 0
 #endif
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 1f5315d1215c..5d758db27bdc 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	pte_t *ptep, pte;
 	struct page *page;
 
-	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
 	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
 	do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	struct page *head, *page, *tail;
 	int refs;
 
-	result = write ? 0 : _SEGMENT_ENTRY_RO;
-	mask = result | _SEGMENT_ENTRY_INV;
+	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+	mask = result | _SEGMENT_ENTRY_INVALID;
 	if ((pmd_val(pmd) & mask) != result)
 		return 0;
 	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 121089d57802..b0bd0ae17796 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -8,21 +8,107 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 
+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+	int none, prot;
+	pmd_t pmd;
+
+	/*
+	 * Convert encoding		pte bits	pmd bits
+	 *				.IR.....wdtp	..R...I.....
+	 * empty			.10.....0000 -> ..0...1.....
+	 * prot-none, clean		.11.....0001 -> ..1...1.....
+	 * prot-none, dirty		.10.....0101 -> ..1...1.....
+	 * read-only, clean		.01.....0001 -> ..1...0.....
+	 * read-only, dirty		.01.....0101 -> ..1...0.....
+	 * read-write, clean		.01.....1001 -> ..0...0.....
+	 * read-write, dirty		.00.....1101 -> ..0...0.....
+	 * Huge ptes are dirty by definition, a clean pte is made dirty
+	 * by the conversion.
+	 */
+	if (pte_present(pte)) {
+		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+		if (pte_val(pte) & _PAGE_INVALID)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+		none = (pte_val(pte) & _PAGE_PRESENT) &&
+			(pte_val(pte) & _PAGE_INVALID);
+		prot = (pte_val(pte) & _PAGE_PROTECT);
+		if (prot || none)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	} else
+		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+	return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+	pte_t pte;
+
+	/*
+	 * Convert encoding		pmd bits	pte bits
+	 *				..R...I.....	.IR.....wdtp
+	 * empty			..0...1..... -> .10.....0000
+	 * prot-none, young		..1...1..... -> .10.....0101
+	 * read-only, young		..1...0..... -> .01.....0101
+	 * read-write, young		..0...0..... -> .00.....1101
+	 * Huge ptes are dirty by definition
+	 */
+	if (pmd_present(pmd)) {
+		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+			(pmd_val(pmd) & PAGE_MASK);
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+			pte_val(pte) |= _PAGE_INVALID;
+		else {
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+				pte_val(pte) |= _PAGE_PROTECT;
+			else
+				pte_val(pte) |= _PAGE_WRITE;
+		}
+	} else
+		pte_val(pte) = _PAGE_INVALID;
+	return pte;
+}
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *pteptr, pte_t pteval)
+		     pte_t *ptep, pte_t pte)
 {
-	pmd_t *pmdp = (pmd_t *) pteptr;
-	unsigned long mask;
+	pmd_t pmd;
 
+	pmd = __pte_to_pmd(pte);
 	if (!MACHINE_HAS_HPAGE) {
-		pteptr = (pte_t *) pte_page(pteval)[1].index;
-		mask = pte_val(pteval) &
-			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
-	}
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= pte_page(pte)[1].index;
+	} else
+		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+	*(pmd_t *) ptep = pmd;
+}
 
-	pmd_val(*pmdp) = pte_val(pteval);
+pte_t huge_ptep_get(pte_t *ptep)
+{
+	unsigned long origin;
+	pmd_t pmd;
+
+	pmd = *(pmd_t *) ptep;
+	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= *(unsigned long *) origin;
+	}
+	return __pmd_to_pte(pmd);
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep)
+{
+	pmd_t *pmdp = (pmd_t *) ptep;
+	pte_t pte = huge_ptep_get(ptep);
+
+	if (MACHINE_HAS_IDTE)
+		__pmd_idte(addr, pmdp);
+	else
+		__pmd_csp(pmdp);
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	return pte;
 }
 
 int arch_prepare_hugepage(struct page *page)
@@ -58,7 +144,7 @@ void arch_release_hugepage(struct page *page)
 	ptep = (pte_t *) page[1].index;
 	if (!ptep)
 		return;
-	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) ptep, _PAGE_INVALID,
 		    PTRS_PER_PTE * sizeof(pte_t));
 	page_table_free(&init_mm, (unsigned long *) ptep);
 	page[1].index = 0;
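
set_huge_pte_at() and huge_ptep_get() now funnel through __pte_to_pmd() and
__pmd_to_pte(). The sketch below reduces that conversion to the
protection/invalid bits on plain unsigned longs; PAGE_MASK handling and the
!MACHINE_HAS_HPAGE software path are deliberately omitted, and the helpers are
hypothetical stand-ins for the kernel functions:

#include <assert.h>

#define _PAGE_PRESENT		0x001UL
#define _PAGE_DIRTY		0x008UL
#define _PAGE_WRITE		0x010UL
#define _PAGE_PROTECT		0x200UL
#define _PAGE_INVALID		0x400UL
#define _PAGE_LARGE		0x800UL
#define _SEGMENT_ENTRY_INVALID	0x020UL
#define _SEGMENT_ENTRY_PROTECT	0x200UL

static unsigned long pte_to_pmd(unsigned long pte)
{
	unsigned long pmd = 0;

	if (!(pte & _PAGE_PRESENT))
		return _SEGMENT_ENTRY_INVALID;		/* empty */
	if (pte & _PAGE_INVALID)
		pmd |= _SEGMENT_ENTRY_INVALID;		/* prot-none */
	if ((pte & _PAGE_PROTECT) || (pte & _PAGE_INVALID))
		pmd |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static unsigned long pmd_to_pte(unsigned long pmd)
{
	/* huge ptes come back dirty by definition */
	unsigned long pte = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY;

	if (pmd & _SEGMENT_ENTRY_INVALID)
		pte |= _PAGE_INVALID;
	else if (pmd & _SEGMENT_ENTRY_PROTECT)
		pte |= _PAGE_PROTECT;
	else
		pte |= _PAGE_WRITE;
	return pte;
}

int main(void)
{
	/* read-only pte -> protected, valid segment entry */
	unsigned long pmd = pte_to_pmd(_PAGE_PRESENT | _PAGE_PROTECT);
	assert(pmd == _SEGMENT_ENTRY_PROTECT);

	/* ...and back: still read-only, now marked large and dirty */
	unsigned long pte = pmd_to_pte(pmd);
	assert((pte & _PAGE_PROTECT) && !(pte & _PAGE_WRITE));
	assert(pte & _PAGE_LARGE);
	return 0;
}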
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 80adfbf75065..990397420e6b 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	pte = pte_offset_kernel(pmd, address);
 	if (!enable) {
 		__ptep_ipte(address, pte);
-		pte_val(*pte) = _PAGE_TYPE_EMPTY;
+		pte_val(*pte) = _PAGE_INVALID;
 		continue;
 	}
 	pte_val(*pte) = __pa(address);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a8154a1a2c94..b9d35d63934e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
 	struct gmap_rmap *rmap;
 	struct page *page;
 
-	if (*table & _SEGMENT_ENTRY_INV)
+	if (*table & _SEGMENT_ENTRY_INVALID)
 		return 0;
 	page = pfn_to_page(*table >> PAGE_SHIFT);
 	mp = (struct gmap_pgtable *) page->index;
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
 		kfree(rmap);
 		break;
 	}
-	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
 	return 1;
 }
 
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
 	crst_table_init(new, init);
-	if (*table & _REGION_ENTRY_INV) {
+	if (*table & _REGION_ENTRY_INVALID) {
 		list_add(&page->lru, &gmap->crst_list);
 		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
 			(*table & _REGION_ENTRY_TYPE_MASK);
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the guest addr space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 42) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 31) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 20) & 0x7ff);
 
 		/* Clear segment table entry in guest address space. */
 		flush |= gmap_unlink_segment(gmap, table);
-		*table = _SEGMENT_ENTRY_INV;
+		*table = _SEGMENT_ENTRY_INVALID;
 	}
 out:
 	spin_unlock(&gmap->mm->page_table_lock);
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 42) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 31) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
363 363
364 /* Store 'from' address in an invalid segment table entry. */ 364 /* Store 'from' address in an invalid segment table entry. */
365 flush |= gmap_unlink_segment(gmap, table); 365 flush |= gmap_unlink_segment(gmap, table);
366 *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); 366 *table = (from + off) | (_SEGMENT_ENTRY_INVALID |
367 _SEGMENT_ENTRY_PROTECT);
367 } 368 }
368 spin_unlock(&gmap->mm->page_table_lock); 369 spin_unlock(&gmap->mm->page_table_lock);
369 up_read(&gmap->mm->mmap_sem); 370 up_read(&gmap->mm->mmap_sem);
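gmap_map_segment() runs the same walk but populates missing levels on the way down: when an entry still has the invalid bit set, gmap_alloc_table() installs a freshly initialized lower-level table before the walk continues. A hedged sketch of that allocate-on-demand step; descend(), alloc_level() and the bit values are assumptions standing in for the patch's helpers.

    #define ENT_INVALID 0x20UL          /* assumed invalid bit */
    #define ENT_ORIGIN  (~0xfffUL)      /* assumed origin mask */

    /* Stand-in for gmap_alloc_table(): install a fresh table whose
     * entries are initialized to 'empty'; returns 0 or -ENOMEM. */
    extern int alloc_level(unsigned long *entry, unsigned long empty);

    static unsigned long *descend(unsigned long *entry, unsigned long empty)
    {
            if ((*entry & ENT_INVALID) && alloc_level(entry, empty))
                    return NULL;        /* allocation failed */
            return (unsigned long *)(*entry & ENT_ORIGIN);
    }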
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
384 unsigned long *table; 385 unsigned long *table;
385 386
386 table = gmap->table + ((address >> 53) & 0x7ff); 387 table = gmap->table + ((address >> 53) & 0x7ff);
387 if (unlikely(*table & _REGION_ENTRY_INV)) 388 if (unlikely(*table & _REGION_ENTRY_INVALID))
388 return ERR_PTR(-EFAULT); 389 return ERR_PTR(-EFAULT);
389 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 390 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
390 table = table + ((address >> 42) & 0x7ff); 391 table = table + ((address >> 42) & 0x7ff);
391 if (unlikely(*table & _REGION_ENTRY_INV)) 392 if (unlikely(*table & _REGION_ENTRY_INVALID))
392 return ERR_PTR(-EFAULT); 393 return ERR_PTR(-EFAULT);
393 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 394 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
394 table = table + ((address >> 31) & 0x7ff); 395 table = table + ((address >> 31) & 0x7ff);
395 if (unlikely(*table & _REGION_ENTRY_INV)) 396 if (unlikely(*table & _REGION_ENTRY_INVALID))
396 return ERR_PTR(-EFAULT); 397 return ERR_PTR(-EFAULT);
397 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 398 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
398 table = table + ((address >> 20) & 0x7ff); 399 table = table + ((address >> 20) & 0x7ff);
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
422 return PTR_ERR(segment_ptr); 423 return PTR_ERR(segment_ptr);
423 /* Convert the gmap address to an mm address. */ 424 /* Convert the gmap address to an mm address. */
424 segment = *segment_ptr; 425 segment = *segment_ptr;
425 if (!(segment & _SEGMENT_ENTRY_INV)) { 426 if (!(segment & _SEGMENT_ENTRY_INVALID)) {
426 page = pfn_to_page(segment >> PAGE_SHIFT); 427 page = pfn_to_page(segment >> PAGE_SHIFT);
427 mp = (struct gmap_pgtable *) page->index; 428 mp = (struct gmap_pgtable *) page->index;
428 return mp->vmaddr | (address & ~PMD_MASK); 429 return mp->vmaddr | (address & ~PMD_MASK);
429 } else if (segment & _SEGMENT_ENTRY_RO) { 430 } else if (segment & _SEGMENT_ENTRY_PROTECT) {
430 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; 431 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
431 return vmaddr | (address & ~PMD_MASK); 432 return vmaddr | (address & ~PMD_MASK);
432 } 433 }
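With the renamed bits, the three states of a gmap segment entry read directly off two flags, exactly as __gmap_translate() tests them: invalid clear means a page table is attached, invalid plus protect means an unconnected entry that still carries the host vmaddr, and invalid alone means nothing is mapped. A sketch of that three-way decode; the enum and helper are hypothetical and the bit values are assumed.

    #define SEG_INVALID 0x20UL          /* assumed _SEGMENT_ENTRY_INVALID */
    #define SEG_PROTECT 0x200UL         /* assumed _SEGMENT_ENTRY_PROTECT */

    enum gmap_seg_state {
            GMAP_SEG_PRESENT,           /* page table attached */
            GMAP_SEG_UNCONNECTED,       /* invalid entry holding host vmaddr */
            GMAP_SEG_NONE,              /* nothing mapped here */
    };

    static enum gmap_seg_state seg_state(unsigned long entry)
    {
            if (!(entry & SEG_INVALID))
                    return GMAP_SEG_PRESENT;
            if (entry & SEG_PROTECT)
                    return GMAP_SEG_UNCONNECTED;
            return GMAP_SEG_NONE;
    }

The same decode drives the retry loop in __gmap_fault() below: a PRESENT entry translates immediately, an UNCONNECTED one triggers gmap_connect_pgtable(), and NONE fails the fault.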
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
517 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 518 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
518 mp = (struct gmap_pgtable *) page->index; 519 mp = (struct gmap_pgtable *) page->index;
519 list_for_each_entry_safe(rmap, next, &mp->mapper, list) { 520 list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
520 *rmap->entry = 521 *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
521 _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; 522 _SEGMENT_ENTRY_PROTECT);
522 list_del(&rmap->list); 523 list_del(&rmap->list);
523 kfree(rmap); 524 kfree(rmap);
524 flush = 1; 525 flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
545 /* Convert the gmap address to an mm address. */ 546 /* Convert the gmap address to an mm address. */
546 while (1) { 547 while (1) {
547 segment = *segment_ptr; 548 segment = *segment_ptr;
548 if (!(segment & _SEGMENT_ENTRY_INV)) { 549 if (!(segment & _SEGMENT_ENTRY_INVALID)) {
549 /* Page table is present */ 550 /* Page table is present */
550 page = pfn_to_page(segment >> PAGE_SHIFT); 551 page = pfn_to_page(segment >> PAGE_SHIFT);
551 mp = (struct gmap_pgtable *) page->index; 552 mp = (struct gmap_pgtable *) page->index;
552 return mp->vmaddr | (address & ~PMD_MASK); 553 return mp->vmaddr | (address & ~PMD_MASK);
553 } 554 }
554 if (!(segment & _SEGMENT_ENTRY_RO)) 555 if (!(segment & _SEGMENT_ENTRY_PROTECT))
555 /* Nothing mapped in the gmap address space. */ 556 /* Nothing mapped in the gmap address space. */
556 break; 557 break;
557 rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap); 558 rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
586 while (address < to) { 587 while (address < to) {
587 /* Walk the gmap address space page table */ 588 /* Walk the gmap address space page table */
588 table = gmap->table + ((address >> 53) & 0x7ff); 589 table = gmap->table + ((address >> 53) & 0x7ff);
589 if (unlikely(*table & _REGION_ENTRY_INV)) { 590 if (unlikely(*table & _REGION_ENTRY_INVALID)) {
590 address = (address + PMD_SIZE) & PMD_MASK; 591 address = (address + PMD_SIZE) & PMD_MASK;
591 continue; 592 continue;
592 } 593 }
593 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 594 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
594 table = table + ((address >> 42) & 0x7ff); 595 table = table + ((address >> 42) & 0x7ff);
595 if (unlikely(*table & _REGION_ENTRY_INV)) { 596 if (unlikely(*table & _REGION_ENTRY_INVALID)) {
596 address = (address + PMD_SIZE) & PMD_MASK; 597 address = (address + PMD_SIZE) & PMD_MASK;
597 continue; 598 continue;
598 } 599 }
599 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 600 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
600 table = table + ((address >> 31) & 0x7ff); 601 table = table + ((address >> 31) & 0x7ff);
601 if (unlikely(*table & _REGION_ENTRY_INV)) { 602 if (unlikely(*table & _REGION_ENTRY_INVALID)) {
602 address = (address + PMD_SIZE) & PMD_MASK; 603 address = (address + PMD_SIZE) & PMD_MASK;
603 continue; 604 continue;
604 } 605 }
605 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 606 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
606 table = table + ((address >> 20) & 0x7ff); 607 table = table + ((address >> 20) & 0x7ff);
607 if (unlikely(*table & _SEGMENT_ENTRY_INV)) { 608 if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
608 address = (address + PMD_SIZE) & PMD_MASK; 609 address = (address + PMD_SIZE) & PMD_MASK;
609 continue; 610 continue;
610 } 611 }
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
687 continue; 688 continue;
688 /* Set notification bit in the pgste of the pte */ 689 /* Set notification bit in the pgste of the pte */
689 entry = *ptep; 690 entry = *ptep;
690 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) { 691 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
691 pgste = pgste_get_lock(ptep); 692 pgste = pgste_get_lock(ptep);
692 pgste_val(pgste) |= PGSTE_IN_BIT; 693 pgste_val(pgste) |= PGSTE_IN_BIT;
693 pgste_set_unlock(ptep, pgste); 694 pgste_set_unlock(ptep, pgste);
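The gate in gmap_ipte_notify() arms the notification bit only when neither _PAGE_INVALID nor _PAGE_PROTECT is set, i.e. when the pte is currently valid and writable and is therefore guaranteed to raise an invalidation or protection event later. A minimal sketch of that predicate, with assumed bit values:

    #define PTE_INVALID 0x400UL         /* assumed _PAGE_INVALID */
    #define PTE_PROTECT 0x200UL         /* assumed _PAGE_PROTECT */

    /* A pte can carry the IN (notify) bit only while it is valid and
     * writable; otherwise no future ipte/protection event is certain. */
    static int pte_can_arm_notifier(unsigned long pte)
    {
            return (pte & (PTE_INVALID | PTE_PROTECT)) == 0;
    }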
@@ -752,7 +753,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
752 page->index = (unsigned long) mp; 753 page->index = (unsigned long) mp;
753 atomic_set(&page->_mapcount, 3); 754 atomic_set(&page->_mapcount, 3);
754 table = (unsigned long *) page_to_phys(page); 755 table = (unsigned long *) page_to_phys(page);
755 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); 756 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
756 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); 757 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
757 return table; 758 return table;
758 } 759 }
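page_table_alloc_pgste() lays a 4K page out as two halves: the first 2K hold 256 ptes, now initialized to _PAGE_INVALID instead of the old _PAGE_TYPE_EMPTY, and the second 2K hold the matching pgstes, which start out zero. A sketch of that initialization, assuming clear_table() simply replicates a value across a range (modelled here, not the kernel implementation):

    #include <stddef.h>

    #define PTRS_PER_PTE 256
    #define PAGE_SIZE    4096UL
    #define PTE_INVALID  0x400UL        /* assumed _PAGE_INVALID */

    /* Model of clear_table(): replicate val over len bytes. */
    static void clear_table(unsigned long *p, unsigned long val, size_t len)
    {
            size_t i;

            for (i = 0; i < len / sizeof(*p); i++)
                    p[i] = val;
    }

    static void init_pgste_page(unsigned long *table)
    {
            clear_table(table, PTE_INVALID, PAGE_SIZE / 2);       /* ptes */
            clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE / 2);  /* pgstes */
    }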
@@ -878,7 +879,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
878 pgtable_page_ctor(page); 879 pgtable_page_ctor(page);
879 atomic_set(&page->_mapcount, 1); 880 atomic_set(&page->_mapcount, 1);
880 table = (unsigned long *) page_to_phys(page); 881 table = (unsigned long *) page_to_phys(page);
881 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); 882 clear_table(table, _PAGE_INVALID, PAGE_SIZE);
882 spin_lock_bh(&mm->context.list_lock); 883 spin_lock_bh(&mm->context.list_lock);
883 list_add(&page->lru, &mm->context.pgtable_list); 884 list_add(&page->lru, &mm->context.pgtable_list);
884 } else { 885 } else {
@@ -1198,9 +1199,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1198 list_del(lh); 1199 list_del(lh);
1199 } 1200 }
1200 ptep = (pte_t *) pgtable; 1201 ptep = (pte_t *) pgtable;
1201 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 1202 pte_val(*ptep) = _PAGE_INVALID;
1202 ptep++; 1203 ptep++;
1203 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 1204 pte_val(*ptep) = _PAGE_INVALID;
1204 return pgtable; 1205 return pgtable;
1205} 1206}
1206 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1207 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8b268fcc4612..e1299d40818d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
69 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); 69 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
70 if (!pte) 70 if (!pte)
71 return NULL; 71 return NULL;
72 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, 72 clear_table((unsigned long *) pte, _PAGE_INVALID,
73 PTRS_PER_PTE * sizeof(pte_t)); 73 PTRS_PER_PTE * sizeof(pte_t));
74 return pte; 74 return pte;
75 } 75 }
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
101 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { 101 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
102 pud_val(*pu_dir) = __pa(address) | 102 pud_val(*pu_dir) = __pa(address) |
103 _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | 103 _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
104 (ro ? _REGION_ENTRY_RO : 0); 104 (ro ? _REGION_ENTRY_PROTECT : 0);
105 address += PUD_SIZE; 105 address += PUD_SIZE;
106 continue; 106 continue;
107 } 107 }
@@ -118,7 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
118 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { 118 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
119 pmd_val(*pm_dir) = __pa(address) | 119 pmd_val(*pm_dir) = __pa(address) |
120 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | 120 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
121 (ro ? _SEGMENT_ENTRY_RO : 0); 121 (ro ? _SEGMENT_ENTRY_PROTECT : 0);
122 address += PMD_SIZE; 122 address += PMD_SIZE;
123 continue; 123 continue;
124 } 124 }
@@ -131,7 +131,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
131 } 131 }
132 132
133 pt_dir = pte_offset_kernel(pm_dir, address); 133 pt_dir = pte_offset_kernel(pm_dir, address);
134 pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0); 134 pte_val(*pt_dir) = __pa(address) |
135 pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
135 address += PAGE_SIZE; 136 address += PAGE_SIZE;
136 } 137 }
137 ret = 0; 138 ret = 0;
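vmem_add_mem() always installs the largest mapping that fits: a 2G large region-third entry when the range is PUD-aligned, a 1M large segment entry when PMD-aligned, and a 4K pte otherwise, with the read-only variant now expressed through the *_PROTECT bit or PAGE_KERNEL_RO rather than a bare _RO flag. A sketch of the size selection; the constants and helper are assumptions for illustration.

    #define PUD_SIZE (1UL << 31)        /* 2G region-third large page */
    #define PMD_SIZE (1UL << 20)        /* 1M segment large page */
    #define PAGE_SZ  (1UL << 12)        /* 4K page */

    /* Pick the largest step that is aligned and fits in [addr, end). */
    static unsigned long map_step(unsigned long addr, unsigned long end)
    {
            if (!(addr & (PUD_SIZE - 1)) && addr + PUD_SIZE <= end)
                    return PUD_SIZE;
            if (!(addr & (PMD_SIZE - 1)) && addr + PMD_SIZE <= end)
                    return PMD_SIZE;
            return PAGE_SZ;
    }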
@@ -154,7 +155,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
154 pte_t *pt_dir; 155 pte_t *pt_dir;
155 pte_t pte; 156 pte_t pte;
156 157
157 pte_val(pte) = _PAGE_TYPE_EMPTY; 158 pte_val(pte) = _PAGE_INVALID;
158 while (address < end) { 159 while (address < end) {
159 pg_dir = pgd_offset_k(address); 160 pg_dir = pgd_offset_k(address);
160 if (pgd_none(*pg_dir)) { 161 if (pgd_none(*pg_dir)) {
@@ -255,7 +256,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
255 new_page =__pa(vmem_alloc_pages(0)); 256 new_page =__pa(vmem_alloc_pages(0));
256 if (!new_page) 257 if (!new_page)
257 goto out; 258 goto out;
258 pte_val(*pt_dir) = __pa(new_page); 259 pte_val(*pt_dir) =
260 __pa(new_page) | pgprot_val(PAGE_KERNEL);
259 } 261 }
260 address += PAGE_SIZE; 262 address += PAGE_SIZE;
261 } 263 }
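Taken together, the vmem hunks show the effect of the reworked pte encoding: an empty pte is now plain _PAGE_INVALID (the _PAGE_TYPE_EMPTY alias is gone), and a present kernel pte must spell out its protection via pgprot_val(PAGE_KERNEL) or PAGE_KERNEL_RO instead of relying on the old implicit valid-and-writable default. A compact sketch of the two states under the new scheme, with assumed bit values:

    #define PTE_INVALID 0x400UL         /* assumed _PAGE_INVALID */
    #define PTE_PROTECT 0x200UL         /* assumed _PAGE_PROTECT */

    /* Empty slot, as written by vmem_remove_range(). */
    static unsigned long pte_empty(void)
    {
            return PTE_INVALID;
    }

    /* Present kernel mapping with explicit protection, as built in
     * vmem_add_mem() and vmemmap_populate(). */
    static unsigned long pte_kernel(unsigned long phys, int ro)
    {
            return phys | (ro ? PTE_PROTECT : 0);
    }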